DPDK patches and discussions
* [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapath rework
@ 2021-10-26 17:25 Kai Ji
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 1/7] crypto/qat: qat driver refactor skeleton Kai Ji
                   ` (7 more replies)
  0 siblings, 8 replies; 156+ messages in thread
From: Kai Ji @ 2021-10-26 17:25 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patchset reworks the QAT symmetric crypto datapath implementation:
request building is separated per generation, and the crypto operations
under the raw datapath API implementation are unified.

In addition, this patchset enables QAT out-of-place (OOP) support in the
raw datapath API implementation.

This patch depends on http://patchwork.dpdk.org/project/dpdk/cover/20211026171633.19498-1-kai.ji@intel.com/
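
As an illustration of the direction of this rework, below is a minimal
standalone sketch (not driver code; the toy_* names are hypothetical
stand-ins for the real qat_qp and qat_op_build_request_t types) of the
callback-based enqueue pattern, where a per-generation request builder
is passed as a function pointer into one shared burst routine:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SLOTS 8
#define MSG_SIZE   64

struct toy_qp {
	uint8_t ring[RING_SLOTS][MSG_SIZE];	/* stand-in for the TX ring */
	uint16_t tail;
	uint64_t opaque[2];	/* context carried between enqueue calls */
};

/* Mirrors the shape of the qat_op_build_request_t callback. */
typedef int (*toy_build_request_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque);

/* A "gen1"-style builder: turn the op into a descriptor in the ring. */
static int
toy_build_request_gen1(void *in_op, uint8_t *out_msg, void *op_cookie,
		uint64_t *opaque)
{
	(void)op_cookie;
	snprintf((char *)out_msg, MSG_SIZE, "req(%s) seq=%lu",
			(const char *)in_op, (unsigned long)opaque[0]);
	opaque[0]++;	/* remember state for the next enqueue */
	return 0;
}

/* One shared burst routine, parameterised by the builder callback. */
static uint16_t
toy_enqueue_op_burst(struct toy_qp *qp, toy_build_request_t build,
		void **ops, uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		if (build(ops[i], qp->ring[qp->tail], NULL, qp->opaque) != 0)
			break;
		qp->tail = (qp->tail + 1) % RING_SLOTS;
	}
	return i;	/* number of descriptors actually built */
}

int
main(void)
{
	struct toy_qp qp;
	void *ops[2] = { "cipher-op", "auth-op" };

	memset(&qp, 0, sizeof(qp));
	toy_enqueue_op_burst(&qp, toy_build_request_gen1, ops, 2);
	printf("%s\n%s\n", (char *)qp.ring[0], (char *)qp.ring[1]);
	return 0;
}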

Kai Ji (7):
  crypto/qat: qat driver refactor skeleton
  crypto/qat: qat driver sym op refactor
  crypto/qat: qat driver asym op refactor
  crypto/qat: qat driver session method rework
  crypto/qat: qat driver datapath rework
  app/test: cryptodev test fix
  crypto/qat: qat driver rework clean up

 app/test/test_cryptodev.c                    |  52 +-
 drivers/common/qat/meson.build               |   4 +-
 drivers/common/qat/qat_device.c              |   2 +-
 drivers/common/qat/qat_qp.c                  |  40 +-
 drivers/common/qat/qat_qp.h                  |  38 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   7 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  90 ++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 487 +++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 253 +++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 911 +++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 941 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 786 +++++++++------
 drivers/crypto/qat/qat_asym.h                |  77 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |  14 +-
 drivers/crypto/qat/qat_sym.c                 | 978 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 141 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 114 +--
 drivers/crypto/qat/qat_sym_session.h         |   8 +-
 25 files changed, 3825 insertions(+), 2745 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

--
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v1 1/7] crypto/qat: qat driver refactor skeleton
  2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapath rework Kai Ji
@ 2021-10-26 17:25 ` Kai Ji
  2021-10-29 13:58   ` Zhang, Roy Fan
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor Kai Ji
                   ` (6 subsequent siblings)
  7 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-10-26 17:25 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

Add enqueue/dequeue op burst and build-request skeleton functions for
the QAT crypto driver refactor.
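
A rough standalone sketch of how the qat_op_dequeue_t callback introduced
below in qat_qp.h is expected to be driven once the burst body is filled
in (the toy_* names and the response layout are assumptions, not the real
firmware response format):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the qat_op_dequeue_t callback introduced below. */
typedef int (*toy_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
		uint64_t *dequeue_err_count);

struct toy_resp {
	uint8_t ok;	/* 1 = firmware reported success */
	void *opaque;	/* the crypto op handed over at enqueue time */
};

/* Per-service response handler, e.g. the symmetric-crypto flavour. */
static int
toy_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
		uint64_t *dequeue_err_count)
{
	struct toy_resp *r = (struct toy_resp *)resp;

	(void)op_cookie;
	*op = r->opaque;
	if (!r->ok) {
		(*dequeue_err_count)++;
		return -1;
	}
	return 0;
}

/* Shared dequeue loop: walk the response ring, apply the callback. */
static uint16_t
toy_dequeue_op_burst(struct toy_resp *resp_ring, uint16_t n_resps,
		toy_op_dequeue_t process, void **ops, uint16_t nb_ops,
		uint64_t *err_count)
{
	uint16_t n = n_resps < nb_ops ? n_resps : nb_ops;
	uint16_t i;

	for (i = 0; i < n; i++)
		process(&ops[i], (uint8_t *)&resp_ring[i], NULL, err_count);

	return n;
}

int
main(void)
{
	struct toy_resp ring[2] = { { 1, "op0" }, { 0, "op1" } };
	void *ops[2];
	uint64_t errs = 0;
	uint16_t n = toy_dequeue_op_burst(ring, 2,
			toy_sym_process_response, ops, 2, &errs);

	printf("dequeued %u ops, %lu errors\n", (unsigned)n,
			(unsigned long)errs);
	return 0;
}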

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c          | 16 +++++++++++++
 drivers/common/qat/qat_qp.h          | 22 +++++++++++++++++
 drivers/crypto/qat/qat_asym.c        | 35 ++++++++++++++++++++++++++++
 drivers/crypto/qat/qat_asym.h        | 15 ++++++++++++
 drivers/crypto/qat/qat_sym.c         | 25 ++++++++++++++++++++
 drivers/crypto/qat/qat_sym.h         | 16 +++++++++++++
 drivers/crypto/qat/qat_sym_session.h |  6 +++++
 7 files changed, 135 insertions(+)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index cde421eb77..0fda890075 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -549,6 +549,22 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 	return data & modulo_mask;
 }
 
+uint16_t
+refactor_qat_enqueue_op_burst(__rte_unused void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		__rte_unused void **ops, __rte_unused uint16_t nb_ops)
+{
+	return 0;
+}
+
+uint16_t
+refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		__rte_unused uint16_t nb_ops)
+{
+	return 0;
+}
+
 uint16_t
 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 {
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..c5f115310c 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -36,6 +36,19 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * @brief Function prototype to build a QAT request.
+ * @param opaque: opaque data that may be used to store context
+ *    between two enqueue operations.
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +57,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -77,6 +91,14 @@ struct qat_qp_config {
 	const char *service_str;
 };
 
+uint16_t
+refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
+
+uint16_t
+refactor_qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
+
 uint16_t
 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 85973812a8..d5b4c66d68 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -456,6 +456,41 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
+static __rte_always_inline int
+refactor_qat_asym_build_request(__rte_unused void *in_op,
+		__rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	return 0;
+}
+
+int
+refactor_qat_asym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp,
+		__rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count)
+{
+	return 0;
+}
+
+uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_enqueue_op_burst(qp,
+					refactor_qat_asym_build_request,
+					(void **)ops, nb_ops);
+}
+
+uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
+				refactor_qat_asym_process_response, nb_ops);
+}
+
 int
 qat_asym_build_request(void *in_op,
 			uint8_t *out_msg,
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 308b6b2e0b..50c2641eba 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -92,4 +92,19 @@ void
 qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
 		void *op_cookie);
 
+int
+refactor_qat_asym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp,
+		__rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count);
+
+uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+
+uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #endif /* _QAT_ASYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 93b257522b..a92874cd27 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -210,6 +210,31 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 			ICP_QAT_FW_LA_NO_PROTO);
 }
 
+static __rte_always_inline int
+refactor_qat_sym_build_request(__rte_unused void *in_op,
+		__rte_unused uint8_t *out_msg, __rte_unused	void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	return 0;
+}
+
+uint16_t
+refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
+
+uint16_t
+refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
+				refactor_qat_sym_process_response, nb_ops);
+}
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen)
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..17b2c871bd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -54,6 +54,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+static __rte_always_inline int
+refactor_qat_sym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count)
+{
+	return 0;
+}
+
+uint16_t
+refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..73493ec864 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -55,6 +55,11 @@
 #define QAT_SESSION_IS_SLICE_SET(flags, flag)	\
 	(!!((flags) & (flag)))
 
+struct qat_sym_session;
+
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_NONE = 0,
 	QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -107,6 +112,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor
  2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapath rework Kai Ji
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 1/7] crypto/qat: qat driver refactor skeleton Kai Ji
@ 2021-10-26 17:25 ` Kai Ji
  2021-10-29 14:26   ` Zhang, Roy Fan
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym " Kai Ji
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-10-26 17:25 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds the refactored QAT symmetric build request function
implementation (qat_sym_refactor.c & qat_sym_refactor.h).
It also adds the QAT sym build ops for the auth, cipher, chain and AEAD
operations on QAT generation 1.
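
For reference, a small standalone sketch of the head/tail offset
arithmetic that the enqueue_one_*_job_gen1 helpers below rely on (the
toy_sym_ofs union only mirrors the idea of rte_crypto_sym_ofs; its exact
field layout here is an assumption):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the rte_crypto_sym_ofs idea: pack per-region
 * head/tail skips into one 64-bit word.
 */
union toy_sym_ofs {
	uint64_t raw;
	struct {
		struct { uint16_t head, tail; } cipher;
		struct { uint16_t head, tail; } auth;
	} ofs;
};

int
main(void)
{
	union toy_sym_ofs ofs = { .ofs = {
		.cipher = { .head = 16, .tail = 0 },	/* skip a 16-byte prefix */
		.auth = { .head = 0, .tail = 4 },	/* keep a 4-byte digest out */
	} };
	uint32_t data_len = 128;	/* total length covered by the SGL */

	/* Same arithmetic the enqueue_one_*_job_gen1 helpers perform. */
	uint32_t cipher_len = data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail;
	uint32_t auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	printf("cipher off=%u len=%u, auth off=%u len=%u\n",
			(unsigned)ofs.ofs.cipher.head, cipher_len,
			(unsigned)ofs.ofs.auth.head, auth_len);
	return 0;
}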

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 1071 ++++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  895 +++++++++++++++
 drivers/crypto/qat/qat_sym.h                 |    8 +
 drivers/crypto/qat/qat_sym_hw_dp.c           |    8 -
 drivers/crypto/qat/qat_sym_refactor.c        |  409 +++++++
 drivers/crypto/qat/qat_sym_refactor.h        |  402 +++++++
 6 files changed, 2785 insertions(+), 8 deletions(-)
 create mode 100644 drivers/crypto/qat/qat_sym_refactor.c
 create mode 100644 drivers/crypto/qat/qat_sym_refactor.h

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..07020741bd 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -8,14 +8,1082 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
+	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
+		CAP_RNG(digest_size, 1, 20, 1)), \
+	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
+		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
+	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
+			CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_XTS,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(KASUMI_F8,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(3DES_CBC,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(3DES_CTR,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(DES_CBC,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0))
+
+#define QAT_BASE_GEN1_ASYM_CAPABILITIES				\
+	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),			\
+	QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),			\
+	QAT_ASYM_CAP(RSA,					\
+			((1 << RTE_CRYPTO_ASYM_OP_SIGN) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),	\
+			64, 512, 64)
+
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
+	QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \
+
+#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \
+	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 32, 32, 0), \
+		CAP_RNG(digest_size, 16, 16, 0), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0))
+
+#define QAT_BASE_GEN4_SYM_CAPABILITIES \
+	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
+		CAP_RNG(digest_size, 1, 20, 1)), \
+	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
+		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
+	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_auth(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_cipher(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_chk_len_in_bits_cipher(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_chk_len_in_bits_auth(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_chk_len_in_bits_cipher(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_chk_len_in_bits_auth(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_denqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
@@ -23,6 +1091,9 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 struct qat_capabilities_info
 qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e156f194e2..e1fd14956b 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -269,6 +269,901 @@ qat_sym_create_security_gen1(void *cryptodev)
 
 #endif
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
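+	/*
+	 * Build the request in place in the TX ring slot at the cached tail;
+	 * the op cookie for that slot is indexed by the same descriptor slot.
+	 */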
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
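+	/* Limit the burst to what the queue pair can still accept. */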
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
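+	/* The first response was already read above; account for it first. */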
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
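+	/*
+	 * With a user data array every response carries its own opaque pointer;
+	 * otherwise the first response's opaque is reused for each callback.
+	 */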
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
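+	/* Commit the cached requests and notify HW by writing the TX tail CSR. */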
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_denqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
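+	/*
+	 * Coalesce head updates: only clear the consumed descriptors and write
+	 * the new head to the CSR once enough responses have been processed.
+	 */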
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_denqueue_done_gen1;
+
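+	/*
+	 * Pick the enqueue handlers that match the session service type:
+	 * AEAD, cipher+auth chain, auth only or cipher only.
+	 */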
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
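+	/*
+	 * Select the request build handler that matches the session service
+	 * type; it is cached in the session per process type below.
+	 */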
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (!build_request)
+		return 0;
+	ctx->build_request[proc_type] = build_request;
+
+	if (!handle_mixed)
+		return 0;
+
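+	/*
+	 * Chained sessions that mix SNOW 3G or ZUC with a different
+	 * cipher/hash algorithm are not supported here; reject them.
+	 */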
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 17b2c871bd..4801bd50a7 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -54,6 +54,14 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
 static __rte_always_inline int
 refactor_qat_sym_process_response(__rte_unused void **op,
 		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 12825e448b..94589458d0 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -13,14 +13,6 @@
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
new file mode 100644
index 0000000000..0412902e70
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_refactor.c
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
+
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
+
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
+	}
+
+#ifdef RTE_LIB_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
+			if (unlikely(ctx->bpi_ctx == NULL)) {
+				QAT_DP_LOG(ERR, "QAT PMD only supports security"
+						" operation requests for"
+						" DOCSIS, op (%p) is not for"
+						" DOCSIS.", op);
+				return -EINVAL;
+			} else if (unlikely(((op->sym->m_dst != NULL) &&
+					(op->sym->m_dst != op->sym->m_src)) ||
+					op->sym->m_src->nb_segs > 1)) {
+				QAT_DP_LOG(ERR, "OOP and/or multi-segment"
+						" buffers not supported for"
+						" DOCSIS security.");
+				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+				return -EINVAL;
+			}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
+		}
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
+	}
+
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	/*
+	 * All processes must use the same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value; typically they do, but they could differ if the binaries were
+	 * built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
+	} else {
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
+	}
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+
+	return 0;
+
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
+
+	return ret;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
+	return 0;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
new file mode 100644
index 0000000000..d4bfe8f364
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_refactor.h
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_net_crc.h>
+#endif
+
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_crypto.h"
+#include "qat_logs.h"
+
+#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
+
+#define BYTE_LENGTH    8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be taken as the max length for IV, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/* Macro to add a capability */
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER	16
+
+/* Maximum data length for single pass GMAC: 2^14-1 */
+#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+	qat_sgl_hdr;
+	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+	struct qat_sym_sgl qat_sgl_src;
+	struct qat_sym_sgl qat_sgl_dst;
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
+	union {
+		/* Used for Single-Pass AES-GMAC only */
+		struct {
+			struct icp_qat_hw_cipher_algo_blk cd_cipher
+					__rte_packed __rte_cache_aligned;
+			phys_addr_t cd_phys_addr;
+		} spc_gmac;
+	} opt;
+};
+
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+/** Encrypt a single partial block
+ *  Depends on openssl libcrypto
+ *  Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt the IV, then XOR this with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_encrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_encrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+	return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len > 0 &&
+			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+		/* Encrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset;
+
+		last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely(sym_op->m_dst != NULL))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = dst - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG,
+				"BPI: dst before post-process:",
+				dst, last_block_len);
+#endif
+		bpi_cipher_encrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+				last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG,
+				"BPI: dst after post-process:",
+				dst, last_block_len);
+#endif
+	}
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+#ifdef RTE_LIB_SECURITY
+static inline void
+qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint32_t crc_data_ofs, crc_data_len, crc;
+	uint8_t *crc_data;
+
+	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
+			sym_op->auth.data.length != 0) {
+
+		crc_data_ofs = sym_op->auth.data.offset;
+		crc_data_len = sym_op->auth.data.length;
+		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+				crc_data_ofs);
+
+		crc = rte_net_crc_calc(crc_data, crc_data_len,
+				RTE_NET_CRC32_ETH);
+
+		if (crc != *(uint32_t *)(crc_data + crc_data_len))
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+}
+
+static inline void
+qat_crc_generate(struct qat_sym_session *ctx,
+			struct rte_crypto_op *op)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint32_t *crc, crc_data_len;
+	uint8_t *crc_data;
+
+	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
+			sym_op->auth.data.length != 0 &&
+			sym_op->m_src->nb_segs == 1) {
+
+		crc_data_len = sym_op->auth.data.length;
+		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+				sym_op->auth.data.offset);
+		crc = (uint32_t *)(crc_data + crc_data_len);
+		*crc = rte_net_crc_calc(crc_data, crc_data_len,
+				RTE_NET_CRC32_ETH);
+	}
+}
+
+static inline void
+qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op *op;
+	struct qat_sym_session *ctx;
+	uint16_t i;
+
+	for (i = 0; i < nb_ops; i++) {
+		op = (struct rte_crypto_op *)ops[i];
+
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			ctx = (struct qat_sym_session *)
+				get_sec_session_private_data(
+					op->sym->sec_session);
+
+			if (ctx == NULL || ctx->bpi_ctx == NULL)
+				continue;
+
+			qat_crc_generate(ctx, op);
+		}
+	}
+}
+#endif
+
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+			(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+	uint8_t is_docsis_sec;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+			sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+#ifdef RTE_LIB_SECURITY
+	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		/*
+		 * Assume at this point that any security
+		 * op is for DOCSIS
+		 */
+		sess = (struct qat_sym_session *)
+				get_sec_session_private_data(
+				rx_op->sym->sec_session);
+		is_docsis_sec = 1;
+	} else
+#endif
+	{
+		sess = (struct qat_sym_session *)
+				get_sym_session_private_data(
+				rx_op->sym->session,
+				qat_sym_driver_id);
+		is_docsis_sec = 0;
+	}
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+			resp_msg->comn_hdr.comn_status)) {
+
+		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+		if (sess->bpi_ctx) {
+			qat_bpicipher_postprocess(sess, rx_op);
+#ifdef RTE_LIB_SECURITY
+			if (is_docsis_sec)
+				qat_crc_verify(sess, rx_op);
+#endif
+		}
+	}
+
+	if (sess->is_single_pass_gmac) {
+		struct qat_sym_op_cookie *cookie =
+				(struct qat_sym_op_cookie *) op_cookie;
+		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
+				sess->auth_key_length);
+	}
+
+	*op = (void *)rx_op;
+
+	return 1;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{}
+#endif
+
+#endif /* _QAT_SYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym op refactor
  2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor Kai Ji
@ 2021-10-26 17:25 ` Kai Ji
  2021-10-29 14:36   ` Zhang, Roy Fan
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework Kai Ji
                   ` (4 subsequent siblings)
  7 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-10-26 17:25 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds the refactored QAT asymmetric build-request
function implementation (qat_asym_refactor.c & qat_asym_refactor.h).

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c |   7 +
 drivers/crypto/qat/qat_asym_refactor.c     | 994 +++++++++++++++++++++
 drivers/crypto/qat/qat_asym_refactor.h     | 125 +++
 3 files changed, 1126 insertions(+)
 create mode 100644 drivers/crypto/qat/qat_asym_refactor.c
 create mode 100644 drivers/crypto/qat/qat_asym_refactor.h

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..99d90fa56c 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
new file mode 100644
index 0000000000..8e789920cb
--- /dev/null
+++ b/drivers/crypto/qat/qat_asym_refactor.c
@@ -0,0 +1,994 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 - 2021 Intel Corporation
+ */
+
+
+#include <stdarg.h>
+
+#include <rte_cryptodev_pmd.h>
+
+#include "icp_qat_fw_pke.h"
+#include "icp_qat_fw.h"
+#include "qat_pke_functionality_arrays.h"
+
+#include "qat_device.h"
+
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
+{
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
+	}
+
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
+
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
+}
+
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+}
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
+
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
+static void
+qat_clear_arrays(struct qat_asym_op_cookie *cookie,
+		int in_count, int out_count, int in_size, int out_size)
+{
+	int i;
+
+	for (i = 0; i < in_count; i++)
+		memset(cookie->input_array[i], 0x0, in_size);
+	for (i = 0; i < out_count; i++)
+		memset(cookie->output_array[i], 0x0, out_size);
+}
+
+static void
+qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
+{
+	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
+		qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
+				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size,
+				out_size);
+	else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV)
+		qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
+				QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size,
+				out_size);
+}
+
+static void
+qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length - n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+						RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
+			alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
+{
+	if (n.length < 8) {
+		/* Not a case for any cryptograpic function except for DH
+		/* Not a case for any cryptographic function except for the DH
+		 * generator, which very often is only one byte long
+		size_t i;
+
+		if (n.data[n.length - 1] == 0x0) {
+			for (i = 0; i < n.length - 1; i++)
+				if (n.data[i] != 0x0)
+					break;
+			if (i == n.length - 1)
+				return -(EINVAL);
+		}
+	} else if (*(uint64_t *)&n.data[
+				n.length - 8] == 0) {
+		/* Very likely it is zeroed modulus */
+		size_t i;
+
+		for (i = 0; i < n.length - 8; i++)
+			if (n.data[i] != 0x0)
+				break;
+		if (i == n.length - 8)
+			return -(EINVAL);
+	}
+
+	return 0;
+}
+
+static int
+qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
+		struct icp_qat_fw_pke_request *qat_req,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	int err = 0;
+	size_t alg_size;
+	size_t alg_size_in_bytes;
+	uint32_t func_id = 0;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		err = qat_asym_check_nonzero(xform->modex.modulus);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
+					" aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
+			       xform->modex.exponent.length,
+			       xform->modex.modulus.length);
+		alg_size = alg_size_in_bytes << 3;
+
+		if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
+				sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
+				&alg_size, &func_id)) {
+			return -(EINVAL);
+		}
+
+		alg_size_in_bytes = alg_size >> 3;
+		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
+			asym_op->modex.base.length
+			, asym_op->modex.base.data,
+			asym_op->modex.base.length);
+		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
+			xform->modex.exponent.length
+			, xform->modex.exponent.data,
+			xform->modex.exponent.length);
+		rte_memcpy(cookie->input_array[2]  + alg_size_in_bytes -
+			xform->modex.modulus.length,
+			xform->modex.modulus.data,
+			xform->modex.modulus.length);
+		cookie->alg_size = alg_size;
+		qat_req->pke_hdr.cd_pars.func_id = func_id;
+		qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
+		qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
+				cookie->input_array[0],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
+				cookie->input_array[1],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp modulus",
+				cookie->input_array[2],
+				alg_size_in_bytes);
+#endif
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		err = qat_asym_check_nonzero(xform->modinv.modulus);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in modular multiplicative"
+					" inverse, aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
+				xform->modinv.modulus.length);
+		alg_size = alg_size_in_bytes << 3;
+
+		if (xform->modinv.modulus.data[
+				xform->modinv.modulus.length - 1] & 0x01) {
+			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
+					sizeof(MOD_INV_IDS_ODD)/
+					sizeof(*MOD_INV_IDS_ODD),
+					&alg_size, &func_id)) {
+				return -(EINVAL);
+			}
+		} else {
+			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
+					sizeof(MOD_INV_IDS_EVEN)/
+					sizeof(*MOD_INV_IDS_EVEN),
+					&alg_size, &func_id)) {
+				return -(EINVAL);
+			}
+		}
+
+		alg_size_in_bytes = alg_size >> 3;
+		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
+			asym_op->modinv.base.length
+				, asym_op->modinv.base.data,
+				asym_op->modinv.base.length);
+		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
+				xform->modinv.modulus.length
+				, xform->modinv.modulus.data,
+				xform->modinv.modulus.length);
+		cookie->alg_size = alg_size;
+		qat_req->pke_hdr.cd_pars.func_id = func_id;
+		qat_req->input_param_count =
+				QAT_ASYM_MODINV_NUM_IN_PARAMS;
+		qat_req->output_param_count =
+				QAT_ASYM_MODINV_NUM_OUT_PARAMS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
+				cookie->input_array[0],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
+				cookie->input_array[1],
+				alg_size_in_bytes);
+#endif
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		err = qat_asym_check_nonzero(xform->rsa.n);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in RSA,"
+					" aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = xform->rsa.n.length;
+		alg_size = alg_size_in_bytes << 3;
+
+		qat_req->input_param_count =
+				QAT_ASYM_RSA_NUM_IN_PARAMS;
+		qat_req->output_param_count =
+				QAT_ASYM_RSA_NUM_OUT_PARAMS;
+
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+						RTE_CRYPTO_ASYM_OP_VERIFY) {
+
+			if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
+					sizeof(RSA_ENC_IDS)/
+					sizeof(*RSA_ENC_IDS),
+					&alg_size, &func_id)) {
+				err = -(EINVAL);
+				QAT_LOG(ERR,
+					"Unsupported RSA parameter size (key)");
+				return err;
+			}
+			alg_size_in_bytes = alg_size >> 3;
+			if (asym_op->rsa.op_type ==
+				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0] +
+						alg_size_in_bytes -
+						asym_op->rsa.message.length
+						, asym_op->rsa.message.data,
+						asym_op->rsa.message.length);
+					break;
+				default:
+					err = -(EINVAL);
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Encryption)");
+					return err;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+#endif
+			} else {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0],
+						asym_op->rsa.sign.data,
+						alg_size_in_bytes);
+					break;
+				default:
+					err = -(EINVAL);
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Verify)");
+					return err;
+				}
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+#endif
+
+			}
+			rte_memcpy(cookie->input_array[1] +
+					alg_size_in_bytes -
+					xform->rsa.e.length
+					, xform->rsa.e.data,
+					xform->rsa.e.length);
+			rte_memcpy(cookie->input_array[2] +
+					alg_size_in_bytes -
+					xform->rsa.n.length,
+					xform->rsa.n.data,
+					xform->rsa.n.length);
+
+			cookie->alg_size = alg_size;
+			qat_req->pke_hdr.cd_pars.func_id = func_id;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
+					cookie->input_array[1],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
+					cookie->input_array[2],
+					alg_size_in_bytes);
+#endif
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0]
+						+ alg_size_in_bytes -
+						asym_op->rsa.cipher.length,
+						asym_op->rsa.cipher.data,
+						asym_op->rsa.cipher.length);
+					break;
+				default:
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Decrypt)");
+					return -(EINVAL);
+				}
+
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0]
+						+ alg_size_in_bytes -
+						asym_op->rsa.message.length,
+						asym_op->rsa.message.data,
+						asym_op->rsa.message.length);
+					break;
+				default:
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Signature)");
+					return -(EINVAL);
+				}
+			}
+			if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
+
+				qat_req->input_param_count =
+						QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
+				if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS,
+						sizeof(RSA_DEC_CRT_IDS)/
+						sizeof(*RSA_DEC_CRT_IDS),
+						&alg_size, &func_id)) {
+					return -(EINVAL);
+				}
+				alg_size_in_bytes = alg_size >> 3;
+
+				rte_memcpy(cookie->input_array[1] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.p.length
+						, xform->rsa.qt.p.data,
+						xform->rsa.qt.p.length);
+				rte_memcpy(cookie->input_array[2] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.q.length
+						, xform->rsa.qt.q.data,
+						xform->rsa.qt.q.length);
+				rte_memcpy(cookie->input_array[3] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.dP.length
+						, xform->rsa.qt.dP.data,
+						xform->rsa.qt.dP.length);
+				rte_memcpy(cookie->input_array[4] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.dQ.length
+						, xform->rsa.qt.dQ.data,
+						xform->rsa.qt.dQ.length);
+				rte_memcpy(cookie->input_array[5] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.qInv.length
+						, xform->rsa.qt.qInv.data,
+						xform->rsa.qt.qInv.length);
+				cookie->alg_size = alg_size;
+				qat_req->pke_hdr.cd_pars.func_id = func_id;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "C",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG, "p",
+						cookie->input_array[1],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG, "q",
+						cookie->input_array[2],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"dP", cookie->input_array[3],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"dQ", cookie->input_array[4],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"qInv", cookie->input_array[5],
+						alg_size_in_bytes);
+#endif
+			} else if (xform->rsa.key_type ==
+					RTE_RSA_KEY_TYPE_EXP) {
+				if (qat_asym_get_sz_and_func_id(
+						RSA_DEC_IDS,
+						sizeof(RSA_DEC_IDS)/
+						sizeof(*RSA_DEC_IDS),
+						&alg_size, &func_id)) {
+					return -(EINVAL);
+				}
+				alg_size_in_bytes = alg_size >> 3;
+				rte_memcpy(cookie->input_array[1] +
+						alg_size_in_bytes -
+						xform->rsa.d.length,
+						xform->rsa.d.data,
+						xform->rsa.d.length);
+				rte_memcpy(cookie->input_array[2] +
+						alg_size_in_bytes -
+						xform->rsa.n.length,
+						xform->rsa.n.data,
+						xform->rsa.n.length);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext",
+					cookie->input_array[0],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d",
+					cookie->input_array[1],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n",
+					cookie->input_array[2],
+					alg_size_in_bytes);
+#endif
+
+				cookie->alg_size = alg_size;
+				qat_req->pke_hdr.cd_pars.func_id = func_id;
+			} else {
+				QAT_LOG(ERR, "Invalid RSA key type");
+				return -(EINVAL);
+			}
+		}
+	} else {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		return -(EINVAL);
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	struct qat_asym_session *ctx;
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	struct rte_crypto_asym_op *asym_op = op->asym;
+	struct icp_qat_fw_pke_request *qat_req =
+			(struct icp_qat_fw_pke_request *)out_msg;
+	struct qat_asym_op_cookie *cookie =
+				(struct qat_asym_op_cookie *)op_cookie;
+	int err = 0;
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)
+			get_asym_session_private_data(
+			op->asym->session, qat_asym_driver_id);
+		if (unlikely(ctx == NULL)) {
+			QAT_LOG(ERR, "Session has not been created for this device");
+			goto error;
+		}
+		rte_mov64((uint8_t *)qat_req,
+			(const uint8_t *)&(ctx->req_tmpl));
+		err = qat_asym_fill_arrays(asym_op, qat_req,
+				cookie, ctx->xform);
+		if (err) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			goto error;
+		}
+	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_fill_req_tmpl(qat_req);
+		err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
+				op->asym->xform);
+		if (err) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			goto error;
+		}
+	} else {
+		QAT_DP_LOG(ERR, "Invalid session/xform settings");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		goto error;
+	}
+
+	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+	qat_req->pke_mid.src_data_addr = cookie->input_addr;
+	qat_req->pke_mid.dest_data_addr = cookie->output_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_pke_request));
+#endif
+
+	return 0;
+error:
+
+	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_pke_request));
+#endif
+
+	qat_req->output_param_count = 0;
+	qat_req->input_param_count = 0;
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+	cookie->error |= err;
+
+	return 0;
+}
+
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
new file mode 100644
index 0000000000..9ecabdbe8f
--- /dev/null
+++ b/drivers/crypto/qat/qat_asym_refactor.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _QAT_ASYM_H_
+#define _QAT_ASYM_H_
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto_asym.h>
+#include "icp_qat_fw_pke.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
+typedef uint64_t large_int_ptr;
+#define MAX_PKE_PARAMS	8
+#define QAT_PKE_MAX_LN_SIZE 512
+#define _PKE_ALIGN_ __rte_aligned(8)
+
+#define QAT_ASYM_MAX_PARAMS			8
+#define QAT_ASYM_MODINV_NUM_IN_PARAMS		2
+#define QAT_ASYM_MODINV_NUM_OUT_PARAMS		1
+#define QAT_ASYM_MODEXP_NUM_IN_PARAMS		3
+#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS		1
+#define QAT_ASYM_RSA_NUM_IN_PARAMS		3
+#define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
+#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
+
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
+struct qat_asym_op_cookie {
+	size_t alg_size;
+	uint64_t error;
+	rte_iova_t input_addr;
+	rte_iova_t output_addr;
+	large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+	large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+	union {
+		uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
+		uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
+	} _PKE_ALIGN_;
+	uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
+} _PKE_ALIGN_;
+
+struct qat_asym_session {
+	struct icp_qat_fw_pke_request req_tmpl;
+	struct rte_crypto_asym_xform *xform;
+};
+
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool);
+
+unsigned int
+qat_asym_session_get_private_size(struct rte_cryptodev *dev);
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess);
+
+/*
+ * Process a PKE response received from the QAT outgoing queue
+ *
+ * @param	op		used to return a pointer to the rte_crypto_op
+ *				referred to by the response message
+ * @param	resp		icp_qat_fw_pke_resp message received from
+ *				the outgoing firmware message queue
+ * @param	op_cookie	cookie pointer holding private metadata
+ * @param	dequeue_err_count	pointer to the dequeue error counter
+ *
+ */
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
+
+void
+qat_asym_init_op_cookie(void *cookie);
+
+#endif /* _QAT_ASYM_H_ */
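
For context, a minimal sketch of how the QAT_ASYM_CAP() helper above is
intended to be used when declaring a capability table; the table name, the
RSA op-type mask and the modlen bounds are illustrative assumptions, not
values taken from this patch:

	/* Hypothetical capability list built with QAT_ASYM_CAP(); assumes
	 * this header and rte_cryptodev.h are included.
	 */
	static const struct rte_cryptodev_capabilities example_asym_caps[] = {
		QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),
		QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),
		QAT_ASYM_CAP(RSA,
				((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
				 (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
				 (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
				 (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
				64, 512, 64),
		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
	};
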
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework
  2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                   ` (2 preceding siblings ...)
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym " Kai Ji
@ 2021-10-26 17:25 ` Kai Ji
  2021-10-29 14:40   ` Zhang, Roy Fan
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework Kai Ji
                   ` (3 subsequent siblings)
  7 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-10-26 17:25 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces the set_session and set_raw_dp_ctx methods to the QAT
generation-specific dev ops, and replaces min_qat_dev_gen_id with dev_id. A
session is treated as invalid if the device generation id does not match
during session init and subsequent operations.
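
As a rough illustration of the dispatch this enables, a minimal sketch using
the set_session_t typedef and qat_sym_gen_dev_ops[] table added in the diff
below; the wrapper function itself is hypothetical and not part of the patch:

	/* Hypothetical helper: look up and invoke the generation-specific
	 * set_session hook once the RTE_INIT constructors have populated
	 * qat_sym_gen_dev_ops[].
	 */
	static int
	example_sym_set_session(struct rte_cryptodev *dev, void *session)
	{
		struct qat_cryptodev_private *internals =
				dev->data->dev_private;
		enum qat_device_gen gen = internals->qat_dev->qat_dev_gen;
		set_session_t set_session = qat_sym_gen_dev_ops[gen].set_session;

		/* A generation that cannot handle the algorithm either leaves
		 * the hook NULL or returns -ENOTSUP; the session is then
		 * rejected as invalid on that device.
		 */
		if (set_session == NULL)
			return -ENOTSUP;
		return set_session(dev, session);
	}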

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  90 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 467 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 243 ++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |   7 +-
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |   9 +
 drivers/crypto/qat/qat_sym.c                 |   8 +-
 drivers/crypto/qat/qat_sym_session.c         | 106 +----
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 9 files changed, 832 insertions(+), 101 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..72609703f9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == 0 || ret != -ENOTSUP)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms,
+	 * but some of those are not supported by GEN2 either, so check here
+	 */
+	if ((qat_private->internal_capabilities &
+			QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+		return -ENOTSUP;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +309,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..6494019050 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -143,6 +143,468 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * this is addressed by GEN3
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +612,10 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +627,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..167c95abcf 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -103,11 +103,253 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * this is addressed by GEN4
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +363,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e1fd14956b..e32bdf1c4b 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -6,6 +6,7 @@
 #ifdef RTE_LIB_SECURITY
 #include <rte_security_driver.h>
 #endif
+#include <cryptodev_pmd.h>
 
 #include "adf_transport_access_macros.h"
 #include "icp_qat_fw.h"
@@ -154,7 +155,7 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 };
 
 static struct qat_capabilities_info
-qat_sym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused)
+qat_sym_crypto_cap_get_gen1(struct qat_pci_device * qat_dev __rte_unused)
 {
 	struct qat_capabilities_info capa_info;
 	capa_info.data = qat_sym_crypto_caps_gen1;
@@ -1169,6 +1170,10 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 01d2439b93..6f17abd404 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2021 Intel Corporation
  */
 
+#include <cryptodev_pmd.h>
 #include "qat_device.h"
 #include "qat_qp.h"
 #include "qat_crypto.h"
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..8fe3f0b061 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -48,15 +48,24 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index a92874cd27..de687de857 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -237,7 +237,7 @@ refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -302,12 +302,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..52837d7c9c 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
 	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in,
+		uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 73493ec864..e1b5edb2f9 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -100,7 +100,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework
  2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                   ` (3 preceding siblings ...)
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework Kai Ji
@ 2021-10-26 17:25 ` Kai Ji
  2021-10-29 14:41   ` Zhang, Roy Fan
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix Kai Ji
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-10-26 17:25 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces an op_build_request function in the qat enqueue
op burst and adds qat_dequeue_process_response in the qat dequeue op
burst. The session build-request op is selected based on the crypto
operation.
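
For illustration, a minimal sketch of how a service binds its own
callbacks to the now-generic burst functions, mirroring the
qat_comp_dequeue_burst() wrapper added below. The wrapper names here
are illustrative only and assume that qat_sym_build_request() and
qat_sym_process_response() match the new qat_op_build_request_t and
qat_op_dequeue_t typedefs:

/* Assumes qat_qp.h plus the sym callbacks declared by the driver. */
static uint16_t
qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* The per-service callback builds one firmware descriptor per op;
	 * the generic qat_enqueue_op_burst() owns the ring and statistics.
	 */
	return qat_enqueue_op_burst(qp, qat_sym_build_request,
			(void **)ops, nb_ops);
}

static uint16_t
qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* The per-service callback decodes each firmware response into
	 * the op status while the generic code walks the response ring.
	 */
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_sym_process_response, nb_ops);
}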

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |  4 +-
 drivers/common/qat/qat_qp.c                  | 60 +++++---------------
 drivers/common/qat/qat_qp.h                  | 32 +++++++----
 drivers/compress/qat/qat_comp_pmd.c          | 12 +++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  4 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  2 +-
 drivers/crypto/qat/qat_asym_refactor.c       |  4 +-
 drivers/crypto/qat/qat_asym_refactor.h       |  2 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           |  2 +-
 drivers/crypto/qat/qat_sym_refactor.c        | 51 +----------------
 drivers/crypto/qat/qat_sym_refactor.h        |  2 +-
 12 files changed, 57 insertions(+), 122 deletions(-)

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index ce9959d103..b7b18b6c0f 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -70,8 +70,8 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+    foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c',
+            'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 0fda890075..82adda0698 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -15,8 +15,8 @@
 #include "qat_logs.h"
 #include "qat_device.h"
 #include "qat_qp.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_comp.h"
 
 #define QAT_CQ_MAX_DEQ_RETRIES 10
@@ -550,23 +550,8 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-refactor_qat_enqueue_op_burst(__rte_unused void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
-		__rte_unused void **ops, __rte_unused uint16_t nb_ops)
-{
-	return 0;
-}
-
-uint16_t
-refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		__rte_unused uint16_t nb_ops)
-{
-	return 0;
-}
-
-uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -616,29 +601,18 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -833,7 +807,8 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -851,19 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index c5f115310c..b4518df7d0 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -14,6 +14,24 @@
 
 struct qat_pci_device;
 
+/* Default qp configuration for GEN4 devices */
+#define QAT_GEN4_QP_DEFCON	(QAT_SERVICE_SYMMETRIC |	\
+				QAT_SERVICE_SYMMETRIC << 8 |	\
+				QAT_SERVICE_SYMMETRIC << 16 |	\
+				QAT_SERVICE_SYMMETRIC << 24)
+
+/* QAT GEN 4 specific macros */
+#define QAT_GEN4_BUNDLE_NUM             4
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM     1
+
+/* Queue pair setup error codes */
+#define QAT_NOMEM		1
+#define QAT_QP_INVALID_DESC_NO	2
+#define QAT_QP_BUSY		3
+#define QAT_PCI_NO_RESOURCE	4
+
+#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C
+
 /**
  * Structure associated with each queue.
  */
@@ -92,21 +110,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops);
 
-uint16_t
-refactor_qat_dequeue_op_burst(void *qp, void **ops,
-		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
-
-uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
-
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
@@ -118,7 +130,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev,
 
 int
 qat_qps_per_service(struct qat_pci_device *qat_dev,
-		enum qat_service_type service);
+			enum qat_service_type service);
 
 const struct qat_qp_hw_data *
 qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 9b24d46e97..2f17f8825e 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 72609703f9..86c124f6c8 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 167c95abcf..108e07ee7f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e32bdf1c4b..aea56fba4c 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -13,7 +13,7 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym_session.h"
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_sym_session.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
index 8e789920cb..3a9b1d4054 100644
--- a/drivers/crypto/qat/qat_asym_refactor.c
+++ b/drivers/crypto/qat/qat_asym_refactor.c
@@ -5,7 +5,7 @@
 
 #include <stdarg.h>
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
@@ -14,7 +14,7 @@
 #include "qat_device.h"
 
 #include "qat_logs.h"
-#include "qat_asym.h"
+#include "qat_asym_refactor.h"
 
 uint8_t qat_asym_driver_id;
 
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
index 9ecabdbe8f..6d3d991bc7 100644
--- a/drivers/crypto/qat/qat_asym_refactor.h
+++ b/drivers/crypto/qat/qat_asym_refactor.h
@@ -5,7 +5,7 @@
 #ifndef _QAT_ASYM_H_
 #define _QAT_ASYM_H_
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
 #include "qat_device.h"
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 94589458d0..af75ac2011 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -8,7 +8,7 @@
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
 
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
index 0412902e70..82f078ff1e 100644
--- a/drivers/crypto/qat/qat_sym_refactor.c
+++ b/drivers/crypto/qat/qat_sym_refactor.c
@@ -10,7 +10,7 @@
 #include <rte_bus_pci.h>
 #include <rte_byteorder.h>
 
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_qp.h"
 
@@ -354,55 +354,6 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_cryptodev_private *internals = dev->data->dev_private;
-	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
-	struct qat_crypto_gen_dev_ops *gen_dev_ops =
-			&qat_sym_gen_dev_ops[qat_dev_gen];
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	if (!gen_dev_ops->set_raw_dp_ctx) {
-		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
-				qat_dev_gen);
-		return -ENOTSUP;
-	}
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
-}
-
-int
-qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
index d4bfe8f364..44feca8251 100644
--- a/drivers/crypto/qat/qat_sym_refactor.h
+++ b/drivers/crypto/qat/qat_sym_refactor.h
@@ -5,7 +5,7 @@
 #ifndef _QAT_SYM_H_
 #define _QAT_SYM_H_
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 #ifdef RTE_LIB_SECURITY
 #include <rte_net_crc.h>
 #endif
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix
  2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                   ` (4 preceding siblings ...)
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework Kai Ji
@ 2021-10-26 17:25 ` Kai Ji
  2021-10-29 14:43   ` Zhang, Roy Fan
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up Kai Ji
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  7 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-10-26 17:25 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji, pablo.de.lara.guarch, adamx.dybkowski

test_mixed_auth_cipher: ensure enough space is allocated in ibuf and
obuf for the mbuf to vec conversion.
test_kasumi_decryption: update the cipher length.
qat/dev: support SGL out-of-place (OOP) operation.
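
As a reference, the space check the raw data-path test now performs
can be restated standalone (assuming <rte_mbuf.h>): walk the chained
mbuf to the IOVA just past the authenticated region; if that address
equals the digest IOVA and the auth region ends before the cipher
region, the digest is encrypted in place and the conversion buffer
length must be extended to cover it. The helper name below is
illustrative only:

static rte_iova_t
auth_end_iova_of(struct rte_mbuf *m, uint32_t auth_offset, uint32_t auth_len)
{
	uint32_t remaining = auth_offset + auth_len;

	/* Skip whole segments until the end offset lands inside one. */
	while (remaining >= rte_pktmbuf_data_len(m) && m->next != NULL) {
		remaining -= rte_pktmbuf_data_len(m);
		m = m->next;
	}
	return rte_pktmbuf_iova_offset(m, remaining);
}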

Fixes: 681f540da52b ("cryptodev: do not use AAD in wireless algorithms")
Cc: pablo.de.lara.guarch@intel.com

Fixes: e847fc512817 ("test/crypto: add encrypted digest case for AES-CTR-CMAC")
Cc: adamx.dybkowski@intel.com

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 app/test/test_cryptodev.c                    | 52 ++++++++++++++----
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 4 files changed, 124 insertions(+), 25 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 814a0b401d..dd791a181a 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -179,6 +179,10 @@ post_process_raw_dp_op(void *user_data,	uint32_t index __rte_unused,
 			RTE_CRYPTO_OP_STATUS_ERROR;
 }
 
+static struct crypto_testsuite_params testsuite_params = { NULL };
+struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
+static struct crypto_unittest_params unittest_params;
+
 void
 process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 		struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth,
@@ -193,6 +197,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	struct rte_crypto_sgl sgl, dest_sgl;
 	uint32_t max_len;
 	union rte_cryptodev_session_ctx sess;
+	uint64_t auth_end_iova;
 	uint32_t count = 0;
 	struct rte_crypto_raw_dp_ctx *ctx;
 	uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0,
@@ -202,6 +207,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	int ctx_service_size;
 	int32_t status = 0;
 	int enqueue_status, dequeue_status;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	/* oop is not supported in raw hw dp api*/
+	int is_sgl = sop->m_src->nb_segs > 1;
 
 	ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 	if (ctx_service_size < 0) {
@@ -267,6 +275,30 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 		digest.va = (void *)sop->auth.digest.data;
 		digest.iova = sop->auth.digest.phys_addr;
 
+		if (is_sgl) {
+			uint32_t remaining_off = auth_offset + auth_len;
+			struct rte_mbuf *sgl_buf = sop->m_src;
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else {
+			/* oop is not supported in raw hw dp api */
+			auth_end_iova = rte_pktmbuf_iova(op->sym->m_src) +
+							 auth_offset + auth_len;
+		}
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_offset + auth_len < cipher_offset + cipher_len) &&
+				(digest.iova == auth_end_iova) && is_sgl)
+			max_len = RTE_MAX(max_len,
+				auth_offset + auth_len +
+				ut_params->auth_xform.auth.digest_length);
+
 	} else if (is_cipher) {
 		cipher_offset = sop->cipher.data.offset;
 		cipher_len = sop->cipher.data.length;
@@ -503,10 +535,6 @@ process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
 	return op;
 }
 
-static struct crypto_testsuite_params testsuite_params = { NULL };
-struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
-static struct crypto_unittest_params unittest_params;
-
 static int
 testsuite_setup(void)
 {
@@ -4077,9 +4105,9 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
 
 	/* Create KASUMI operation */
 	retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
-					tdata->cipher_iv.len,
-					tdata->ciphertext.len,
-					tdata->validCipherOffsetInBits.len);
+			tdata->cipher_iv.len,
+			RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+			tdata->validCipherOffsetInBits.len);
 	if (retval < 0)
 		return retval;
 
@@ -7310,6 +7338,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
 	unsigned int plaintext_len;
 	unsigned int ciphertext_pad_len;
 	unsigned int ciphertext_len;
+	unsigned int data_len;
 
 	struct rte_cryptodev_info dev_info;
 	struct rte_crypto_op *op;
@@ -7370,21 +7399,22 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
 	plaintext_len = ceil_byte_length(tdata->plaintext.len_bits);
 	ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
 	plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+	data_len = RTE_MAX(ciphertext_pad_len, plaintext_pad_len);
 
 	if (verify) {
 		ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-				ciphertext_pad_len);
+				data_len);
 		memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
 		if (op_mode == OUT_OF_PLACE)
-			rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len);
+			rte_pktmbuf_append(ut_params->obuf, data_len);
 		debug_hexdump(stdout, "ciphertext:", ciphertext,
 				ciphertext_len);
 	} else {
 		plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-				plaintext_pad_len);
+				data_len);
 		memcpy(plaintext, tdata->plaintext.data, plaintext_len);
 		if (op_mode == OUT_OF_PLACE)
-			rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
+			rte_pktmbuf_append(ut_params->obuf, data_len);
 		debug_hexdump(stdout, "plaintext:", plaintext, plaintext_len);
 	}
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index 6494019050..c59c25fe8f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -467,8 +467,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -564,8 +574,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 108e07ee7f..1b6cf10589 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -295,8 +295,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index aea56fba4c..85d5e45a42 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -529,9 +529,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -628,8 +637,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -728,8 +747,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -833,8 +862,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up
  2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                   ` (5 preceding siblings ...)
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix Kai Ji
@ 2021-10-26 17:25 ` Kai Ji
  2021-10-29 14:46   ` Zhang, Roy Fan
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  7 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-10-26 17:25 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch renames the refactored code back to qat_sym.c and
qat_asym.c; the meson build file is updated accordingly.
The QAT HW DP APIs and the QAT PMD functions are integrated into
qat_sym.c and qat_asym.c.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |   4 +-
 drivers/common/qat/qat_device.c              |   2 +-
 drivers/common/qat/qat_qp.c                  |   4 +-
 drivers/common/qat/qat_qp.h                  |   2 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |   4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 164 +--
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |   2 +-
 drivers/crypto/qat/qat_asym.c                | 803 +++++++++------
 drivers/crypto/qat/qat_asym.h                |  90 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_asym_refactor.c       | 994 ------------------
 drivers/crypto/qat/qat_asym_refactor.h       | 125 ---
 drivers/crypto/qat/qat_crypto.h              |   5 +-
 drivers/crypto/qat/qat_sym.c                 | 997 ++++++-------------
 drivers/crypto/qat/qat_sym.h                 | 141 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 975 ------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_refactor.c        | 360 -------
 drivers/crypto/qat/qat_sym_refactor.h        | 402 --------
 drivers/crypto/qat/qat_sym_session.c         |   8 +-
 23 files changed, 997 insertions(+), 4720 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_asym_refactor.c
 delete mode 100644 drivers/crypto/qat/qat_asym_refactor.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_refactor.c
 delete mode 100644 drivers/crypto/qat/qat_sym_refactor.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index b7b18b6c0f..e53b51dcac 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -70,8 +70,8 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
+            'qat_asym.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 437996f2e8..e1fc1c37ec 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 82adda0698..56facf0fbe 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -15,8 +15,8 @@
 #include "qat_logs.h"
 #include "qat_device.h"
 #include "qat_qp.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_comp.h"
 
 #define QAT_CQ_MAX_DEQ_RETRIES 10
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index b4518df7d0..fc77173fc1 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -130,7 +130,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev,
 
 int
 qat_qps_per_service(struct qat_pci_device *qat_dev,
-			enum qat_service_type service);
+		enum qat_service_type service);
 
 const struct qat_qp_hw_data *
 qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 86c124f6c8..72609703f9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 1b6cf10589..529878e3e7 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 07020741bd..b69d4d9091 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -10,166 +10,6 @@
 #include "qat_sym_session.h"
 #include "qat_sym.h"
 
-#define QAT_BASE_GEN1_SYM_CAPABILITIES \
-	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
-		CAP_RNG(digest_size, 1, 20, 1)), \
-	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
-		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
-	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
-			CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_XTS,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(KASUMI_F8,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(3DES_CBC,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(3DES_CTR,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(DES_CBC,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0))
-
-#define QAT_BASE_GEN1_ASYM_CAPABILITIES				\
-	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),			\
-	QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),			\
-	QAT_ASYM_CAP(RSA,					\
-			((1 << RTE_CRYPTO_ASYM_OP_SIGN) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),	\
-			64, 512, 64)
-
-#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
-	QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \
-
-#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \
-	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 32, 32, 0), \
-		CAP_RNG(digest_size, 16, 16, 0), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0))
-
-#define QAT_BASE_GEN4_SYM_CAPABILITIES \
-	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
-		CAP_RNG(digest_size, 1, 20, 1)), \
-	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
-		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
-	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \
-
 #define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
 	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
 
@@ -432,7 +272,7 @@ qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
 				cipher_len, out_sgl->vec,
 				QAT_SYM_SGL_MAX_NUMBER);
 
-		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			return UINT64_MAX;
 		}
@@ -506,7 +346,7 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
 				auth_ofs + auth_len, out_sgl->vec,
 				QAT_SYM_SGL_MAX_NUMBER);
 
-		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			return UINT64_MAX;
 		}
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 85d5e45a42..8b88443a40 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -13,7 +13,7 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
+#include "qat_sym.h"
 #include "qat_sym_session.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index d5b4c66d68..b80b81e8da 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,70 +1,147 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2021 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+#include "qat_device.h"
 
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_asym_init_op_cookie(void *op_cookie)
 {
-	size_t i;
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
 
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
 	}
-	return -1;
 }
 
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
 {
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
 
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
+	}
+
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
+
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
 }
 
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
 {
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
 }
 
-static size_t max_of(int n, ...)
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
 {
-	va_list args;
-	size_t len = 0, num;
-	int i;
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
 	}
-	va_end(args);
-
-	return len;
 }
 
-static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
+static void
+qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int in_size, int out_size)
 {
 	int i;
@@ -75,7 +152,8 @@ static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		memset(cookie->output_array[i], 0x0, out_size);
 }
 
-static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+static void
+qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
 {
 	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
@@ -88,7 +166,229 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 				out_size);
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+static void
+qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length - n.length),
+				cookie->output_array[0] +
+				alg_size_in_bytes - n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
+			alg_size_in_bytes);
+}
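The copies above right-align the big-endian firmware output: the significant bytes sit at the tail of a fixed-size output_array slot, so the source is offset by alg_size_in_bytes - n.length and the destination by result.length - n.length. A minimal standalone sketch of that alignment (buffer sizes and values below are hypothetical):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Right-align a big-endian value of 'len' bytes, held at the tail of a
 * 'slot_len'-byte firmware buffer, into the tail of a 'dst_len'-byte
 * destination. All sizes here are hypothetical, for illustration only.
 */
static void
copy_right_aligned(uint8_t *dst, size_t dst_len,
		const uint8_t *slot, size_t slot_len, size_t len)
{
	memcpy(dst + (dst_len - len), slot + (slot_len - len), len);
}

int
main(void)
{
	uint8_t slot[8] = {0, 0, 0, 0, 0, 0, 0x12, 0x34};	/* fw output slot */
	uint8_t result[4] = {0};				/* user result buffer */

	copy_right_aligned(result, sizeof(result), slot, sizeof(slot), 2);
	printf("%02x%02x%02x%02x\n",
			result[0], result[1], result[2], result[3]);
	return 0;	/* prints 00001234 */
}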
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
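qat_asym_get_sz_and_func_id() rounds the requested operand size up to the nearest supported bucket in a {size, firmware function id} table and reports the id for that bucket. A self-contained sketch of the same lookup with a made-up table (the real tables such as MOD_EXP_SIZE live in qat_pke_functionality_arrays.h):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical {bit size, firmware func id} table, smallest bucket first. */
static const uint32_t demo_ids[][2] = {
	{512, 0x11}, {1024, 0x22}, {2048, 0x33}, {4096, 0x44},
};

static int
round_up_and_pick(const uint32_t arr[][2], size_t arr_sz,
		size_t *size, uint32_t *func_id)
{
	size_t i;

	for (i = 0; i < arr_sz; i++) {
		if (*size <= arr[i][0]) {
			*size = arr[i][0];	/* round up to the bucket size */
			*func_id = arr[i][1];
			return 0;
		}
	}
	return -1;	/* larger than any supported size */
}

int
main(void)
{
	size_t size = 1536;	/* e.g. a 1536-bit modulus */
	uint32_t id = 0;

	if (round_up_and_pick(demo_ids, 4, &size, &id) == 0)
		printf("rounded to %zu bits, func id 0x%x\n", size, id);
	return 0;	/* rounds 1536 up to 2048, id 0x33 */
}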
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
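max_of() expects the count of values as its first argument followed by exactly that many size_t values; anything else makes the va_arg walk read garbage. A small standalone demo of the calling convention (the sizes are hypothetical):

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

/* Same calling convention as the driver's static max_of(): the first
 * argument says how many size_t values follow.
 */
static size_t
demo_max_of(int n, ...)
{
	va_list args;
	size_t len, num;
	int i;

	va_start(args, n);
	len = va_arg(args, size_t);
	for (i = 0; i < n - 1; i++) {
		num = va_arg(args, size_t);
		if (num > len)
			len = num;
	}
	va_end(args);
	return len;
}

int
main(void)
{
	/* Mirrors the mod-exp path: longest of base, exponent and modulus. */
	size_t longest = demo_max_of(3, (size_t)24, (size_t)32, (size_t)28);

	printf("%zu\n", longest);	/* 32 */
	return 0;
}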
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -452,50 +752,14 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	} else {
 		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
 		return -(EINVAL);
-	}
-	return 0;
-}
-
-static __rte_always_inline int
-refactor_qat_asym_build_request(__rte_unused void *in_op,
-		__rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	return 0;
-}
-
-int
-refactor_qat_asym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp,
-		__rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count)
-{
+	}
 	return 0;
 }
 
-uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_enqueue_op_burst(qp,
-					refactor_qat_asym_build_request,
-					(void **)ops, nb_ops);
-}
-
-uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
-				refactor_qat_asym_process_response, nb_ops);
-}
-
-int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+static __rte_always_inline int
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -562,262 +826,161 @@ qat_asym_build_request(void *in_op,
 	return 0;
 }
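The build/process pair relies on the 64-bit opaque field travelling untouched through the firmware: the request stores the rte_crypto_op pointer, the response hands it back for dequeue. A standalone sketch of that round trip (the struct and field names below are stand-ins, not the firmware layout):

#include <stdint.h>
#include <stdio.h>

struct demo_desc {		/* stands in for the firmware descriptor */
	uint64_t opaque;
};

int
main(void)
{
	int op = 42;				/* stands in for an rte_crypto_op */
	struct demo_desc req = {0}, resp;

	/* enqueue side: stash the host pointer in the 64-bit opaque field */
	req.opaque = (uint64_t)(uintptr_t)&op;

	/* the device echoes the descriptor's opaque back in the response */
	resp.opaque = req.opaque;

	/* dequeue side: recover the original pointer */
	int *rx_op = (int *)(uintptr_t)resp.opaque;

	printf("%d\n", *rx_op);			/* 42 */
	return 0;
}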
 
-static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result + (asym_op->modinv.result.length
-				- n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
-			alg_size_in_bytes);
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
 }
 
-void
-qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
 }
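These wrappers are installed as the cryptodev enqueue_burst/dequeue_burst entry points, so applications keep driving the device through the generic API. A minimal poll-loop sketch, assuming dev_id, qp_id and the ops[] array were configured and populated elsewhere:

#include <rte_cryptodev.h>

/* Submit a burst of prepared asymmetric ops and poll until they all come
 * back. Device and queue-pair setup is omitted; dev_id, qp_id and ops[]
 * are assumed to be provided by the caller.
 */
static void
demo_run_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t enq = 0, deq = 0;

	while (enq < nb_ops)
		enq += rte_cryptodev_enqueue_burst(dev_id, qp_id,
				ops + enq, nb_ops - enq);

	while (deq < nb_ops)
		deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				ops + deq, nb_ops - deq);
}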
 
 int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
 {
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
 		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
 		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
 	}
 
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
 
-unsigned int qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
 }
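The capability table is shared with secondary processes through a named memzone: reserve it once in the primary, copy the table in, and let later lookups attach to the same zone. A reduced sketch of that lookup-or-reserve idiom (the zone name and error handling here are illustrative only):

#include <string.h>
#include <rte_memzone.h>
#include <rte_lcore.h>

/* Return a shared copy of 'data' (len bytes) kept in a named memzone,
 * reserving and filling the zone on first use.
 */
static const void *
demo_share_blob(const char *zone_name, const void *data, size_t len)
{
	const struct rte_memzone *mz = rte_memzone_lookup(zone_name);

	if (mz == NULL) {
		mz = rte_memzone_reserve(zone_name, len, rte_socket_id(), 0);
		if (mz == NULL)
			return NULL;	/* caller tears down the PMD */
		memcpy(mz->addr, data, len);
	}
	return mz->addr;
}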
 
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 {
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
 
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
+	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 50c2641eba..55b1e1b2cc 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * Helper macro to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
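Each use of the macro expands to one rte_cryptodev_capabilities entry, which keeps per-generation capability tables compact. A hypothetical table built with it (assumes this header and rte_cryptodev.h are included; the modlen limits are illustrative):

static const struct rte_cryptodev_capabilities demo_asym_capabilities[] = {
	/* modlen min/max/increment are in bytes; values are illustrative */
	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),
	QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};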
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -58,26 +104,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
@@ -88,23 +114,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
-void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
-
 int
-refactor_qat_asym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp,
-		__rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count);
-
-uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
 
-
-uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
+void
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 284b8096fe..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index fd6b406248..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
deleted file mode 100644
index 3a9b1d4054..0000000000
--- a/drivers/crypto/qat/qat_asym_refactor.c
+++ /dev/null
@@ -1,994 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 - 2021 Intel Corporation
- */
-
-
-#include <stdarg.h>
-
-#include <cryptodev_pmd.h>
-
-#include "icp_qat_fw_pke.h"
-#include "icp_qat_fw.h"
-#include "qat_pke_functionality_arrays.h"
-
-#include "qat_device.h"
-
-#include "qat_logs.h"
-#include "qat_asym_refactor.h"
-
-uint8_t qat_asym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
-{
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
-	}
-
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
-
-unsigned int
-qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
-}
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-
-static void
-qat_clear_arrays(struct qat_asym_op_cookie *cookie,
-		int in_count, int out_count, int in_size, int out_size)
-{
-	int i;
-
-	for (i = 0; i < in_count; i++)
-		memset(cookie->input_array[i], 0x0, in_size);
-	for (i = 0; i < out_count; i++)
-		memset(cookie->output_array[i], 0x0, out_size);
-}
-
-static void
-qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
-		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
-{
-	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
-		qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
-				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size,
-				out_size);
-	else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV)
-		qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
-				QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size,
-				out_size);
-}
-
-static void
-qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result +
-				(asym_op->modinv.result.length - n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-						RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
-			alg_size_in_bytes);
-}
-
-int
-qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
-		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
-{
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
-
-	return 1;
-}
-
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int
-qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
-{
-	size_t i;
-
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
-	}
-	return -1;
-}
-
-static size_t
-max_of(int n, ...)
-{
-	va_list args;
-	size_t len = 0, num;
-	int i;
-
-	va_start(args, n);
-	len = va_arg(args, size_t);
-
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
-	}
-	va_end(args);
-
-	return len;
-}
-
-static int
-qat_asym_check_nonzero(rte_crypto_param n)
-{
-	if (n.length < 8) {
-		/* Not a case for any cryptograpic function except for DH
-		 * generator which very often can be of one byte length
-		 */
-		size_t i;
-
-		if (n.data[n.length - 1] == 0x0) {
-			for (i = 0; i < n.length - 1; i++)
-				if (n.data[i] != 0x0)
-					break;
-			if (i == n.length - 1)
-				return -(EINVAL);
-		}
-	} else if (*(uint64_t *)&n.data[
-				n.length - 8] == 0) {
-		/* Very likely it is zeroed modulus */
-		size_t i;
-
-		for (i = 0; i < n.length - 8; i++)
-			if (n.data[i] != 0x0)
-				break;
-		if (i == n.length - 8)
-			return -(EINVAL);
-	}
-
-	return 0;
-}
-
-static int
-qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
-		struct icp_qat_fw_pke_request *qat_req,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	int err = 0;
-	size_t alg_size;
-	size_t alg_size_in_bytes;
-	uint32_t func_id = 0;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		err = qat_asym_check_nonzero(xform->modex.modulus);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
-					" aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
-			       xform->modex.exponent.length,
-			       xform->modex.modulus.length);
-		alg_size = alg_size_in_bytes << 3;
-
-		if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
-				sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
-				&alg_size, &func_id)) {
-			return -(EINVAL);
-		}
-
-		alg_size_in_bytes = alg_size >> 3;
-		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
-			asym_op->modex.base.length
-			, asym_op->modex.base.data,
-			asym_op->modex.base.length);
-		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
-			xform->modex.exponent.length
-			, xform->modex.exponent.data,
-			xform->modex.exponent.length);
-		rte_memcpy(cookie->input_array[2]  + alg_size_in_bytes -
-			xform->modex.modulus.length,
-			xform->modex.modulus.data,
-			xform->modex.modulus.length);
-		cookie->alg_size = alg_size;
-		qat_req->pke_hdr.cd_pars.func_id = func_id;
-		qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
-		qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
-				cookie->input_array[0],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
-				cookie->input_array[1],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, " ModExpmodulus",
-				cookie->input_array[2],
-				alg_size_in_bytes);
-#endif
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		err = qat_asym_check_nonzero(xform->modinv.modulus);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in modular multiplicative"
-					" inverse, aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
-				xform->modinv.modulus.length);
-		alg_size = alg_size_in_bytes << 3;
-
-		if (xform->modinv.modulus.data[
-				xform->modinv.modulus.length - 1] & 0x01) {
-			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
-					sizeof(MOD_INV_IDS_ODD)/
-					sizeof(*MOD_INV_IDS_ODD),
-					&alg_size, &func_id)) {
-				return -(EINVAL);
-			}
-		} else {
-			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
-					sizeof(MOD_INV_IDS_EVEN)/
-					sizeof(*MOD_INV_IDS_EVEN),
-					&alg_size, &func_id)) {
-				return -(EINVAL);
-			}
-		}
-
-		alg_size_in_bytes = alg_size >> 3;
-		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
-			asym_op->modinv.base.length
-				, asym_op->modinv.base.data,
-				asym_op->modinv.base.length);
-		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
-				xform->modinv.modulus.length
-				, xform->modinv.modulus.data,
-				xform->modinv.modulus.length);
-		cookie->alg_size = alg_size;
-		qat_req->pke_hdr.cd_pars.func_id = func_id;
-		qat_req->input_param_count =
-				QAT_ASYM_MODINV_NUM_IN_PARAMS;
-		qat_req->output_param_count =
-				QAT_ASYM_MODINV_NUM_OUT_PARAMS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
-				cookie->input_array[0],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
-				cookie->input_array[1],
-				alg_size_in_bytes);
-#endif
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		err = qat_asym_check_nonzero(xform->rsa.n);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in RSA"
-					" inverse, aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = xform->rsa.n.length;
-		alg_size = alg_size_in_bytes << 3;
-
-		qat_req->input_param_count =
-				QAT_ASYM_RSA_NUM_IN_PARAMS;
-		qat_req->output_param_count =
-				QAT_ASYM_RSA_NUM_OUT_PARAMS;
-
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-						RTE_CRYPTO_ASYM_OP_VERIFY) {
-
-			if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
-					sizeof(RSA_ENC_IDS)/
-					sizeof(*RSA_ENC_IDS),
-					&alg_size, &func_id)) {
-				err = -(EINVAL);
-				QAT_LOG(ERR,
-					"Not supported RSA parameter size (key)");
-				return err;
-			}
-			alg_size_in_bytes = alg_size >> 3;
-			if (asym_op->rsa.op_type ==
-				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0] +
-						alg_size_in_bytes -
-						asym_op->rsa.message.length
-						, asym_op->rsa.message.data,
-						asym_op->rsa.message.length);
-					break;
-				default:
-					err = -(EINVAL);
-					QAT_LOG(ERR,
-						"Invalid RSA padding (Encryption)");
-					return err;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-#endif
-			} else {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0],
-						asym_op->rsa.sign.data,
-						alg_size_in_bytes);
-					break;
-				default:
-					err = -(EINVAL);
-					QAT_LOG(ERR,
-						"Invalid RSA padding (Verify)");
-					return err;
-				}
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-#endif
-
-			}
-			rte_memcpy(cookie->input_array[1] +
-					alg_size_in_bytes -
-					xform->rsa.e.length
-					, xform->rsa.e.data,
-					xform->rsa.e.length);
-			rte_memcpy(cookie->input_array[2] +
-					alg_size_in_bytes -
-					xform->rsa.n.length,
-					xform->rsa.n.data,
-					xform->rsa.n.length);
-
-			cookie->alg_size = alg_size;
-			qat_req->pke_hdr.cd_pars.func_id = func_id;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
-					cookie->input_array[1],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
-					cookie->input_array[2],
-					alg_size_in_bytes);
-#endif
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0]
-						+ alg_size_in_bytes -
-						asym_op->rsa.cipher.length,
-						asym_op->rsa.cipher.data,
-						asym_op->rsa.cipher.length);
-					break;
-				default:
-					QAT_LOG(ERR,
-						"Invalid padding of RSA (Decrypt)");
-					return -(EINVAL);
-				}
-
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_SIGN) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0]
-						+ alg_size_in_bytes -
-						asym_op->rsa.message.length,
-						asym_op->rsa.message.data,
-						asym_op->rsa.message.length);
-					break;
-				default:
-					QAT_LOG(ERR,
-						"Invalid padding of RSA (Signature)");
-					return -(EINVAL);
-				}
-			}
-			if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
-
-				qat_req->input_param_count =
-						QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
-				if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS,
-						sizeof(RSA_DEC_CRT_IDS)/
-						sizeof(*RSA_DEC_CRT_IDS),
-						&alg_size, &func_id)) {
-					return -(EINVAL);
-				}
-				alg_size_in_bytes = alg_size >> 3;
-
-				rte_memcpy(cookie->input_array[1] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.p.length
-						, xform->rsa.qt.p.data,
-						xform->rsa.qt.p.length);
-				rte_memcpy(cookie->input_array[2] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.q.length
-						, xform->rsa.qt.q.data,
-						xform->rsa.qt.q.length);
-				rte_memcpy(cookie->input_array[3] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.dP.length
-						, xform->rsa.qt.dP.data,
-						xform->rsa.qt.dP.length);
-				rte_memcpy(cookie->input_array[4] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.dQ.length
-						, xform->rsa.qt.dQ.data,
-						xform->rsa.qt.dQ.length);
-				rte_memcpy(cookie->input_array[5] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.qInv.length
-						, xform->rsa.qt.qInv.data,
-						xform->rsa.qt.qInv.length);
-				cookie->alg_size = alg_size;
-				qat_req->pke_hdr.cd_pars.func_id = func_id;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "C",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG, "p",
-						cookie->input_array[1],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG, "q",
-						cookie->input_array[2],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"dP", cookie->input_array[3],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"dQ", cookie->input_array[4],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"qInv", cookie->input_array[5],
-						alg_size_in_bytes);
-#endif
-			} else if (xform->rsa.key_type ==
-					RTE_RSA_KEY_TYPE_EXP) {
-				if (qat_asym_get_sz_and_func_id(
-						RSA_DEC_IDS,
-						sizeof(RSA_DEC_IDS)/
-						sizeof(*RSA_DEC_IDS),
-						&alg_size, &func_id)) {
-					return -(EINVAL);
-				}
-				alg_size_in_bytes = alg_size >> 3;
-				rte_memcpy(cookie->input_array[1] +
-						alg_size_in_bytes -
-						xform->rsa.d.length,
-						xform->rsa.d.data,
-						xform->rsa.d.length);
-				rte_memcpy(cookie->input_array[2] +
-						alg_size_in_bytes -
-						xform->rsa.n.length,
-						xform->rsa.n.data,
-						xform->rsa.n.length);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext",
-					cookie->input_array[0],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d",
-					cookie->input_array[1],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n",
-					cookie->input_array[2],
-					alg_size_in_bytes);
-#endif
-
-				cookie->alg_size = alg_size;
-				qat_req->pke_hdr.cd_pars.func_id = func_id;
-			} else {
-				QAT_LOG(ERR, "Invalid RSA key type");
-				return -(EINVAL);
-			}
-		}
-	} else {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		return -(EINVAL);
-	}
-	return 0;
-}
-
-static __rte_always_inline int
-qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	struct qat_asym_session *ctx;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct rte_crypto_asym_op *asym_op = op->asym;
-	struct icp_qat_fw_pke_request *qat_req =
-			(struct icp_qat_fw_pke_request *)out_msg;
-	struct qat_asym_op_cookie *cookie =
-				(struct qat_asym_op_cookie *)op_cookie;
-	int err = 0;
-
-	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)
-			get_asym_session_private_data(
-			op->asym->session, qat_asym_driver_id);
-		if (unlikely(ctx == NULL)) {
-			QAT_LOG(ERR, "Session has not been created for this device");
-			goto error;
-		}
-		rte_mov64((uint8_t *)qat_req,
-			(const uint8_t *)&(ctx->req_tmpl));
-		err = qat_asym_fill_arrays(asym_op, qat_req,
-				cookie, ctx->xform);
-		if (err) {
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			goto error;
-		}
-	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_fill_req_tmpl(qat_req);
-		err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
-				op->asym->xform);
-		if (err) {
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			goto error;
-		}
-	} else {
-		QAT_DP_LOG(ERR, "Invalid session/xform settings");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		goto error;
-	}
-
-	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
-	qat_req->pke_mid.src_data_addr = cookie->input_addr;
-	qat_req->pke_mid.dest_data_addr = cookie->output_addr;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_pke_request));
-#endif
-
-	return 0;
-error:
-
-	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-		sizeof(struct icp_qat_fw_pke_request));
-#endif
-
-	qat_req->output_param_count = 0;
-	qat_req->input_param_count = 0;
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
-	cookie->error |= err;
-
-	return 0;
-}
-
-static uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
-			nb_ops);
-}
-
-static uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
-				nb_ops);
-}
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	struct qat_cryptodev_private *internals;
-	struct rte_cryptodev *cryptodev;
-	struct qat_device_info *qat_dev_instance =
-		&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	uint64_t capa_size;
-	int i = 0;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -(EFAULT);
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
deleted file mode 100644
index 6d3d991bc7..0000000000
--- a/drivers/crypto/qat/qat_asym_refactor.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#ifndef _QAT_ASYM_H_
-#define _QAT_ASYM_H_
-
-#include <cryptodev_pmd.h>
-#include <rte_crypto_asym.h>
-#include "icp_qat_fw_pke.h"
-#include "qat_device.h"
-#include "qat_crypto.h"
-#include "icp_qat_fw.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-typedef uint64_t large_int_ptr;
-#define MAX_PKE_PARAMS	8
-#define QAT_PKE_MAX_LN_SIZE 512
-#define _PKE_ALIGN_ __rte_aligned(8)
-
-#define QAT_ASYM_MAX_PARAMS			8
-#define QAT_ASYM_MODINV_NUM_IN_PARAMS		2
-#define QAT_ASYM_MODINV_NUM_OUT_PARAMS		1
-#define QAT_ASYM_MODEXP_NUM_IN_PARAMS		3
-#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS		1
-#define QAT_ASYM_RSA_NUM_IN_PARAMS		3
-#define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
-#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
-
-/**
- * helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-struct qat_asym_op_cookie {
-	size_t alg_size;
-	uint64_t error;
-	rte_iova_t input_addr;
-	rte_iova_t output_addr;
-	large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
-	large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
-	union {
-		uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
-		uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
-	} _PKE_ALIGN_;
-	uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
-} _PKE_ALIGN_;
-
-struct qat_asym_session {
-	struct icp_qat_fw_pke_request req_tmpl;
-	struct rte_crypto_asym_xform *xform;
-};
-
-static inline void
-qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void
-qat_asym_build_req_tmpl(void *sess_private_data)
-{
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool);
-
-unsigned int
-qat_asym_session_get_private_size(struct rte_cryptodev *dev);
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess);
-
-/*
- * Process PKE response received from outgoing queue of QAT
- *
- * @param	op		a ptr to the rte_crypto_op referred to by
- *				the response message is returned in this param
- * @param	resp		icp_qat_fw_pke_resp message received from
- *				outgoing fw message queue
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	dequeue_err_count	Error count number pointer
- *
- */
-int
-qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
-		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
-
-void
-qat_asym_init_op_cookie(void *cookie);
-
-#endif /* _QAT_ASYM_H_ */
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 8fe3f0b061..203f74f46f 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min> <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index de687de857..b78e6a6ef3 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,273 +11,107 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
+uint8_t qat_sym_driver_id;
 
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
  */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
+void
+qat_sym_init_op_cookie(void *op_cookie)
 {
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
-
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
 }
 
 static __rte_always_inline int
-refactor_qat_sym_build_request(__rte_unused void *in_op,
-		__rte_unused uint8_t *out_msg, __rte_unused	void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	return 0;
-}
-
-uint16_t
-refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request,
-			(void **)ops, nb_ops);
-}
-
-uint16_t
-refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
-				refactor_qat_sym_process_response, nb_ops);
-}
-
-int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
 
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -293,463 +127,284 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use the same driver id so they can share sessions.
+	 * Store the driver_id so we can validate that all processes have the
+	 * same value; typically they do, but they could differ if the binaries
+	 * were built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in cypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-			}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
-
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
-
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
-
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
 
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support ensabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
+	return 0;
 
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return ret;
+}
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
+	return 0;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
 	}
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
 	}
-#endif
-	return 0;
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
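
A note on the hot-path change above: qat_sym_build_request() now caches the
session pointer and the resolved per-process build handler in the opaque[]
words provided by the enqueue path, so only the first op seen for a new
session pays for the lookup. A minimal, self-contained sketch of that caching
pattern follows; all names in it are illustrative and not part of the driver.

/*
 * Toy model of the opaque[] caching in qat_sym_build_request();
 * names here are made up for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

typedef int (*build_request_t)(void *op, void *sess);

struct toy_session {
	build_request_t build_request;	/* resolved once per process type */
};

static int
toy_build_cipher(void *op, void *sess)
{
	(void)op; (void)sess;
	printf("building cipher request\n");
	return 0;
}

static int
toy_build_dispatch(void *op, struct toy_session *sess, uint64_t *opaque)
{
	build_request_t fn = (build_request_t)(uintptr_t)opaque[1];

	if ((void *)(uintptr_t)opaque[0] != (void *)sess) {
		/* slow path: first op for this session, resolve and cache */
		fn = sess->build_request;
		opaque[0] = (uintptr_t)sess;
		opaque[1] = (uintptr_t)fn;
	}
	/* fast path: handler already cached for this session */
	return fn(op, sess);
}

int main(void)
{
	struct toy_session sess = { .build_request = toy_build_cipher };
	uint64_t opaque[2] = { 0, 0 };
	int op;

	toy_build_dispatch(&op, &sess, opaque);	/* resolves and caches */
	toy_build_dispatch(&op, &sess, opaque);	/* uses the cached handler */
	return 0;
}
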
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 4801bd50a7..278abbfd3a 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -15,15 +15,75 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
+#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
+
 #define BYTE_LENGTH    8
 /* bpi is only used for partial blocks of DES and AES
  * so AES block len can be assumed as max len for iv, src and dst
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Helper macros to add a sym capability:
+ * one entry per xform type (plain auth, auth, AEAD, cipher).
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad size> <i: iv size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -62,27 +122,14 @@ struct qat_sym_dp_ctx {
 	uint16_t cached_dequeue;
 };
 
-static __rte_always_inline int
-refactor_qat_sym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count)
-{
-	return 0;
-}
-
 uint16_t
-refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
 uint16_t
-refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -237,17 +284,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -306,6 +347,8 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	return 1;
 }
 
 int
@@ -317,6 +360,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -331,5 +420,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
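
The QAT_SYM_*_CAP helpers above, together with CAP_RNG from qat_crypto.h, are
meant to be composed into the per-generation capability tables. A minimal
sketch of one such entry is shown below; the entry itself is illustrative only
and not taken from this patch.

/* Illustrative capability-table entry composed from the helper macros. */
static const struct rte_cryptodev_capabilities caps_sketch[] = {
	QAT_SYM_CIPHER_CAP(AES_CBC,
		.block_size = 16,
		CAP_RNG(key_size, 16, 32, 8),
		CAP_RNG(iv_size, 16, 16, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
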
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index af75ac2011..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,975 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym_refactor.h"
-#include "qat_sym_pmd.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-		cipher_param->spc_aad_addr = aad->iova;
-		cipher_param->spc_auth_res_addr = digest->iova;
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index b835245f17..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 0dc0c6f0d9..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
deleted file mode 100644
index 82f078ff1e..0000000000
--- a/drivers/crypto/qat/qat_sym_refactor.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
- */
-
-#include <openssl/evp.h>
-
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_crypto_sym.h>
-#include <rte_bus_pci.h>
-#include <rte_byteorder.h>
-
-#include "qat_sym_refactor.h"
-#include "qat_crypto.h"
-#include "qat_qp.h"
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static __rte_always_inline int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
-{
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	void *sess = (void *)opaque[0];
-	qat_sym_build_request_t build_request = (void *)opaque[1];
-	struct qat_sym_session *ctx = NULL;
-
-	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
-		ctx = get_sym_session_private_data(op->sym->session,
-				qat_sym_driver_id);
-		if (unlikely(!ctx)) {
-			QAT_DP_LOG(ERR, "No session for this device");
-			return -EINVAL;
-		}
-		if (sess != ctx) {
-			struct rte_cryptodev *cdev;
-			struct qat_cryptodev_private *internals;
-			enum rte_proc_type_t proc_type;
-
-			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
-			internals = cdev->data->dev_private;
-			proc_type = rte_eal_process_type();
-
-			if (internals->qat_dev->qat_dev_gen != dev_gen) {
-				op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-				return -EINVAL;
-			}
-
-			if (unlikely(ctx->build_request[proc_type] == NULL)) {
-				int ret =
-				qat_sym_gen_dev_ops[dev_gen].set_session(
-					(void *)cdev, sess);
-				if (ret < 0) {
-					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-					return -EINVAL;
-				}
-			}
-
-			build_request = ctx->build_request[proc_type];
-			opaque[0] = (uintptr_t)ctx;
-			opaque[1] = (uintptr_t)build_request;
-		}
-	}
-
-#ifdef RTE_LIB_SECURITY
-	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-		if (sess != (void *)op->sym->sec_session) {
-			struct rte_cryptodev *cdev;
-			struct qat_cryptodev_private *internals;
-			enum rte_proc_type_t proc_type;
-
-			ctx = get_sec_session_private_data(
-					op->sym->sec_session);
-			if (unlikely(!ctx)) {
-				QAT_DP_LOG(ERR, "No session for this device");
-				return -EINVAL;
-			}
-			if (unlikely(ctx->bpi_ctx == NULL)) {
-				QAT_DP_LOG(ERR, "QAT PMD only supports security"
-						" operation requests for"
-						" DOCSIS, op (%p) is not for"
-						" DOCSIS.", op);
-				return -EINVAL;
-			} else if (unlikely(((op->sym->m_dst != NULL) &&
-					(op->sym->m_dst != op->sym->m_src)) ||
-					op->sym->m_src->nb_segs > 1)) {
-				QAT_DP_LOG(ERR, "OOP and/or multi-segment"
-						" buffers not supported for"
-						" DOCSIS security.");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
-			internals = cdev->data->dev_private;
-			proc_type = rte_eal_process_type();
-
-			if (internals->qat_dev->qat_dev_gen != dev_gen) {
-				op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-				return -EINVAL;
-			}
-
-			if (unlikely(ctx->build_request[proc_type] == NULL)) {
-				int ret =
-				qat_sym_gen_dev_ops[dev_gen].set_session(
-					(void *)cdev, sess);
-				if (ret < 0) {
-					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-					return -EINVAL;
-				}
-			}
-
-			sess = (void *)op->sym->sec_session;
-			build_request = ctx->build_request[proc_type];
-			opaque[0] = (uintptr_t)sess;
-			opaque[1] = (uintptr_t)build_request;
-		}
-	}
-#endif
-	else { /* RTE_CRYPTO_OP_SESSIONLESS */
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
-		return -1;
-	}
-
-	return build_request(op, (void *)ctx, out_msg, op_cookie);
-}
-
-uint16_t
-qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, qat_sym_build_request,
-			(void **)ops, nb_ops);
-}
-
-uint16_t
-qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
-}
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -(EFAULT);
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else {
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-	}
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
deleted file mode 100644
index 44feca8251..0000000000
--- a/drivers/crypto/qat/qat_sym_refactor.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_H_
-#define _QAT_SYM_H_
-
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_net_crc.h>
-#endif
-
-#include <openssl/evp.h>
-
-#include "qat_common.h"
-#include "qat_sym_session.h"
-#include "qat_crypto.h"
-#include "qat_logs.h"
-
-#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
-
-#define BYTE_LENGTH    8
-/* bpi is only used for partial blocks of DES and AES
- * so AES block len can be assumed as max len for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/* Macro to add a capability */
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SYM_SGL_MAX_NUMBER	16
-
-/* Maximum data length for single pass GMAC: 2^14-1 */
-#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
-
-struct qat_sym_session;
-
-struct qat_sym_sgl {
-	qat_sgl_hdr;
-	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-struct qat_sym_op_cookie {
-	struct qat_sym_sgl qat_sgl_src;
-	struct qat_sym_sgl qat_sgl_dst;
-	phys_addr_t qat_sgl_src_phys_addr;
-	phys_addr_t qat_sgl_dst_phys_addr;
-	union {
-		/* Used for Single-Pass AES-GMAC only */
-		struct {
-			struct icp_qat_hw_cipher_algo_blk cd_cipher
-					__rte_packed __rte_cache_aligned;
-			phys_addr_t cd_phys_addr;
-		} spc_gmac;
-	} opt;
-};
-
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
-uint16_t
-qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
-
-uint16_t
-qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
-
-/** Encrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt the IV, then XOR this with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_encrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_encrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
-	return -EINVAL;
-}
-
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len > 0 &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
-		/* Encrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset;
-
-		last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely(sym_op->m_dst != NULL))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = dst - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG,
-				"BPI: dst before post-process:",
-				dst, last_block_len);
-#endif
-		bpi_cipher_encrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
-				last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG,
-				"BPI: dst after post-process:",
-				dst, last_block_len);
-#endif
-	}
-	return sym_op->cipher.data.length - last_block_len;
-}
-
-#ifdef RTE_LIB_SECURITY
-static inline void
-qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
-{
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint32_t crc_data_ofs, crc_data_len, crc;
-	uint8_t *crc_data;
-
-	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
-			sym_op->auth.data.length != 0) {
-
-		crc_data_ofs = sym_op->auth.data.offset;
-		crc_data_len = sym_op->auth.data.length;
-		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
-				crc_data_ofs);
-
-		crc = rte_net_crc_calc(crc_data, crc_data_len,
-				RTE_NET_CRC32_ETH);
-
-		if (crc != *(uint32_t *)(crc_data + crc_data_len))
-			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	}
-}
-
-static inline void
-qat_crc_generate(struct qat_sym_session *ctx,
-			struct rte_crypto_op *op)
-{
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint32_t *crc, crc_data_len;
-	uint8_t *crc_data;
-
-	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
-			sym_op->auth.data.length != 0 &&
-			sym_op->m_src->nb_segs == 1) {
-
-		crc_data_len = sym_op->auth.data.length;
-		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
-				sym_op->auth.data.offset);
-		crc = (uint32_t *)(crc_data + crc_data_len);
-		*crc = rte_net_crc_calc(crc_data, crc_data_len,
-				RTE_NET_CRC32_ETH);
-	}
-}
-
-static inline void
-qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
-{
-	struct rte_crypto_op *op;
-	struct qat_sym_session *ctx;
-	uint16_t i;
-
-	for (i = 0; i < nb_ops; i++) {
-		op = (struct rte_crypto_op *)ops[i];
-
-		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-			ctx = (struct qat_sym_session *)
-				get_sec_session_private_data(
-					op->sym->sec_session);
-
-			if (ctx == NULL || ctx->bpi_ctx == NULL)
-				continue;
-
-			qat_crc_generate(ctx, op);
-		}
-	}
-}
-#endif
-
-static __rte_always_inline int
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
-		uint64_t *dequeue_err_count __rte_unused)
-{
-	struct icp_qat_fw_comn_resp *resp_msg =
-			(struct icp_qat_fw_comn_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque_data);
-	struct qat_sym_session *sess;
-	uint8_t is_docsis_sec;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
-			sizeof(struct icp_qat_fw_comn_resp));
-#endif
-
-#ifdef RTE_LIB_SECURITY
-	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-		/*
-		 * Assuming at this point that if it's a security
-		 * op, that this is for DOCSIS
-		 */
-		sess = (struct qat_sym_session *)
-				get_sec_session_private_data(
-				rx_op->sym->sec_session);
-		is_docsis_sec = 1;
-	} else
-#endif
-	{
-		sess = (struct qat_sym_session *)
-				get_sym_session_private_data(
-				rx_op->sym->session,
-				qat_sym_driver_id);
-		is_docsis_sec = 0;
-	}
-
-	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
-			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
-			resp_msg->comn_hdr.comn_status)) {
-
-		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	} else {
-		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-		if (sess->bpi_ctx) {
-			qat_bpicipher_postprocess(sess, rx_op);
-#ifdef RTE_LIB_SECURITY
-			if (is_docsis_sec)
-				qat_crc_verify(sess, rx_op);
-#endif
-		}
-	}
-
-	if (sess->is_single_pass_gmac) {
-		struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *) op_cookie;
-		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
-				sess->auth_key_length);
-	}
-
-	*op = (void *)rx_op;
-
-	return 1;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
-
-int
-qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
-
-void
-qat_sym_init_op_cookie(void *cookie);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-static __rte_always_inline void
-qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
-		struct qat_sym_session *ctx,
-		struct rte_crypto_vec *vec, uint32_t vec_len,
-		struct rte_crypto_va_iova_ptr *cipher_iv,
-		struct rte_crypto_va_iova_ptr *auth_iv,
-		struct rte_crypto_va_iova_ptr *aad,
-		struct rte_crypto_va_iova_ptr *digest)
-{
-	uint32_t i;
-
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	for (i = 0; i < vec_len; i++)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
-	if (cipher_iv && ctx->cipher_iv.length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
-				ctx->cipher_iv.length);
-	if (auth_iv && ctx->auth_iv.length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
-				ctx->auth_iv.length);
-	if (aad && ctx->aad_len > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
-				ctx->aad_len);
-	if (digest && ctx->digest_length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
-				ctx->digest_length);
-}
-#else
-static __rte_always_inline void
-qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
-		struct qat_sym_session *ctx __rte_unused,
-		struct rte_crypto_vec *vec __rte_unused,
-		uint32_t vec_len __rte_unused,
-		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
-		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
-		struct rte_crypto_va_iova_ptr *aad __rte_unused,
-		struct rte_crypto_va_iova_ptr *digest __rte_unused)
-{}
-#endif
-
-#endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 52837d7c9c..27cdeb1de2 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
@@ -600,11 +600,11 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 	session->is_auth = 1;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v1 1/7] crypro/qat: qat driver refactor skeleton
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
@ 2021-10-29 13:58   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-10-29 13:58 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai

Hi Kai,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, October 26, 2021 6:25 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev] [dpdk-dev v1 1/7] crypro/qat: qat driver refactor
> skeleton
> 
> Add-in enqueue/dequeue op burst and build-request skeleton functions
> for qat crypto driver refactor.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---

...

> +/**
> + * @brief Function prototype to build a QAT request.
> + * @param opaque: an opaque data may be used to store context may be
> useful
> + *    between 2 enqueue operations.
> + **/
The comment on the function prototype could be polished to make it clearer.
The formatting should also be in line with the rest of the DPDK code, for example:

/**
 * <brief introduction to the function prototype>
 *
 * @param x
 *   Description of the param x.
 * @return
 *   - 0 if the crypto request is built successfully.
 *   - error code otherwise.
 */
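
Applied to qat_op_build_request_t it could read roughly like below (only a
sketch; the parameter descriptions are my reading of the names and of how
the cookie/opaque data are used elsewhere in the patch, so please adjust):

/**
 * Build a QAT firmware request from a crypto operation.
 *
 * @param in_op
 *   Crypto operation to be translated into a firmware request.
 * @param out_msg
 *   Output buffer in the TX ring where the request message is written.
 * @param op_cookie
 *   Per-descriptor cookie (SGLs and other per-request metadata).
 * @param opaque
 *   Opaque data used to carry context (e.g. the session and the build
 *   function) between two enqueue operations.
 * @param dev_gen
 *   QAT device generation the request is built for.
 * @return
 *   - 0 if the crypto request is built successfully.
 *   - Negative error code otherwise.
 */
typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);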


> +typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
> +		void *op_cookie, uint64_t *opaque, enum qat_device_gen
> dev_gen);
> +
> +typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void
> *op_cookie,
> +		uint64_t *dequeue_err_count __rte_unused);

The above function prototype is worth some description too.
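
Even a one-line description per parameter would help, e.g. (again just a
sketch of my understanding of the response path, please correct me):

/**
 * Process a single QAT firmware response into a crypto operation.
 *
 * @param op
 *   Output location for the dequeued crypto operation.
 * @param resp
 *   Firmware response message read from the RX ring.
 * @param op_cookie
 *   Per-descriptor cookie associated with the original request.
 * @param dequeue_err_count
 *   Error counter to be updated when the response reports a failure.
 * @return
 *   Number of operations extracted from this response.
 */
typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
		uint64_t *dequeue_err_count __rte_unused);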

> +
> +#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
> +
>  struct qat_qp {
>  	void			*mmap_bar_addr;
>  	struct qat_queue	tx_q;
> @@ -44,6 +57,7 @@ struct qat_qp {
>  	struct rte_mempool *op_cookie_pool;
>  	void **op_cookies;
>  	uint32_t nb_descriptors;
> +	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
>  	enum qat_device_gen qat_dev_gen;
>  	enum qat_service_type service_type;
>  	struct qat_pci_device *qat_dev;
> @@ -77,6 +91,14 @@ struct qat_qp_config {
>  	const char *service_str;
>  };
> 
> +uint16_t
> +refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t
> op_build_request,
> +		void **ops, uint16_t nb_ops);
> +
> +uint16_t
> +refactor_qat_dequeue_op_burst(void *qp, void **ops,
> +		qat_op_dequeue_t qat_dequeue_process_response,
> uint16_t nb_ops);
> +
>  uint16_t
>  qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
> 
> diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
> index 85973812a8..d5b4c66d68 100644
> --- a/drivers/crypto/qat/qat_asym.c
> +++ b/drivers/crypto/qat/qat_asym.c
> @@ -456,6 +456,41 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op
> *asym_op,
>  	return 0;
>  }
> 
> +static __rte_always_inline int
> +refactor_qat_asym_build_request(__rte_unused void *in_op,
> +		__rte_unused uint8_t *out_msg, __rte_unused void
> *op_cookie,
> +		__rte_unused uint64_t *opaque,
> +		__rte_unused enum qat_device_gen dev_gen)
> +{
> +	return 0;
> +}
> +
> +int
> +refactor_qat_asym_process_response(__rte_unused void **op,
> +		__rte_unused uint8_t *resp,
> +		__rte_unused void *op_cookie,
> +		__rte_unused uint64_t *dequeue_err_count)
> +{
> +	return 0;
> +}

I assume the refactor_xxx() functions are removed in future patches, right?
Maybe add some comments to help reviewing.
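
Something as short as this above each of the refactor_xxx() skeletons
would be enough (just a suggested wording):

/*
 * Temporary refactor skeleton: it only returns 0 so that the series
 * builds patch by patch; the real implementation lands in the datapath
 * rework patch and the skeleton is removed in the final clean-up patch.
 */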

> +
> +uint16_t
> +qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op
> **ops,
> +		uint16_t nb_ops)
> +{
> +	return refactor_qat_enqueue_op_burst(qp,
> +					refactor_qat_asym_build_request,
> +					(void **)ops, nb_ops);
> +}
> +
> +uint16_t
> +qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op
> **ops,
> +		uint16_t nb_ops)
> +{
> +	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
> +				refactor_qat_asym_process_response,
> nb_ops);
> +}
> +
>  int
>  qat_asym_build_request(void *in_op,
>  			uint8_t *out_msg,
> diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
> index 308b6b2e0b..50c2641eba 100644
> --- a/drivers/crypto/qat/qat_asym.h
> +++ b/drivers/crypto/qat/qat_asym.h
> @@ -92,4 +92,19 @@ void
>  qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
>  		void *op_cookie);
> 
> +int
> +refactor_qat_asym_process_response(__rte_unused void **op,
> +		__rte_unused uint8_t *resp,
> +		__rte_unused void *op_cookie,
> +		__rte_unused uint64_t *dequeue_err_count);
> +
> +uint16_t
> +qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op
> **ops,
> +		uint16_t nb_ops);
> +
> +
> +uint16_t
> +qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op
> **ops,
> +		uint16_t nb_ops);
> +
>  #endif /* _QAT_ASYM_H_ */
> diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
> index 93b257522b..a92874cd27 100644
> --- a/drivers/crypto/qat/qat_sym.c
> +++ b/drivers/crypto/qat/qat_sym.c
> @@ -210,6 +210,31 @@ handle_spc_gmac(struct qat_sym_session *ctx,
> struct rte_crypto_op *op,
>  			ICP_QAT_FW_LA_NO_PROTO);
>  }
> 
> +static __rte_always_inline int
> +refactor_qat_sym_build_request(__rte_unused void *in_op,
> +		__rte_unused uint8_t *out_msg, __rte_unused	void
> *op_cookie,
> +		__rte_unused uint64_t *opaque,
> +		__rte_unused enum qat_device_gen dev_gen)
> +{
> +	return 0;
> +}
> +
> +uint16_t
> +refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
> +		uint16_t nb_ops)
> +{
> +	return refactor_qat_enqueue_op_burst(qp,
> refactor_qat_sym_build_request,
> +			(void **)ops, nb_ops);
> +}
> +

Same as above: some comments would be helpful, and these should be removed
altogether in later patches.

> +uint16_t
> +refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
> +		uint16_t nb_ops)
> +{
> +	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
> +				refactor_qat_sym_process_response,
> nb_ops);
> +}
> +
>  int
>  qat_sym_build_request(void *in_op, uint8_t *out_msg,
>  		void *op_cookie, enum qat_device_gen qat_dev_gen)
> diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
> index e3ec7f0de4..17b2c871bd 100644
> --- a/drivers/crypto/qat/qat_sym.h
> +++ b/drivers/crypto/qat/qat_sym.h
> @@ -54,6 +54,22 @@ struct qat_sym_op_cookie {
>  	} opt;
>  };
> 
> +static __rte_always_inline int
> +refactor_qat_sym_process_response(__rte_unused void **op,
> +		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
> +		__rte_unused uint64_t *dequeue_err_count)
> +{
> +	return 0;
> +}
> +
> +uint16_t
> +refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
> +		uint16_t nb_ops);
> +
> +uint16_t
> +refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
> +		uint16_t nb_ops);
> +
>  int
>  qat_sym_build_request(void *in_op, uint8_t *out_msg,
>  		void *op_cookie, enum qat_device_gen qat_dev_gen);
> diff --git a/drivers/crypto/qat/qat_sym_session.h
> b/drivers/crypto/qat/qat_sym_session.h
> index 6ebc176729..73493ec864 100644
> --- a/drivers/crypto/qat/qat_sym_session.h
> +++ b/drivers/crypto/qat/qat_sym_session.h
> @@ -55,6 +55,11 @@
>  #define QAT_SESSION_IS_SLICE_SET(flags, flag)	\
>  	(!!((flags) & (flag)))
> 
> +struct qat_sym_session;
> +
> +typedef int (*qat_sym_build_request_t)(void *in_op, struct
> qat_sym_session *ctx,
> +		uint8_t *out_msg, void *op_cookie);
> +
>  enum qat_sym_proto_flag {
>  	QAT_CRYPTO_PROTO_FLAG_NONE = 0,
>  	QAT_CRYPTO_PROTO_FLAG_CCM = 1,
> @@ -107,6 +112,7 @@ struct qat_sym_session {
>  	/* Some generations need different setup of counter */
>  	uint32_t slice_types;
>  	enum qat_sym_proto_flag qat_proto_flag;
> +	qat_sym_build_request_t build_request[2];
>  };
> 
>  int
> --
> 2.17.1

All my comments above are about the code comments - as for the rest,
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor Kai Ji
@ 2021-10-29 14:26   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-10-29 14:26 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai, Akhil Goyal

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, October 26, 2021 6:25 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor
> 
> This patch add-in refactored qat symmetirc build request function
> implementation (qat_sym_refactor.c & qat_sym_refactor.h)
> Add-in QAT sym build ops in auth, cipher, chain and aead operation for
> QAT generation 1
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym op refactor
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym " Kai Ji
@ 2021-10-29 14:36   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-10-29 14:36 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, October 26, 2021 6:25 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym op
> refactor
> 
> This patch add-in refactored qat asymmetric build request
> function implementation (qat_asym_refactor.c & qat_asym_refactor.h)
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework Kai Ji
@ 2021-10-29 14:40   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-10-29 14:40 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, October 26, 2021 6:25 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method
> rework
> 
> The patch introduce set_session & set_raw_dp_ctx methods to qat gen dev
> ops
> Replace min_qat_dev_gen_id with dev_id, the session will be invalid if the
> device generation id is not matching during init and ops
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework Kai Ji
@ 2021-10-29 14:41   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-10-29 14:41 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, October 26, 2021 6:25 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath
> rework
> 
> This patch introduce op_build_request func in qat enqueue op
> burst. Add-in qat_dequeue_process_response in qat dequeue op
> burst. Enable session build request op based on crypto operation
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
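Just to confirm my understanding of the dispatch: the enqueue burst now
calls the build op stored in the session for each crypto op, roughly
along these lines (a simplified sketch with toy stand-in types, not the
exact code in this patch):

#include <stdint.h>

/* Toy stand-ins only, to illustrate the per-op dispatch idea. */
typedef int (*toy_build_request_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque);

struct toy_session {
	/* slot selected per crypto operation at session init time */
	toy_build_request_t build_request[2];
};

static inline int
toy_enqueue_one(struct toy_session *sess, unsigned int slot, void *op,
		uint8_t *ring_slot, void *cookie, uint64_t *opaque)
{
	/* pick the request builder set up when the session was created */
	return sess->build_request[slot](op, ring_slot, cookie, opaque);
}
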
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix Kai Ji
@ 2021-10-29 14:43   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-10-29 14:43 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai, De Lara Guarch, Pablo, adamx.dybkowski

Hi Kai,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, October 26, 2021 6:25 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; adamx.dybkowski@intel.com
> Subject: [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix
> 
> test_mixed_auth_cipher: ensure enough space allocate in ibuf & obuf
> for mbuf to vec conversion
> test_kasumi_decryption: cipher length update.
> qat/dev: support sgl oop operation
> 
> Fixes: 681f540da52b ("cryptodev: do not use AAD in wireless algorithms")
> Cc: pablo.de.lara.guarch@intel.com
> 
> Fixes: e847fc512817 ("test/crypto: add encrypted digest case for AES-CTR-
> CMAC")
> Cc: adamx.dybkowski@intel.com
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
>  app/test/test_cryptodev.c                    | 52 ++++++++++++++----
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
>  drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++-
> --
>  4 files changed, 124 insertions(+), 25 deletions(-)
> 
> diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
> index 814a0b401d..dd791a181a 100644
> --- a/app/test/test_cryptodev.c
> +++ b/app/test/test_cryptodev.c
> @@ -179,6 +179,10 @@ post_process_raw_dp_op(void *user_data,
> 	uint32_t index __rte_unused,
>  			RTE_CRYPTO_OP_STATUS_ERROR;
>  }
> 
> +static struct crypto_testsuite_params testsuite_params = { NULL };
> +struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
> +static struct crypto_unittest_params unittest_params;
> +
>  void
>  process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
>  		struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth,
> @@ -193,6 +197,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t
> qp_id,
>  	struct rte_crypto_sgl sgl, dest_sgl;
>  	uint32_t max_len;
>  	union rte_cryptodev_session_ctx sess;
> +	uint64_t auth_end_iova;
>  	uint32_t count = 0;
>  	struct rte_crypto_raw_dp_ctx *ctx;
>  	uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0,
> @@ -202,6 +207,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t
> qp_id,
>  	int ctx_service_size;
>  	int32_t status = 0;
>  	int enqueue_status, dequeue_status;
> +	struct crypto_unittest_params *ut_params = &unittest_params;
> +	/* oop is not supported in raw hw dp api*/
> +	int is_sgl = sop->m_src->nb_segs > 1;
> 
>  	ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
>  	if (ctx_service_size < 0) {
> @@ -267,6 +275,30 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t
> qp_id,
>  		digest.va = (void *)sop->auth.digest.data;
>  		digest.iova = sop->auth.digest.phys_addr;
> 
> +		if (is_sgl) {
> +			uint32_t remaining_off = auth_offset + auth_len;
> +			struct rte_mbuf *sgl_buf = sop->m_src;
> +
> +			while (remaining_off >=
> rte_pktmbuf_data_len(sgl_buf)
> +					&& sgl_buf->next != NULL) {
> +				remaining_off -=
> rte_pktmbuf_data_len(sgl_buf);
> +				sgl_buf = sgl_buf->next;
> +			}
> +
> +			auth_end_iova =
> (uint64_t)rte_pktmbuf_iova_offset(
> +				sgl_buf, remaining_off);
> +		} else {
> +			/* oop is not supported in raw hw dp api */
> +			auth_end_iova = rte_pktmbuf_iova(op->sym-
> >m_src) +
> +							 auth_offset +
> auth_len;
> +		}
> +		/* Then check if digest-encrypted conditions are met */
> +		if ((auth_offset + auth_len < cipher_offset + cipher_len) &&
> +				(digest.iova == auth_end_iova) && is_sgl)
> +			max_len = RTE_MAX(max_len,
> +				auth_offset + auth_len +
> +				ut_params->auth_xform.auth.digest_length);
> +

This fix, to me, only addresses the in-place SGL problem but not the out-of-place SGL case.
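
For the out-of-place case the digest sits in m_dst, so the walk would
need to start from m_dst when it is set, e.g. something like this
(untested sketch, reusing the names from the hunk above):

		if (is_sgl) {
			/* untested: for OOP walk the destination mbuf chain */
			struct rte_mbuf *sgl_buf = (sop->m_dst != NULL) ?
					sop->m_dst : sop->m_src;
			uint32_t remaining_off = auth_offset + auth_len;

			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
					&& sgl_buf->next != NULL) {
				remaining_off -=
					rte_pktmbuf_data_len(sgl_buf);
				sgl_buf = sgl_buf->next;
			}
			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
					sgl_buf, remaining_off);
		}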

Regards,
Fan


^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up Kai Ji
@ 2021-10-29 14:46   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-10-29 14:46 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, October 26, 2021 6:25 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up
> 
> This patch rename back refactor code to qat_sym.c and qat_asym.c, meson
> build file also updated accordingly.
> Integrate qat hw dp apis and qat pmd function to qat_sym.c and qat_asym.c
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework
  2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                   ` (6 preceding siblings ...)
  2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up Kai Ji
@ 2021-11-01 23:12 ` Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
                     ` (7 more replies)
  7 siblings, 8 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-01 23:12 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch reworks the QAT symmetric crypto datapath implementation so that
the request building for each generation is separated and the crypto
operations under the raw datapath API implementation are unified.

In addition, this patchset also enables QAT OOP support in the raw datapath
API implementation.

This patch depends on http://patchwork.dpdk.org/project/dpdk/cover/20211027155055.32264-1-kai.ji@intel.com/

v2:
- review comments addressed

Kai Ji (7):
  crypro/qat: qat driver refactor skeleton
  crypto/qat: qat driver sym op refactor
  crypto/qat: qat driver asym op refactor
  crypto/qat: qat driver session method rework
  crypto/qat: qat driver datapath rework
  app/test: cryptodev test fix
  crypto/qat: qat driver rework clean up

 app/test/test_cryptodev.c                    |  58 +-
 drivers/common/qat/meson.build               |   4 +-
 drivers/common/qat/qat_device.c              |   2 +-
 drivers/common/qat/qat_qp.c                  |  40 +-
 drivers/common/qat/qat_qp.h                  |  70 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   7 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  90 ++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 487 +++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 253 +++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 911 +++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 939 ++++++++++++++++++
 drivers/crypto/qat/qat_asym.c                | 786 +++++++++------
 drivers/crypto/qat/qat_asym.h                |  77 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |  14 +-
 drivers/crypto/qat/qat_sym.c                 | 978 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 141 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 114 +--
 drivers/crypto/qat/qat_sym_session.h         |   8 +-
 25 files changed, 3861 insertions(+), 2745 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v2 1/7] crypro/qat: qat driver refactor skeleton
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
@ 2021-11-01 23:12   ` Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 2/7] crypto/qat: qat driver sym op refactor Kai Ji
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-01 23:12 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

Add enqueue/dequeue op burst and build-request skeleton functions
for the qat crypto driver refactor.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/qat_qp.c          | 16 +++++++++
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++++
 drivers/crypto/qat/qat_asym.c        | 43 ++++++++++++++++++++++
 drivers/crypto/qat/qat_asym.h        | 15 ++++++++
 drivers/crypto/qat/qat_sym.c         | 37 +++++++++++++++++++
 drivers/crypto/qat/qat_sym.h         | 16 +++++++++
 drivers/crypto/qat/qat_sym_session.h |  6 ++++
 7 files changed, 187 insertions(+)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index cde421eb77..0fda890075 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -549,6 +549,22 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 	return data & modulo_mask;
 }
 
+uint16_t
+refactor_qat_enqueue_op_burst(__rte_unused void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		__rte_unused void **ops, __rte_unused uint16_t nb_ops)
+{
+	return 0;
+}
+
+uint16_t
+refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		__rte_unused uint16_t nb_ops)
+{
+	return 0;
+}
+
 uint16_t
 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 {
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..b1350cdaf4 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Typedef of the qat_op_build_request_t function pointer, passed in as an
+ * argument to the enqueue op burst, where a build request is assigned based
+ * on the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_meg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context, which may be useful
+ *    between 2 enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - error code otherwise
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Typedef of the qat_op_dequeue_t function pointer, passed in as an
+ * argument to the dequeue op burst, where a dequeue op is assigned based
+ * on the type of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - error code otherwise
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -77,6 +123,14 @@ struct qat_qp_config {
 	const char *service_str;
 };
 
+uint16_t
+refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
+
+uint16_t
+refactor_qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
+
 uint16_t
 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 85973812a8..c3bbdb6eb8 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -456,6 +456,49 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
+/*
+ * Asym build request refactor function template,
+ * will be removed in the later patchset.
+ */
+static __rte_always_inline int
+refactor_qat_asym_build_request(__rte_unused void *in_op,
+		__rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	return 0;
+}
+
+/*
+ * Asym process response refactor function template,
+ * will be removed in the later patchset.
+ */
+int
+refactor_qat_asym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp,
+		__rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count)
+{
+	return 0;
+}
+
+uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_enqueue_op_burst(qp,
+					refactor_qat_asym_build_request,
+					(void **)ops, nb_ops);
+}
+
+uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
+				refactor_qat_asym_process_response, nb_ops);
+}
+
 int
 qat_asym_build_request(void *in_op,
 			uint8_t *out_msg,
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 308b6b2e0b..50c2641eba 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -92,4 +92,19 @@ void
 qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
 		void *op_cookie);
 
+int
+refactor_qat_asym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp,
+		__rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count);
+
+uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+
+uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #endif /* _QAT_ASYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 93b257522b..576ef532a7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -210,6 +210,43 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 			ICP_QAT_FW_LA_NO_PROTO);
 }
 
+/*
+ * Sym build request refactor function template,
+ * will be removed in the later patchset.
+ */
+static __rte_always_inline int
+refactor_qat_sym_build_request(__rte_unused void *in_op,
+		__rte_unused uint8_t *out_msg, __rte_unused	void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	return 0;
+}
+
+/*
+ * Sym enqueue burst refactor function template,
+ * will be removed in the later patchset.
+ */
+uint16_t
+refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
+
+/*
+ * Sym dequeue burst refactor function template,
+ * will be removed in the later patchset.
+ */
+uint16_t
+refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
+				refactor_qat_sym_process_response, nb_ops);
+}
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen)
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..17b2c871bd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -54,6 +54,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+static __rte_always_inline int
+refactor_qat_sym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count)
+{
+	return 0;
+}
+
+uint16_t
+refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..73493ec864 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -55,6 +55,11 @@
 #define QAT_SESSION_IS_SLICE_SET(flags, flag)	\
 	(!!((flags) & (flag)))
 
+struct qat_sym_session;
+
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_NONE = 0,
 	QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -107,6 +112,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v2 2/7] crypto/qat: qat driver sym op refactor
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
@ 2021-11-01 23:12   ` Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 3/7] crypto/qat: qat driver asym " Kai Ji
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-01 23:12 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds the refactored qat symmetric build request function
implementation (qat_sym_refactor.c & qat_sym_refactor.h).
It also adds QAT sym build ops for auth, cipher, chain and aead operations
for QAT generation 1.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 1071 ++++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  895 +++++++++++++++
 drivers/crypto/qat/qat_sym.h                 |    8 +
 drivers/crypto/qat/qat_sym_hw_dp.c           |    8 -
 drivers/crypto/qat/qat_sym_refactor.c        |  409 +++++++
 drivers/crypto/qat/qat_sym_refactor.h        |  402 +++++++
 6 files changed, 2785 insertions(+), 8 deletions(-)
 create mode 100644 drivers/crypto/qat/qat_sym_refactor.c
 create mode 100644 drivers/crypto/qat/qat_sym_refactor.h

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..07020741bd 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -8,14 +8,1082 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
+	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
+		CAP_RNG(digest_size, 1, 20, 1)), \
+	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
+		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
+	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
+			CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_XTS,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(KASUMI_F8,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(3DES_CBC,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(3DES_CTR,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(DES_CBC,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0))
+
+#define QAT_BASE_GEN1_ASYM_CAPABILITIES				\
+	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),			\
+	QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),			\
+	QAT_ASYM_CAP(RSA,					\
+			((1 << RTE_CRYPTO_ASYM_OP_SIGN) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),	\
+			64, 512, 64)
+
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
+	QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \
+
+#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \
+	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 32, 32, 0), \
+		CAP_RNG(digest_size, 16, 16, 0), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0))
+
+#define QAT_BASE_GEN4_SYM_CAPABILITIES \
+	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
+		CAP_RNG(digest_size, 1, 20, 1)), \
+	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
+		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
+	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_auth(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_cipher(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_chk_len_in_bits_cipher(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_chk_len_in_bits_auth(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_chk_len_in_bits_cipher(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_chk_len_in_bits_auth(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* SGL: walk to the segment holding the end of the
+		 * authenticated data and compute its IOVA.
+		 */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
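+		/* CCM: build the B0 block (flags, nonce, encoded message
+		 * length) in the AAD buffer and mirror the nonce into the
+		 * request IV field.
+		 */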
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
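+/*
+ * Expected call flow on the raw data path: enqueue (single or burst), then
+ * qat_sym_dp_enqueue_done_gen1() to ring the tail CSR; dequeue (single or
+ * burst), then qat_sym_dp_dequeue_done_gen1() to advance the head CSR and
+ * release the consumed ring slots.
+ */
+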
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
@@ -23,6 +1091,9 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 struct qat_capabilities_info
 qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e156f194e2..e1fd14956b 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -269,6 +269,901 @@ qat_sym_create_security_gen1(void *cryptodev)
 
 #endif
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
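+	/* one op cookie is pre-allocated per ring slot;
+	 * tail >> trailz (i.e. tail / msg_size) is the slot index.
+	 */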
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
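+	/* the first response was already read above (empty-ring check and
+	 * opaque data for the count callback); consume it here.
+	 */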
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
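+	/* clear the consumed descriptors and write the head CSR back only
+	 * once enough responses have accumulated, to batch the MMIO updates.
+	 */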
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (!build_request)
+		return 0;
+	ctx->build_request[proc_type] = build_request;
+
+	if (!handle_mixed)
+		return 0;
+
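+	/* GEN1 has no mixed-crypto capability: reject hash/cipher pairs that
+	 * mix wireless algorithms, or that pair AES-CMAC/NULL auth with a
+	 * wireless cipher.
+	 */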
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 17b2c871bd..4801bd50a7 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -54,6 +54,14 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
 static __rte_always_inline int
 refactor_qat_sym_process_response(__rte_unused void **op,
 		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 12825e448b..94589458d0 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -13,14 +13,6 @@
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
new file mode 100644
index 0000000000..0412902e70
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_refactor.c
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
+
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual QAT PCI device's rte_driver can't be used, as its name represents
+ * the whole PCI device with all services. Think of this as a holder for a name
+ * for the crypto part of the PCI device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
+
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
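+		/* opaque[] caches the last session and its build_request for
+		 * this qp; refresh the cache only when a different session
+		 * arrives.
+		 */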
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
+	}
+
+#ifdef RTE_LIB_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
+			if (unlikely(ctx->bpi_ctx == NULL)) {
+				QAT_DP_LOG(ERR, "QAT PMD only supports security"
+						" operation requests for"
+						" DOCSIS, op (%p) is not for"
+						" DOCSIS.", op);
+				return -EINVAL;
+			} else if (unlikely(((op->sym->m_dst != NULL) &&
+					(op->sym->m_dst != op->sym->m_src)) ||
+					op->sym->m_src->nb_segs > 1)) {
+				QAT_DP_LOG(ERR, "OOP and/or multi-segment"
+						" buffers not supported for"
+						" DOCSIS security.");
+				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+				return -EINVAL;
+			}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
+		}
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
+	}
+
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	/*
+	 * All processes must use the same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value; typically they do, but they could differ if the binaries were
+	 * built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
+	} else {
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
+	}
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+
+	return 0;
+
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
+
+	return ret;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
+	return 0;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
new file mode 100644
index 0000000000..d4bfe8f364
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_refactor.h
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_net_crc.h>
+#endif
+
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_crypto.h"
+#include "qat_logs.h"
+
+#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
+
+#define BYTE_LENGTH    8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be assumed as the max length for IV, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/* Macro to add a capability */
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER	16
+
+/* Maximum data length for single pass GMAC: 2^14-1 */
+#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+	qat_sgl_hdr;
+	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+	struct qat_sym_sgl qat_sgl_src;
+	struct qat_sym_sgl qat_sgl_dst;
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
+	union {
+		/* Used for Single-Pass AES-GMAC only */
+		struct {
+			struct icp_qat_hw_cipher_algo_blk cd_cipher
+					__rte_packed __rte_cache_aligned;
+			phys_addr_t cd_phys_addr;
+		} spc_gmac;
+	} opt;
+};
+
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+/** Encrypt a single partial block
+ *  Depends on openssl libcrypto
+ *  Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt the IV, then XOR this with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_encrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_encrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+	return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len > 0 &&
+			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+		/* Encrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset;
+
+		last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely(sym_op->m_dst != NULL))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = dst - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG,
+				"BPI: dst before post-process:",
+				dst, last_block_len);
+#endif
+		bpi_cipher_encrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+				last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG,
+				"BPI: dst after post-process:",
+				dst, last_block_len);
+#endif
+	}
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+#ifdef RTE_LIB_SECURITY
+static inline void
+qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint32_t crc_data_ofs, crc_data_len, crc;
+	uint8_t *crc_data;
+
+	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
+			sym_op->auth.data.length != 0) {
+
+		crc_data_ofs = sym_op->auth.data.offset;
+		crc_data_len = sym_op->auth.data.length;
+		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+				crc_data_ofs);
+
+		crc = rte_net_crc_calc(crc_data, crc_data_len,
+				RTE_NET_CRC32_ETH);
+
+		if (crc != *(uint32_t *)(crc_data + crc_data_len))
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+}
+
+static inline void
+qat_crc_generate(struct qat_sym_session *ctx,
+			struct rte_crypto_op *op)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint32_t *crc, crc_data_len;
+	uint8_t *crc_data;
+
+	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
+			sym_op->auth.data.length != 0 &&
+			sym_op->m_src->nb_segs == 1) {
+
+		crc_data_len = sym_op->auth.data.length;
+		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+				sym_op->auth.data.offset);
+		crc = (uint32_t *)(crc_data + crc_data_len);
+		*crc = rte_net_crc_calc(crc_data, crc_data_len,
+				RTE_NET_CRC32_ETH);
+	}
+}
+
+static inline void
+qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op *op;
+	struct qat_sym_session *ctx;
+	uint16_t i;
+
+	for (i = 0; i < nb_ops; i++) {
+		op = (struct rte_crypto_op *)ops[i];
+
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			ctx = (struct qat_sym_session *)
+				get_sec_session_private_data(
+					op->sym->sec_session);
+
+			if (ctx == NULL || ctx->bpi_ctx == NULL)
+				continue;
+
+			qat_crc_generate(ctx, op);
+		}
+	}
+}
+#endif
+
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+			(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+	uint8_t is_docsis_sec;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+			sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+#ifdef RTE_LIB_SECURITY
+	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		/*
+		 * Assume at this point that if it is a security
+		 * op, it is for DOCSIS
+		 */
+		sess = (struct qat_sym_session *)
+				get_sec_session_private_data(
+				rx_op->sym->sec_session);
+		is_docsis_sec = 1;
+	} else
+#endif
+	{
+		sess = (struct qat_sym_session *)
+				get_sym_session_private_data(
+				rx_op->sym->session,
+				qat_sym_driver_id);
+		is_docsis_sec = 0;
+	}
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+			resp_msg->comn_hdr.comn_status)) {
+
+		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+		if (sess->bpi_ctx) {
+			qat_bpicipher_postprocess(sess, rx_op);
+#ifdef RTE_LIB_SECURITY
+			if (is_docsis_sec)
+				qat_crc_verify(sess, rx_op);
+#endif
+		}
+	}
+
+	if (sess->is_single_pass_gmac) {
+		struct qat_sym_op_cookie *cookie =
+				(struct qat_sym_op_cookie *) op_cookie;
+		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
+				sess->auth_key_length);
+	}
+
+	*op = (void *)rx_op;
+
+	return 1;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{}
+#endif
+
+#endif /* _QAT_SYM_H_ */
-- 
2.17.1


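As background for the bpi_cipher_encrypt() helper declared above: one CFB block can be produced by ECB-encrypting the IV and XOR-ing the result with the plaintext, which is exactly the trick the comment describes. The standalone sketch below is illustrative only and is not part of the patch; it assumes AES-128 for concreteness and uses the OpenSSL EVP API directly, whereas the driver reuses the pre-initialised ECB context stored in the session's bpi_ctx. The name cfb_block_via_ecb is made up for this example.

#include <stdint.h>
#include <openssl/evp.h>

/* ECB-encrypt the IV, then XOR with the plaintext: for a single (possibly
 * partial) block this equals the AES-128-CFB output for the same key/IV.
 */
static int
cfb_block_via_ecb(const uint8_t *key, const uint8_t *iv,
		const uint8_t *src, uint8_t *dst, int len)
{
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	uint8_t keystream[16];
	int outl, i, ret = -1;

	if (ctx == NULL)
		return -1;
	if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) != 1)
		goto out;
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	/* Encrypt the IV in ECB mode to obtain the CFB keystream block */
	if (EVP_EncryptUpdate(ctx, keystream, &outl, iv, 16) != 1)
		goto out;
	/* XOR keystream with plaintext; only the first block is handled here */
	for (i = 0; i < len && i < 16; i++)
		dst[i] = src[i] ^ keystream[i];
	ret = 0;
out:
	EVP_CIPHER_CTX_free(ctx);
	return ret;
}
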
^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v2 3/7] crypto/qat: qat driver asym op refactor
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 2/7] crypto/qat: qat driver sym op refactor Kai Ji
@ 2021-11-01 23:12   ` Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 4/7] crypto/qat: qat driver session method rework Kai Ji
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-01 23:12 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds the refactored QAT asymmetric build-request
function implementation (qat_asym_refactor.c & qat_asym_refactor.h).

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c |   7 +
 drivers/crypto/qat/qat_asym_refactor.c     | 994 +++++++++++++++++++++
 drivers/crypto/qat/qat_asym_refactor.h     | 125 +++
 3 files changed, 1126 insertions(+)
 create mode 100644 drivers/crypto/qat/qat_asym_refactor.c
 create mode 100644 drivers/crypto/qat/qat_asym_refactor.h

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..99d90fa56c 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
new file mode 100644
index 0000000000..8e789920cb
--- /dev/null
+++ b/drivers/crypto/qat/qat_asym_refactor.c
@@ -0,0 +1,994 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 - 2021 Intel Corporation
+ */
+
+
+#include <stdarg.h>
+
+#include <rte_cryptodev_pmd.h>
+
+#include "icp_qat_fw_pke.h"
+#include "icp_qat_fw.h"
+#include "qat_pke_functionality_arrays.h"
+
+#include "qat_device.h"
+
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
+{
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
+	}
+
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
+
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
+}
+
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+}
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
+
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
+static void
+qat_clear_arrays(struct qat_asym_op_cookie *cookie,
+		int in_count, int out_count, int in_size, int out_size)
+{
+	int i;
+
+	for (i = 0; i < in_count; i++)
+		memset(cookie->input_array[i], 0x0, in_size);
+	for (i = 0; i < out_count; i++)
+		memset(cookie->output_array[i], 0x0, out_size);
+}
+
+static void
+qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
+{
+	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
+		qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
+				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size,
+				out_size);
+	else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV)
+		qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
+				QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size,
+				out_size);
+}
+
+static void
+qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length - n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+						RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
+			alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
+{
+	if (n.length < 8) {
+		/* Not a case for any cryptographic function except for DH
+		 * generator which very often can be of one byte length
+		 */
+		size_t i;
+
+		if (n.data[n.length - 1] == 0x0) {
+			for (i = 0; i < n.length - 1; i++)
+				if (n.data[i] != 0x0)
+					break;
+			if (i == n.length - 1)
+				return -(EINVAL);
+		}
+	} else if (*(uint64_t *)&n.data[
+				n.length - 8] == 0) {
+		/* Very likely it is zeroed modulus */
+		size_t i;
+
+		for (i = 0; i < n.length - 8; i++)
+			if (n.data[i] != 0x0)
+				break;
+		if (i == n.length - 8)
+			return -(EINVAL);
+	}
+
+	return 0;
+}
+
+static int
+qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
+		struct icp_qat_fw_pke_request *qat_req,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	int err = 0;
+	size_t alg_size;
+	size_t alg_size_in_bytes;
+	uint32_t func_id = 0;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		err = qat_asym_check_nonzero(xform->modex.modulus);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
+					" aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
+			       xform->modex.exponent.length,
+			       xform->modex.modulus.length);
+		alg_size = alg_size_in_bytes << 3;
+
+		if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
+				sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
+				&alg_size, &func_id)) {
+			return -(EINVAL);
+		}
+
+		alg_size_in_bytes = alg_size >> 3;
+		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
+			asym_op->modex.base.length
+			, asym_op->modex.base.data,
+			asym_op->modex.base.length);
+		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
+			xform->modex.exponent.length
+			, xform->modex.exponent.data,
+			xform->modex.exponent.length);
+		rte_memcpy(cookie->input_array[2]  + alg_size_in_bytes -
+			xform->modex.modulus.length,
+			xform->modex.modulus.data,
+			xform->modex.modulus.length);
+		cookie->alg_size = alg_size;
+		qat_req->pke_hdr.cd_pars.func_id = func_id;
+		qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
+		qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
+				cookie->input_array[0],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
+				cookie->input_array[1],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, " ModExpmodulus",
+				cookie->input_array[2],
+				alg_size_in_bytes);
+#endif
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		err = qat_asym_check_nonzero(xform->modinv.modulus);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in modular multiplicative"
+					" inverse, aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
+				xform->modinv.modulus.length);
+		alg_size = alg_size_in_bytes << 3;
+
+		if (xform->modinv.modulus.data[
+				xform->modinv.modulus.length - 1] & 0x01) {
+			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
+					sizeof(MOD_INV_IDS_ODD)/
+					sizeof(*MOD_INV_IDS_ODD),
+					&alg_size, &func_id)) {
+				return -(EINVAL);
+			}
+		} else {
+			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
+					sizeof(MOD_INV_IDS_EVEN)/
+					sizeof(*MOD_INV_IDS_EVEN),
+					&alg_size, &func_id)) {
+				return -(EINVAL);
+			}
+		}
+
+		alg_size_in_bytes = alg_size >> 3;
+		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
+			asym_op->modinv.base.length
+				, asym_op->modinv.base.data,
+				asym_op->modinv.base.length);
+		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
+				xform->modinv.modulus.length
+				, xform->modinv.modulus.data,
+				xform->modinv.modulus.length);
+		cookie->alg_size = alg_size;
+		qat_req->pke_hdr.cd_pars.func_id = func_id;
+		qat_req->input_param_count =
+				QAT_ASYM_MODINV_NUM_IN_PARAMS;
+		qat_req->output_param_count =
+				QAT_ASYM_MODINV_NUM_OUT_PARAMS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
+				cookie->input_array[0],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
+				cookie->input_array[1],
+				alg_size_in_bytes);
+#endif
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		err = qat_asym_check_nonzero(xform->rsa.n);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in RSA"
+					" inverse, aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = xform->rsa.n.length;
+		alg_size = alg_size_in_bytes << 3;
+
+		qat_req->input_param_count =
+				QAT_ASYM_RSA_NUM_IN_PARAMS;
+		qat_req->output_param_count =
+				QAT_ASYM_RSA_NUM_OUT_PARAMS;
+
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+						RTE_CRYPTO_ASYM_OP_VERIFY) {
+
+			if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
+					sizeof(RSA_ENC_IDS)/
+					sizeof(*RSA_ENC_IDS),
+					&alg_size, &func_id)) {
+				err = -(EINVAL);
+				QAT_LOG(ERR,
+					"Not supported RSA parameter size (key)");
+				return err;
+			}
+			alg_size_in_bytes = alg_size >> 3;
+			if (asym_op->rsa.op_type ==
+				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0] +
+						alg_size_in_bytes -
+						asym_op->rsa.message.length
+						, asym_op->rsa.message.data,
+						asym_op->rsa.message.length);
+					break;
+				default:
+					err = -(EINVAL);
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Encryption)");
+					return err;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+#endif
+			} else {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0],
+						asym_op->rsa.sign.data,
+						alg_size_in_bytes);
+					break;
+				default:
+					err = -(EINVAL);
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Verify)");
+					return err;
+				}
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+#endif
+
+			}
+			rte_memcpy(cookie->input_array[1] +
+					alg_size_in_bytes -
+					xform->rsa.e.length
+					, xform->rsa.e.data,
+					xform->rsa.e.length);
+			rte_memcpy(cookie->input_array[2] +
+					alg_size_in_bytes -
+					xform->rsa.n.length,
+					xform->rsa.n.data,
+					xform->rsa.n.length);
+
+			cookie->alg_size = alg_size;
+			qat_req->pke_hdr.cd_pars.func_id = func_id;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
+					cookie->input_array[1],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
+					cookie->input_array[2],
+					alg_size_in_bytes);
+#endif
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0]
+						+ alg_size_in_bytes -
+						asym_op->rsa.cipher.length,
+						asym_op->rsa.cipher.data,
+						asym_op->rsa.cipher.length);
+					break;
+				default:
+					QAT_LOG(ERR,
+						"Invalid padding of RSA (Decrypt)");
+					return -(EINVAL);
+				}
+
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0]
+						+ alg_size_in_bytes -
+						asym_op->rsa.message.length,
+						asym_op->rsa.message.data,
+						asym_op->rsa.message.length);
+					break;
+				default:
+					QAT_LOG(ERR,
+						"Invalid padding of RSA (Signature)");
+					return -(EINVAL);
+				}
+			}
+			if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
+
+				qat_req->input_param_count =
+						QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
+				if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS,
+						sizeof(RSA_DEC_CRT_IDS)/
+						sizeof(*RSA_DEC_CRT_IDS),
+						&alg_size, &func_id)) {
+					return -(EINVAL);
+				}
+				alg_size_in_bytes = alg_size >> 3;
+
+				rte_memcpy(cookie->input_array[1] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.p.length
+						, xform->rsa.qt.p.data,
+						xform->rsa.qt.p.length);
+				rte_memcpy(cookie->input_array[2] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.q.length
+						, xform->rsa.qt.q.data,
+						xform->rsa.qt.q.length);
+				rte_memcpy(cookie->input_array[3] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.dP.length
+						, xform->rsa.qt.dP.data,
+						xform->rsa.qt.dP.length);
+				rte_memcpy(cookie->input_array[4] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.dQ.length
+						, xform->rsa.qt.dQ.data,
+						xform->rsa.qt.dQ.length);
+				rte_memcpy(cookie->input_array[5] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.qInv.length
+						, xform->rsa.qt.qInv.data,
+						xform->rsa.qt.qInv.length);
+				cookie->alg_size = alg_size;
+				qat_req->pke_hdr.cd_pars.func_id = func_id;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "C",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG, "p",
+						cookie->input_array[1],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG, "q",
+						cookie->input_array[2],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"dP", cookie->input_array[3],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"dQ", cookie->input_array[4],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"qInv", cookie->input_array[5],
+						alg_size_in_bytes);
+#endif
+			} else if (xform->rsa.key_type ==
+					RTE_RSA_KEY_TYPE_EXP) {
+				if (qat_asym_get_sz_and_func_id(
+						RSA_DEC_IDS,
+						sizeof(RSA_DEC_IDS)/
+						sizeof(*RSA_DEC_IDS),
+						&alg_size, &func_id)) {
+					return -(EINVAL);
+				}
+				alg_size_in_bytes = alg_size >> 3;
+				rte_memcpy(cookie->input_array[1] +
+						alg_size_in_bytes -
+						xform->rsa.d.length,
+						xform->rsa.d.data,
+						xform->rsa.d.length);
+				rte_memcpy(cookie->input_array[2] +
+						alg_size_in_bytes -
+						xform->rsa.n.length,
+						xform->rsa.n.data,
+						xform->rsa.n.length);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext",
+					cookie->input_array[0],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d",
+					cookie->input_array[1],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n",
+					cookie->input_array[2],
+					alg_size_in_bytes);
+#endif
+
+				cookie->alg_size = alg_size;
+				qat_req->pke_hdr.cd_pars.func_id = func_id;
+			} else {
+				QAT_LOG(ERR, "Invalid RSA key type");
+				return -(EINVAL);
+			}
+		}
+	} else {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		return -(EINVAL);
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	struct qat_asym_session *ctx;
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	struct rte_crypto_asym_op *asym_op = op->asym;
+	struct icp_qat_fw_pke_request *qat_req =
+			(struct icp_qat_fw_pke_request *)out_msg;
+	struct qat_asym_op_cookie *cookie =
+				(struct qat_asym_op_cookie *)op_cookie;
+	int err = 0;
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)
+			get_asym_session_private_data(
+			op->asym->session, qat_asym_driver_id);
+		if (unlikely(ctx == NULL)) {
+			QAT_LOG(ERR, "Session has not been created for this device");
+			goto error;
+		}
+		rte_mov64((uint8_t *)qat_req,
+			(const uint8_t *)&(ctx->req_tmpl));
+		err = qat_asym_fill_arrays(asym_op, qat_req,
+				cookie, ctx->xform);
+		if (err) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			goto error;
+		}
+	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_fill_req_tmpl(qat_req);
+		err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
+				op->asym->xform);
+		if (err) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			goto error;
+		}
+	} else {
+		QAT_DP_LOG(ERR, "Invalid session/xform settings");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		goto error;
+	}
+
+	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+	qat_req->pke_mid.src_data_addr = cookie->input_addr;
+	qat_req->pke_mid.dest_data_addr = cookie->output_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_pke_request));
+#endif
+
+	return 0;
+error:
+
+	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_pke_request));
+#endif
+
+	qat_req->output_param_count = 0;
+	qat_req->input_param_count = 0;
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+	cookie->error |= err;
+
+	return 0;
+}
+
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
new file mode 100644
index 0000000000..9ecabdbe8f
--- /dev/null
+++ b/drivers/crypto/qat/qat_asym_refactor.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _QAT_ASYM_H_
+#define _QAT_ASYM_H_
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto_asym.h>
+#include "icp_qat_fw_pke.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
+typedef uint64_t large_int_ptr;
+#define MAX_PKE_PARAMS	8
+#define QAT_PKE_MAX_LN_SIZE 512
+#define _PKE_ALIGN_ __rte_aligned(8)
+
+#define QAT_ASYM_MAX_PARAMS			8
+#define QAT_ASYM_MODINV_NUM_IN_PARAMS		2
+#define QAT_ASYM_MODINV_NUM_OUT_PARAMS		1
+#define QAT_ASYM_MODEXP_NUM_IN_PARAMS		3
+#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS		1
+#define QAT_ASYM_RSA_NUM_IN_PARAMS		3
+#define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
+#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
+
+/**
+ * Helper macro to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
+struct qat_asym_op_cookie {
+	size_t alg_size;
+	uint64_t error;
+	rte_iova_t input_addr;
+	rte_iova_t output_addr;
+	large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+	large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+	union {
+		uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
+		uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
+	} _PKE_ALIGN_;
+	uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
+} _PKE_ALIGN_;
+
+struct qat_asym_session {
+	struct icp_qat_fw_pke_request req_tmpl;
+	struct rte_crypto_asym_xform *xform;
+};
+
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool);
+
+unsigned int
+qat_asym_session_get_private_size(struct rte_cryptodev *dev);
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess);
+
+/*
+ * Process PKE response received from outgoing queue of QAT
+ *
+ * @param	op		pointer through which the rte_crypto_op referred
+ *				to by the response message is returned
+ * @param	resp		icp_qat_fw_pke_resp message received from
+ *				outgoing fw message queue
+ * @param	op_cookie	Cookie pointer that holds private metadata
+ * @param	dequeue_err_count	Error count number pointer
+ *
+ */
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
+
+void
+qat_asym_init_op_cookie(void *cookie);
+
+#endif /* _QAT_ASYM_H_ */
-- 
2.17.1


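A note on the rte_memcpy pattern that repeats throughout qat_asym_fill_arrays() above: every operand is copied to the end of a fixed-size cookie buffer, so shorter big-endian operands end up left-padded with zeroes to the line size selected together with the firmware function ID. The helper below is a minimal sketch of that convention only; the name pke_copy_right_aligned is not part of the driver.

#include <stdint.h>
#include <string.h>

/*
 * Copy a big-endian operand right-aligned into a fixed-size PKE line,
 * zeroing the leading bytes. src_len must not exceed line_size.
 */
static void
pke_copy_right_aligned(uint8_t *dst, size_t line_size,
		const uint8_t *src, size_t src_len)
{
	memset(dst, 0, line_size - src_len);
	memcpy(dst + line_size - src_len, src, src_len);
}

In the driver itself the explicit memset is not needed because the op cookie arrays are cleared between uses (see qat_clear_arrays_by_alg()), so only the right-aligned copy to dst + alg_size_in_bytes - length is performed.
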
^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v2 4/7] crypto/qat: qat driver session method rework
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                     ` (2 preceding siblings ...)
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 3/7] crypto/qat: qat driver asym " Kai Ji
@ 2021-11-01 23:12   ` Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 5/7] crypto/qat: qat driver datapath rework Kai Ji
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-01 23:12 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces set_session & set_raw_dp_ctx methods to the QAT gen dev ops
and replaces min_qat_dev_gen_id with dev_id; a session becomes invalid if the
device generation id does not match during init and subsequent ops.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  90 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 467 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 243 ++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |   5 +
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |   9 +
 drivers/crypto/qat/qat_sym.c                 |   8 +-
 drivers/crypto/qat/qat_sym_session.c         | 106 +----
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 9 files changed, 831 insertions(+), 100 deletions(-)

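As context for the diff below, here is a minimal sketch of how the new per-generation set_session hook registered in qat_sym_gen_dev_ops could be dispatched, and how a session might be tied to the device that created it so a later generation/device mismatch is rejected, as described in the commit message. The dispatch site, the example_sym_set_session name and the ctx->dev_id field are illustrative assumptions, not code from this patch; the sketch assumes the qat_crypto.h and qat_sym_session.h definitions are in scope.

/* Sketch only: ctx->dev_id is an assumption taken from the commit message */
static int
example_sym_set_session(struct rte_cryptodev *dev, struct qat_sym_session *ctx)
{
	struct qat_cryptodev_private *priv = dev->data->dev_private;
	enum qat_device_gen gen = priv->qat_dev->qat_dev_gen;
	int ret;

	/* Generation-specific hook; GEN1 may return -ENOTSUP for mixed algos */
	ret = qat_sym_gen_dev_ops[gen].set_session(dev, ctx);
	if (ret != 0)
		return ret;

	/* Remember which device built the session so later ops can verify it */
	ctx->dev_id = dev->data->dev_id;
	return 0;
}
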
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..72609703f9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == 0 || ret != -ENOTSUP)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algos,
+	 * and some of those are not supported by GEN2 either, so check here
+	 */
+	if ((qat_private->internal_capabilities &
+			QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+		return -ENOTSUP;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +309,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..6494019050 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -143,6 +143,468 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algos;
+	 * these are addressed by GEN3
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +612,10 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +627,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..167c95abcf 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -103,11 +103,253 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * these are addressed by GEN4.
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +363,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e1fd14956b..b044c7b2d3 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -6,6 +6,7 @@
 #ifdef RTE_LIB_SECURITY
 #include <rte_security_driver.h>
 #endif
+#include <cryptodev_pmd.h>
 
 #include "adf_transport_access_macros.h"
 #include "icp_qat_fw.h"
@@ -1169,6 +1170,10 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 84c26a8062..ee878e47f1 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2021 Intel Corporation
  */
 
+#include <cryptodev_pmd.h>
 #include "qat_device.h"
 #include "qat_qp.h"
 #include "qat_crypto.h"
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..8fe3f0b061 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -48,15 +48,24 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 576ef532a7..09c1e6f6a4 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -249,7 +249,7 @@ refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -314,12 +314,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..52837d7c9c 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
 	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in,
+		uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 73493ec864..e1b5edb2f9 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -100,7 +100,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread
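
The generation-specific raw datapath handlers wired up above (set_raw_dp_ctx,
qat_sym_dp_enqueue_*_gen3/gen4) are only reached through the public cryptodev
raw DP API. The following is a rough usage sketch, not part of the patch,
assuming an already configured QAT device, queue pair and symmetric session;
the helper name do_raw_dp_burst is purely illustrative.

#include <stdlib.h>
#include <errno.h>
#include <rte_cryptodev.h>

/* Hypothetical helper: dev_id/qp_id identify a configured QAT device and
 * queue pair, sess is an initialised symmetric session and vec holds the
 * prepared source SGLs, IVs, digests and AAD pointers for the burst.
 */
static int
do_raw_dp_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_vec *vec,
		union rte_crypto_sym_ofs ofs, void **user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	int size, enq_status = 0;
	uint32_t n;

	/* Size includes the driver private area (qat_sym_dp_ctx for QAT). */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return size;

	ctx = calloc(1, size);
	if (ctx == NULL)
		return -ENOMEM;

	/* Binds the generation-specific enqueue/dequeue handlers above. */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		free(ctx);
		return -EINVAL;
	}

	n = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, user_data,
			&enq_status);
	if (n > 0)
		/* QAT caches descriptors; ring the doorbell to submit. */
		rte_cryptodev_raw_enqueue_done(ctx, n);

	free(ctx);
	return (int)n;
}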

* [dpdk-dev] [dpdk-dev v2 5/7] crypto/qat: qat driver datapath rework
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                     ` (3 preceding siblings ...)
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 4/7] crypto/qat: qat driver session method rework Kai Ji
@ 2021-11-01 23:12   ` Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 6/7] app/test: cryptodev test fix Kai Ji
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-01 23:12 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces an op_build_request function pointer into the QAT
enqueue op burst path and passes qat_dequeue_process_response into the
QAT dequeue op burst path. The per-session build-request op is selected
based on the crypto operation.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
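
The wrappers below are an illustrative sketch, not part of the patch, of how
a service layer is expected to bind its own callbacks to the now-generic
burst functions, mirroring the qat_comp_dequeue_burst wrapper added for
compression in this patch. It assumes qat_sym_build_request and
qat_sym_process_response have been adapted to the new qat_op_build_request_t
and qat_op_dequeue_t prototypes; the *_sketch names are hypothetical.

static uint16_t
qat_sym_enqueue_burst_sketch(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	/* op_build_request turns one rte_crypto_op into a firmware request
	 * at the ring tail; the generic code only handles ring management,
	 * cookies and statistics.
	 */
	return qat_enqueue_op_burst(qp, qat_sym_build_request,
			(void **)ops, nb_ops);
}

static uint16_t
qat_sym_dequeue_burst_sketch(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_sym_process_response, nb_ops);
}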
---
 drivers/common/qat/meson.build               |  4 +-
 drivers/common/qat/qat_qp.c                  | 60 +++++---------------
 drivers/common/qat/qat_qp.h                  | 32 +++++++----
 drivers/compress/qat/qat_comp_pmd.c          | 12 +++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  4 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  2 +-
 drivers/crypto/qat/qat_asym_refactor.c       |  4 +-
 drivers/crypto/qat/qat_asym_refactor.h       |  2 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           |  2 +-
 drivers/crypto/qat/qat_sym_refactor.c        | 51 +----------------
 drivers/crypto/qat/qat_sym_refactor.h        |  2 +-
 12 files changed, 57 insertions(+), 122 deletions(-)

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index ce9959d103..b7b18b6c0f 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -70,8 +70,8 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+    foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c',
+            'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 0fda890075..82adda0698 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -15,8 +15,8 @@
 #include "qat_logs.h"
 #include "qat_device.h"
 #include "qat_qp.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_comp.h"
 
 #define QAT_CQ_MAX_DEQ_RETRIES 10
@@ -550,23 +550,8 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-refactor_qat_enqueue_op_burst(__rte_unused void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
-		__rte_unused void **ops, __rte_unused uint16_t nb_ops)
-{
-	return 0;
-}
-
-uint16_t
-refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		__rte_unused uint16_t nb_ops)
-{
-	return 0;
-}
-
-uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -616,29 +601,18 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -833,7 +807,8 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -851,19 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index b1350cdaf4..b1829e122b 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -14,6 +14,24 @@
 
 struct qat_pci_device;
 
+/* Default qp configuration for GEN4 devices */
+#define QAT_GEN4_QP_DEFCON	(QAT_SERVICE_SYMMETRIC |	\
+				QAT_SERVICE_SYMMETRIC << 8 |	\
+				QAT_SERVICE_SYMMETRIC << 16 |	\
+				QAT_SERVICE_SYMMETRIC << 24)
+
+/* QAT GEN 4 specific macros */
+#define QAT_GEN4_BUNDLE_NUM             4
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM     1
+
+/* Queue pair setup error codes */
+#define QAT_NOMEM		1
+#define QAT_QP_INVALID_DESC_NO	2
+#define QAT_QP_BUSY		3
+#define QAT_PCI_NO_RESOURCE	4
+
+#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C
+
 /**
  * Structure associated with each queue.
  */
@@ -124,21 +142,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops);
 
-uint16_t
-refactor_qat_dequeue_op_burst(void *qp, void **ops,
-		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
-
-uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
-
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
@@ -150,7 +162,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev,
 
 int
 qat_qps_per_service(struct qat_pci_device *qat_dev,
-		enum qat_service_type service);
+			enum qat_service_type service);
 
 const struct qat_qp_hw_data *
 qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 9b24d46e97..2f17f8825e 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 72609703f9..86c124f6c8 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 167c95abcf..108e07ee7f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index b044c7b2d3..abd36c7f1c 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -13,7 +13,7 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym_session.h"
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_sym_session.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
index 8e789920cb..3a9b1d4054 100644
--- a/drivers/crypto/qat/qat_asym_refactor.c
+++ b/drivers/crypto/qat/qat_asym_refactor.c
@@ -5,7 +5,7 @@
 
 #include <stdarg.h>
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
@@ -14,7 +14,7 @@
 #include "qat_device.h"
 
 #include "qat_logs.h"
-#include "qat_asym.h"
+#include "qat_asym_refactor.h"
 
 uint8_t qat_asym_driver_id;
 
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
index 9ecabdbe8f..6d3d991bc7 100644
--- a/drivers/crypto/qat/qat_asym_refactor.h
+++ b/drivers/crypto/qat/qat_asym_refactor.h
@@ -5,7 +5,7 @@
 #ifndef _QAT_ASYM_H_
 #define _QAT_ASYM_H_
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
 #include "qat_device.h"
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 94589458d0..af75ac2011 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -8,7 +8,7 @@
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
 
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
index 0412902e70..82f078ff1e 100644
--- a/drivers/crypto/qat/qat_sym_refactor.c
+++ b/drivers/crypto/qat/qat_sym_refactor.c
@@ -10,7 +10,7 @@
 #include <rte_bus_pci.h>
 #include <rte_byteorder.h>
 
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_qp.h"
 
@@ -354,55 +354,6 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_cryptodev_private *internals = dev->data->dev_private;
-	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
-	struct qat_crypto_gen_dev_ops *gen_dev_ops =
-			&qat_sym_gen_dev_ops[qat_dev_gen];
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	if (!gen_dev_ops->set_raw_dp_ctx) {
-		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
-				qat_dev_gen);
-		return -ENOTSUP;
-	}
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
-}
-
-int
-qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
index d4bfe8f364..44feca8251 100644
--- a/drivers/crypto/qat/qat_sym_refactor.h
+++ b/drivers/crypto/qat/qat_sym_refactor.h
@@ -5,7 +5,7 @@
 #ifndef _QAT_SYM_H_
 #define _QAT_SYM_H_
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 #ifdef RTE_LIB_SECURITY
 #include <rte_net_crc.h>
 #endif
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v2 6/7] app/test: cryptodev test fix
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                     ` (4 preceding siblings ...)
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 5/7] crypto/qat: qat driver datapath rework Kai Ji
@ 2021-11-01 23:12   ` Kai Ji
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 7/7] crypto/qat: qat driver rework clean up Kai Ji
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  7 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-01 23:12 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji, pablo.de.lara.guarch, adamx.dybkowski

test_mixed_auth_cipher: ensure enough space is allocated in ibuf & obuf
for the mbuf to vector conversion.
test_kasumi_decryption: update the cipher length.
qat/dev: support SGL out-of-place (OOP) operation.

Fixes: 681f540da52b ("cryptodev: do not use AAD in wireless algorithms")
Cc: pablo.de.lara.guarch@intel.com

Fixes: e847fc512817 ("test/crypto: add encrypted digest case for AES-CTR-CMAC")
Cc: adamx.dybkowski@intel.com

Signed-off-by: Kai Ji <kai.ji@intel.com>
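
For reference, the extra room matters because the raw DP test converts the
mbuf chain into a vector covering max_len bytes; if the mbuf has not been
appended with at least that much data (plus the digest in the SGL
digest-encrypted case), the vector ends short of the length the device will
process. A minimal sketch of that conversion follows; the helper name
mbuf_to_sgl_sketch is purely illustrative and not part of the test code.

#include <rte_mbuf.h>
#include <rte_crypto_sym.h>

/* Build an rte_crypto_sgl covering 'len' bytes of an mbuf chain.  If the
 * mbuf holds fewer than 'len' bytes the conversion comes up short (an
 * RTE_ASSERT in debug builds), which is what the ibuf/obuf sizing fix
 * above avoids.
 */
static int
mbuf_to_sgl_sketch(struct rte_mbuf *m, uint32_t len,
		struct rte_crypto_vec *vec, uint32_t n_vec,
		struct rte_crypto_sgl *sgl)
{
	int n = rte_crypto_mbuf_to_vec(m, 0, len, vec, n_vec);

	if (n < 1)
		return -1;

	sgl->vec = vec;
	sgl->num = n;
	return 0;
}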
---
 app/test/test_cryptodev.c                    | 58 ++++++++++++++++----
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 ++++++++++++++++---
 4 files changed, 129 insertions(+), 26 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 814a0b401d..bfc6408eda 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -179,6 +179,10 @@ post_process_raw_dp_op(void *user_data,	uint32_t index __rte_unused,
 			RTE_CRYPTO_OP_STATUS_ERROR;
 }
 
+static struct crypto_testsuite_params testsuite_params = { NULL };
+struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
+static struct crypto_unittest_params unittest_params;
+
 void
 process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 		struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth,
@@ -193,6 +197,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	struct rte_crypto_sgl sgl, dest_sgl;
 	uint32_t max_len;
 	union rte_cryptodev_session_ctx sess;
+	uint64_t auth_end_iova;
 	uint32_t count = 0;
 	struct rte_crypto_raw_dp_ctx *ctx;
 	uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0,
@@ -202,6 +207,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	int ctx_service_size;
 	int32_t status = 0;
 	int enqueue_status, dequeue_status;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	int is_sgl = sop->m_src->nb_segs > 1;
+	int is_oop = 0;
 
 	ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 	if (ctx_service_size < 0) {
@@ -240,6 +248,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 
 	ofs.raw = 0;
 
+	if ((sop->m_dst != NULL) && (sop->m_dst != sop->m_src))
+		is_oop = 1;
+
 	if (is_cipher && is_auth) {
 		cipher_offset = sop->cipher.data.offset;
 		cipher_len = sop->cipher.data.length;
@@ -267,6 +278,31 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 		digest.va = (void *)sop->auth.digest.data;
 		digest.iova = sop->auth.digest.phys_addr;
 
+		if (is_sgl) {
+			uint32_t remaining_off = auth_offset + auth_len;
+			struct rte_mbuf *sgl_buf = sop->m_src;
+			if (is_oop)
+				sgl_buf = sop->m_dst;
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else {
+			auth_end_iova = rte_pktmbuf_iova(op->sym->m_src) +
+							 auth_offset + auth_len;
+		}
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_offset + auth_len < cipher_offset + cipher_len) &&
+				(digest.iova == auth_end_iova) && is_sgl)
+			max_len = RTE_MAX(max_len,
+				auth_offset + auth_len +
+				ut_params->auth_xform.auth.digest_length);
+
 	} else if (is_cipher) {
 		cipher_offset = sop->cipher.data.offset;
 		cipher_len = sop->cipher.data.length;
@@ -327,7 +363,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 
 	sgl.num = n;
 	/* Out of place */
-	if (sop->m_dst != NULL) {
+	if (is_oop) {
 		dest_sgl.vec = dest_data_vec;
 		vec.dest_sgl = &dest_sgl;
 		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
@@ -503,10 +539,6 @@ process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
 	return op;
 }
 
-static struct crypto_testsuite_params testsuite_params = { NULL };
-struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
-static struct crypto_unittest_params unittest_params;
-
 static int
 testsuite_setup(void)
 {
@@ -4077,9 +4109,9 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
 
 	/* Create KASUMI operation */
 	retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
-					tdata->cipher_iv.len,
-					tdata->ciphertext.len,
-					tdata->validCipherOffsetInBits.len);
+			tdata->cipher_iv.len,
+			RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+			tdata->validCipherOffsetInBits.len);
 	if (retval < 0)
 		return retval;
 
@@ -7310,6 +7342,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
 	unsigned int plaintext_len;
 	unsigned int ciphertext_pad_len;
 	unsigned int ciphertext_len;
+	unsigned int data_len;
 
 	struct rte_cryptodev_info dev_info;
 	struct rte_crypto_op *op;
@@ -7370,21 +7403,22 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
 	plaintext_len = ceil_byte_length(tdata->plaintext.len_bits);
 	ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
 	plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+	data_len = RTE_MAX(ciphertext_pad_len, plaintext_pad_len);
 
 	if (verify) {
 		ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-				ciphertext_pad_len);
+				data_len);
 		memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
 		if (op_mode == OUT_OF_PLACE)
-			rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len);
+			rte_pktmbuf_append(ut_params->obuf, data_len);
 		debug_hexdump(stdout, "ciphertext:", ciphertext,
 				ciphertext_len);
 	} else {
 		plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-				plaintext_pad_len);
+				data_len);
 		memcpy(plaintext, tdata->plaintext.data, plaintext_len);
 		if (op_mode == OUT_OF_PLACE)
-			rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
+			rte_pktmbuf_append(ut_params->obuf, data_len);
 		debug_hexdump(stdout, "plaintext:", plaintext, plaintext_len);
 	}
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index 6494019050..c59c25fe8f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -467,8 +467,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -564,8 +574,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 108e07ee7f..1b6cf10589 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -295,8 +295,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index abd36c7f1c..9894757657 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -529,9 +529,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -628,8 +637,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -728,8 +747,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -833,8 +862,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v2 7/7] crypto/qat: qat driver rework clean up
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                     ` (5 preceding siblings ...)
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 6/7] app/test: cryptodev test fix Kai Ji
@ 2021-11-01 23:12   ` Kai Ji
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  7 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-01 23:12 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch renames the refactored code back to qat_sym.c and qat_asym.c
and updates the meson build file accordingly.
The QAT HW datapath APIs and QAT PMD functions are integrated into
qat_sym.c and qat_asym.c.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/meson.build               |    4 +-
 drivers/common/qat/qat_device.c              |    2 +-
 drivers/common/qat/qat_qp.c                  |    4 +-
 drivers/common/qat/qat_qp.h                  |    2 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |    4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |    4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  164 +--
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |    2 +-
 drivers/crypto/qat/qat_asym.c                |  811 ++++++++------
 drivers/crypto/qat/qat_asym.h                |   90 +-
 drivers/crypto/qat/qat_asym_pmd.c            |  231 ----
 drivers/crypto/qat/qat_asym_pmd.h            |   54 -
 drivers/crypto/qat/qat_asym_refactor.c       |  994 -----------------
 drivers/crypto/qat/qat_asym_refactor.h       |  125 ---
 drivers/crypto/qat/qat_crypto.h              |    5 +-
 drivers/crypto/qat/qat_sym.c                 | 1009 ++++++------------
 drivers/crypto/qat/qat_sym.h                 |  141 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           |  975 -----------------
 drivers/crypto/qat/qat_sym_pmd.c             |  251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |   95 --
 drivers/crypto/qat/qat_sym_refactor.c        |  360 -------
 drivers/crypto/qat/qat_sym_refactor.h        |  402 -------
 drivers/crypto/qat/qat_sym_session.c         |    8 +-
 23 files changed, 997 insertions(+), 4740 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_asym_refactor.c
 delete mode 100644 drivers/crypto/qat/qat_asym_refactor.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_refactor.c
 delete mode 100644 drivers/crypto/qat/qat_sym_refactor.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index b7b18b6c0f..e53b51dcac 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -70,8 +70,8 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
+            'qat_asym.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 437996f2e8..e1fc1c37ec 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 82adda0698..56facf0fbe 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -15,8 +15,8 @@
 #include "qat_logs.h"
 #include "qat_device.h"
 #include "qat_qp.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_comp.h"
 
 #define QAT_CQ_MAX_DEQ_RETRIES 10
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index b1829e122b..3b58786bd8 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -162,7 +162,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev,
 
 int
 qat_qps_per_service(struct qat_pci_device *qat_dev,
-			enum qat_service_type service);
+		enum qat_service_type service);
 
 const struct qat_qp_hw_data *
 qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 86c124f6c8..72609703f9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 1b6cf10589..529878e3e7 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 07020741bd..b69d4d9091 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -10,166 +10,6 @@
 #include "qat_sym_session.h"
 #include "qat_sym.h"
 
-#define QAT_BASE_GEN1_SYM_CAPABILITIES \
-	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
-		CAP_RNG(digest_size, 1, 20, 1)), \
-	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
-		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
-	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
-			CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_XTS,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(KASUMI_F8,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(3DES_CBC,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(3DES_CTR,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(DES_CBC,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0))
-
-#define QAT_BASE_GEN1_ASYM_CAPABILITIES				\
-	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),			\
-	QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),			\
-	QAT_ASYM_CAP(RSA,					\
-			((1 << RTE_CRYPTO_ASYM_OP_SIGN) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),	\
-			64, 512, 64)
-
-#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
-	QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \
-
-#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \
-	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 32, 32, 0), \
-		CAP_RNG(digest_size, 16, 16, 0), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0))
-
-#define QAT_BASE_GEN4_SYM_CAPABILITIES \
-	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
-		CAP_RNG(digest_size, 1, 20, 1)), \
-	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
-		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
-	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \
-
 #define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
 	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
 
@@ -432,7 +272,7 @@ qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
 				cipher_len, out_sgl->vec,
 				QAT_SYM_SGL_MAX_NUMBER);
 
-		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			return UINT64_MAX;
 		}
@@ -506,7 +346,7 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
 				auth_ofs + auth_len, out_sgl->vec,
 				QAT_SYM_SGL_MAX_NUMBER);
 
-		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			return UINT64_MAX;
 		}
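
For reference, a minimal standalone sketch of the headroom arithmetic behind
QAT_SYM_DP_GET_MAX_ENQ above. The struct and the values below are hypothetical
stand-ins for the real queue pair counters, used only to show the calculation:

#include <stdio.h>
#include <stdint.h>
#include <rte_common.h>	/* RTE_MIN */

/* Hypothetical queue-pair counters mirroring the fields the macro reads. */
struct toy_qp {
	uint32_t max_inflights;	/* ring capacity */
	uint32_t enqueued;	/* running total of enqueued descriptors */
	uint32_t dequeued;	/* running total of dequeued descriptors */
};

#define TOY_DP_GET_MAX_ENQ(q, c, n) \
	RTE_MIN(((q)->max_inflights - (q)->enqueued + (q)->dequeued - (c)), (n))

int main(void)
{
	struct toy_qp qp = {
		.max_inflights = 128, .enqueued = 200, .dequeued = 90 };

	/* 128 - (200 - 90) = 18 free slots; 4 are already claimed in this
	 * burst, so at most 14 of the requested 32 ops can still go in.
	 */
	printf("max enq = %u\n", TOY_DP_GET_MAX_ENQ(&qp, 4, 32u));
	return 0;
}
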
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 9894757657..4bbdfdf367 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -13,7 +13,7 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
+#include "qat_sym.h"
 #include "qat_sym_session.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index c3bbdb6eb8..b80b81e8da 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,70 +1,147 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2021 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+#include "qat_device.h"
 
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_asym_init_op_cookie(void *op_cookie)
 {
-	size_t i;
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
 
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
 	}
-	return -1;
 }
 
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
 {
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
+	}
+
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
 
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
 }
 
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
 {
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
 }
 
-static size_t max_of(int n, ...)
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
 {
-	va_list args;
-	size_t len = 0, num;
-	int i;
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
 	}
-	va_end(args);
-
-	return len;
 }
 
-static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
+static void
+qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int in_size, int out_size)
 {
 	int i;
@@ -75,7 +152,8 @@ static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		memset(cookie->output_array[i], 0x0, out_size);
 }
 
-static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+static void
+qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
 {
 	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
@@ -88,7 +166,229 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 				out_size);
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+static void
+qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length - n.length),
+				cookie->output_array[0] +
+				alg_size_in_bytes - n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
+			alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptograpic function except for DH
@@ -452,58 +752,14 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	} else {
 		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
 		return -(EINVAL);
-	}
-	return 0;
-}
-
-/*
- * Asym build request refactor function template,
- * will be removed in the later patchset.
- */
-static __rte_always_inline int
-refactor_qat_asym_build_request(__rte_unused void *in_op,
-		__rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	return 0;
-}
-
-/*
- * Asym process response refactor function template,
- * will be removed in the later patchset.
- */
-int
-refactor_qat_asym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp,
-		__rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count)
-{
+	}
 	return 0;
 }
 
-uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_enqueue_op_burst(qp,
-					refactor_qat_asym_build_request,
-					(void **)ops, nb_ops);
-}
-
-uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
-				refactor_qat_asym_process_response, nb_ops);
-}
-
-int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+static __rte_always_inline int
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -570,262 +826,161 @@ qat_asym_build_request(void *in_op,
 	return 0;
 }
 
-static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result + (asym_op->modinv.result.length
-				- n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
-			alg_size_in_bytes);
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
 }
 
-void
-qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
 }
 
 int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
 {
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
 		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
 	}
 
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
 
-unsigned int qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
 }
 
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 {
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
 
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
+	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
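
For context, a brief sketch of how an application would exercise the burst
path wired up above (for a QAT asym cryptodev these calls resolve to
qat_asym_crypto_enqueue_op_burst()/qat_asym_crypto_dequeue_op_burst()).
The dev_id, qp_id and op_pool are assumed to be configured elsewhere and
the names are illustrative only:

#include <rte_cryptodev.h>
#include <rte_crypto_asym.h>
#include <rte_mempool.h>

static void
toy_asym_roundtrip(uint8_t dev_id, uint16_t qp_id, struct rte_mempool *op_pool)
{
	struct rte_crypto_op *op, *deq_op;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
	if (op == NULL)
		return;

	/* op->asym->... would be filled with e.g. a MODEX request here,
	 * and a session attached, before enqueueing.
	 */
	if (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) != 1) {
		rte_crypto_op_free(op);
		return;
	}

	while (rte_cryptodev_dequeue_burst(dev_id, qp_id, &deq_op, 1) == 0)
		;	/* poll until the PKE response comes back */

	rte_crypto_op_free(deq_op);
}
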
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 50c2641eba..55b1e1b2cc 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * Helper macro to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -58,26 +104,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
@@ -88,23 +114,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
-void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
-
 int
-refactor_qat_asym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp,
-		__rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count);
-
-uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
 
-
-uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
+void
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
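
As a usage note for the QAT_ASYM_CAP() helper now kept in this header, a
minimal sketch of a capability table entry. The array name is hypothetical;
the MODEX parameters mirror the ones used in the driver's capability lists:

#include <rte_cryptodev.h>
#include <rte_crypto_asym.h>
#include "qat_asym.h"

/* Hypothetical capability array: MODEX with modulus lengths of
 * 1..512 bytes in increments of 1, terminated as cryptodev expects.
 */
static const struct rte_cryptodev_capabilities toy_asym_capabilities[] = {
	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
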
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index addee384e3..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index fd6b406248..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
deleted file mode 100644
index 3a9b1d4054..0000000000
--- a/drivers/crypto/qat/qat_asym_refactor.c
+++ /dev/null
@@ -1,994 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 - 2021 Intel Corporation
- */
-
-
-#include <stdarg.h>
-
-#include <cryptodev_pmd.h>
-
-#include "icp_qat_fw_pke.h"
-#include "icp_qat_fw.h"
-#include "qat_pke_functionality_arrays.h"
-
-#include "qat_device.h"
-
-#include "qat_logs.h"
-#include "qat_asym_refactor.h"
-
-uint8_t qat_asym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
-{
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
-	}
-
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
-
-unsigned int
-qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
-}
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-
-static void
-qat_clear_arrays(struct qat_asym_op_cookie *cookie,
-		int in_count, int out_count, int in_size, int out_size)
-{
-	int i;
-
-	for (i = 0; i < in_count; i++)
-		memset(cookie->input_array[i], 0x0, in_size);
-	for (i = 0; i < out_count; i++)
-		memset(cookie->output_array[i], 0x0, out_size);
-}
-
-static void
-qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
-		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
-{
-	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
-		qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
-				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size,
-				out_size);
-	else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV)
-		qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
-				QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size,
-				out_size);
-}
-
-static void
-qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result +
-				(asym_op->modinv.result.length - n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-						RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
-			alg_size_in_bytes);
-}
-
-int
-qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
-		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
-{
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
-
-	return 1;
-}
-
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int
-qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
-{
-	size_t i;
-
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
-	}
-	return -1;
-}
-
-static size_t
-max_of(int n, ...)
-{
-	va_list args;
-	size_t len = 0, num;
-	int i;
-
-	va_start(args, n);
-	len = va_arg(args, size_t);
-
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
-	}
-	va_end(args);
-
-	return len;
-}
-
-static int
-qat_asym_check_nonzero(rte_crypto_param n)
-{
-	if (n.length < 8) {
-		/* Not a case for any cryptograpic function except for DH
-		 * generator which very often can be of one byte length
-		 */
-		size_t i;
-
-		if (n.data[n.length - 1] == 0x0) {
-			for (i = 0; i < n.length - 1; i++)
-				if (n.data[i] != 0x0)
-					break;
-			if (i == n.length - 1)
-				return -(EINVAL);
-		}
-	} else if (*(uint64_t *)&n.data[
-				n.length - 8] == 0) {
-		/* Very likely it is zeroed modulus */
-		size_t i;
-
-		for (i = 0; i < n.length - 8; i++)
-			if (n.data[i] != 0x0)
-				break;
-		if (i == n.length - 8)
-			return -(EINVAL);
-	}
-
-	return 0;
-}
-
-static int
-qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
-		struct icp_qat_fw_pke_request *qat_req,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	int err = 0;
-	size_t alg_size;
-	size_t alg_size_in_bytes;
-	uint32_t func_id = 0;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		err = qat_asym_check_nonzero(xform->modex.modulus);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
-					" aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
-			       xform->modex.exponent.length,
-			       xform->modex.modulus.length);
-		alg_size = alg_size_in_bytes << 3;
-
-		if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
-				sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
-				&alg_size, &func_id)) {
-			return -(EINVAL);
-		}
-
-		alg_size_in_bytes = alg_size >> 3;
-		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
-			asym_op->modex.base.length
-			, asym_op->modex.base.data,
-			asym_op->modex.base.length);
-		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
-			xform->modex.exponent.length
-			, xform->modex.exponent.data,
-			xform->modex.exponent.length);
-		rte_memcpy(cookie->input_array[2]  + alg_size_in_bytes -
-			xform->modex.modulus.length,
-			xform->modex.modulus.data,
-			xform->modex.modulus.length);
-		cookie->alg_size = alg_size;
-		qat_req->pke_hdr.cd_pars.func_id = func_id;
-		qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
-		qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
-				cookie->input_array[0],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
-				cookie->input_array[1],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, " ModExpmodulus",
-				cookie->input_array[2],
-				alg_size_in_bytes);
-#endif
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		err = qat_asym_check_nonzero(xform->modinv.modulus);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in modular multiplicative"
-					" inverse, aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
-				xform->modinv.modulus.length);
-		alg_size = alg_size_in_bytes << 3;
-
-		if (xform->modinv.modulus.data[
-				xform->modinv.modulus.length - 1] & 0x01) {
-			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
-					sizeof(MOD_INV_IDS_ODD)/
-					sizeof(*MOD_INV_IDS_ODD),
-					&alg_size, &func_id)) {
-				return -(EINVAL);
-			}
-		} else {
-			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
-					sizeof(MOD_INV_IDS_EVEN)/
-					sizeof(*MOD_INV_IDS_EVEN),
-					&alg_size, &func_id)) {
-				return -(EINVAL);
-			}
-		}
-
-		alg_size_in_bytes = alg_size >> 3;
-		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
-			asym_op->modinv.base.length
-				, asym_op->modinv.base.data,
-				asym_op->modinv.base.length);
-		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
-				xform->modinv.modulus.length
-				, xform->modinv.modulus.data,
-				xform->modinv.modulus.length);
-		cookie->alg_size = alg_size;
-		qat_req->pke_hdr.cd_pars.func_id = func_id;
-		qat_req->input_param_count =
-				QAT_ASYM_MODINV_NUM_IN_PARAMS;
-		qat_req->output_param_count =
-				QAT_ASYM_MODINV_NUM_OUT_PARAMS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
-				cookie->input_array[0],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
-				cookie->input_array[1],
-				alg_size_in_bytes);
-#endif
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		err = qat_asym_check_nonzero(xform->rsa.n);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in RSA"
-					" inverse, aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = xform->rsa.n.length;
-		alg_size = alg_size_in_bytes << 3;
-
-		qat_req->input_param_count =
-				QAT_ASYM_RSA_NUM_IN_PARAMS;
-		qat_req->output_param_count =
-				QAT_ASYM_RSA_NUM_OUT_PARAMS;
-
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-						RTE_CRYPTO_ASYM_OP_VERIFY) {
-
-			if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
-					sizeof(RSA_ENC_IDS)/
-					sizeof(*RSA_ENC_IDS),
-					&alg_size, &func_id)) {
-				err = -(EINVAL);
-				QAT_LOG(ERR,
-					"Not supported RSA parameter size (key)");
-				return err;
-			}
-			alg_size_in_bytes = alg_size >> 3;
-			if (asym_op->rsa.op_type ==
-				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0] +
-						alg_size_in_bytes -
-						asym_op->rsa.message.length
-						, asym_op->rsa.message.data,
-						asym_op->rsa.message.length);
-					break;
-				default:
-					err = -(EINVAL);
-					QAT_LOG(ERR,
-						"Invalid RSA padding (Encryption)");
-					return err;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-#endif
-			} else {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0],
-						asym_op->rsa.sign.data,
-						alg_size_in_bytes);
-					break;
-				default:
-					err = -(EINVAL);
-					QAT_LOG(ERR,
-						"Invalid RSA padding (Verify)");
-					return err;
-				}
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-#endif
-
-			}
-			rte_memcpy(cookie->input_array[1] +
-					alg_size_in_bytes -
-					xform->rsa.e.length
-					, xform->rsa.e.data,
-					xform->rsa.e.length);
-			rte_memcpy(cookie->input_array[2] +
-					alg_size_in_bytes -
-					xform->rsa.n.length,
-					xform->rsa.n.data,
-					xform->rsa.n.length);
-
-			cookie->alg_size = alg_size;
-			qat_req->pke_hdr.cd_pars.func_id = func_id;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
-					cookie->input_array[1],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
-					cookie->input_array[2],
-					alg_size_in_bytes);
-#endif
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0]
-						+ alg_size_in_bytes -
-						asym_op->rsa.cipher.length,
-						asym_op->rsa.cipher.data,
-						asym_op->rsa.cipher.length);
-					break;
-				default:
-					QAT_LOG(ERR,
-						"Invalid padding of RSA (Decrypt)");
-					return -(EINVAL);
-				}
-
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_SIGN) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0]
-						+ alg_size_in_bytes -
-						asym_op->rsa.message.length,
-						asym_op->rsa.message.data,
-						asym_op->rsa.message.length);
-					break;
-				default:
-					QAT_LOG(ERR,
-						"Invalid padding of RSA (Signature)");
-					return -(EINVAL);
-				}
-			}
-			if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
-
-				qat_req->input_param_count =
-						QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
-				if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS,
-						sizeof(RSA_DEC_CRT_IDS)/
-						sizeof(*RSA_DEC_CRT_IDS),
-						&alg_size, &func_id)) {
-					return -(EINVAL);
-				}
-				alg_size_in_bytes = alg_size >> 3;
-
-				rte_memcpy(cookie->input_array[1] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.p.length
-						, xform->rsa.qt.p.data,
-						xform->rsa.qt.p.length);
-				rte_memcpy(cookie->input_array[2] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.q.length
-						, xform->rsa.qt.q.data,
-						xform->rsa.qt.q.length);
-				rte_memcpy(cookie->input_array[3] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.dP.length
-						, xform->rsa.qt.dP.data,
-						xform->rsa.qt.dP.length);
-				rte_memcpy(cookie->input_array[4] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.dQ.length
-						, xform->rsa.qt.dQ.data,
-						xform->rsa.qt.dQ.length);
-				rte_memcpy(cookie->input_array[5] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.qInv.length
-						, xform->rsa.qt.qInv.data,
-						xform->rsa.qt.qInv.length);
-				cookie->alg_size = alg_size;
-				qat_req->pke_hdr.cd_pars.func_id = func_id;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "C",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG, "p",
-						cookie->input_array[1],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG, "q",
-						cookie->input_array[2],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"dP", cookie->input_array[3],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"dQ", cookie->input_array[4],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"qInv", cookie->input_array[5],
-						alg_size_in_bytes);
-#endif
-			} else if (xform->rsa.key_type ==
-					RTE_RSA_KEY_TYPE_EXP) {
-				if (qat_asym_get_sz_and_func_id(
-						RSA_DEC_IDS,
-						sizeof(RSA_DEC_IDS)/
-						sizeof(*RSA_DEC_IDS),
-						&alg_size, &func_id)) {
-					return -(EINVAL);
-				}
-				alg_size_in_bytes = alg_size >> 3;
-				rte_memcpy(cookie->input_array[1] +
-						alg_size_in_bytes -
-						xform->rsa.d.length,
-						xform->rsa.d.data,
-						xform->rsa.d.length);
-				rte_memcpy(cookie->input_array[2] +
-						alg_size_in_bytes -
-						xform->rsa.n.length,
-						xform->rsa.n.data,
-						xform->rsa.n.length);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext",
-					cookie->input_array[0],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d",
-					cookie->input_array[1],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n",
-					cookie->input_array[2],
-					alg_size_in_bytes);
-#endif
-
-				cookie->alg_size = alg_size;
-				qat_req->pke_hdr.cd_pars.func_id = func_id;
-			} else {
-				QAT_LOG(ERR, "Invalid RSA key type");
-				return -(EINVAL);
-			}
-		}
-	} else {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		return -(EINVAL);
-	}
-	return 0;
-}
-
-static __rte_always_inline int
-qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	struct qat_asym_session *ctx;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct rte_crypto_asym_op *asym_op = op->asym;
-	struct icp_qat_fw_pke_request *qat_req =
-			(struct icp_qat_fw_pke_request *)out_msg;
-	struct qat_asym_op_cookie *cookie =
-				(struct qat_asym_op_cookie *)op_cookie;
-	int err = 0;
-
-	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)
-			get_asym_session_private_data(
-			op->asym->session, qat_asym_driver_id);
-		if (unlikely(ctx == NULL)) {
-			QAT_LOG(ERR, "Session has not been created for this device");
-			goto error;
-		}
-		rte_mov64((uint8_t *)qat_req,
-			(const uint8_t *)&(ctx->req_tmpl));
-		err = qat_asym_fill_arrays(asym_op, qat_req,
-				cookie, ctx->xform);
-		if (err) {
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			goto error;
-		}
-	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_fill_req_tmpl(qat_req);
-		err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
-				op->asym->xform);
-		if (err) {
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			goto error;
-		}
-	} else {
-		QAT_DP_LOG(ERR, "Invalid session/xform settings");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		goto error;
-	}
-
-	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
-	qat_req->pke_mid.src_data_addr = cookie->input_addr;
-	qat_req->pke_mid.dest_data_addr = cookie->output_addr;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_pke_request));
-#endif
-
-	return 0;
-error:
-
-	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-		sizeof(struct icp_qat_fw_pke_request));
-#endif
-
-	qat_req->output_param_count = 0;
-	qat_req->input_param_count = 0;
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
-	cookie->error |= err;
-
-	return 0;
-}
-
-static uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
-			nb_ops);
-}
-
-static uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
-				nb_ops);
-}
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	struct qat_cryptodev_private *internals;
-	struct rte_cryptodev *cryptodev;
-	struct qat_device_info *qat_dev_instance =
-		&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	uint64_t capa_size;
-	int i = 0;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -(EFAULT);
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
deleted file mode 100644
index 6d3d991bc7..0000000000
--- a/drivers/crypto/qat/qat_asym_refactor.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#ifndef _QAT_ASYM_H_
-#define _QAT_ASYM_H_
-
-#include <cryptodev_pmd.h>
-#include <rte_crypto_asym.h>
-#include "icp_qat_fw_pke.h"
-#include "qat_device.h"
-#include "qat_crypto.h"
-#include "icp_qat_fw.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-typedef uint64_t large_int_ptr;
-#define MAX_PKE_PARAMS	8
-#define QAT_PKE_MAX_LN_SIZE 512
-#define _PKE_ALIGN_ __rte_aligned(8)
-
-#define QAT_ASYM_MAX_PARAMS			8
-#define QAT_ASYM_MODINV_NUM_IN_PARAMS		2
-#define QAT_ASYM_MODINV_NUM_OUT_PARAMS		1
-#define QAT_ASYM_MODEXP_NUM_IN_PARAMS		3
-#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS		1
-#define QAT_ASYM_RSA_NUM_IN_PARAMS		3
-#define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
-#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
-
-/**
- * helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-struct qat_asym_op_cookie {
-	size_t alg_size;
-	uint64_t error;
-	rte_iova_t input_addr;
-	rte_iova_t output_addr;
-	large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
-	large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
-	union {
-		uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
-		uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
-	} _PKE_ALIGN_;
-	uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
-} _PKE_ALIGN_;
-
-struct qat_asym_session {
-	struct icp_qat_fw_pke_request req_tmpl;
-	struct rte_crypto_asym_xform *xform;
-};
-
-static inline void
-qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void
-qat_asym_build_req_tmpl(void *sess_private_data)
-{
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool);
-
-unsigned int
-qat_asym_session_get_private_size(struct rte_cryptodev *dev);
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess);
-
-/*
- * Process PKE response received from outgoing queue of QAT
- *
- * @param	op		a ptr to the rte_crypto_op referred to by
- *				the response message is returned in this param
- * @param	resp		icp_qat_fw_pke_resp message received from
- *				outgoing fw message queue
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	dequeue_err_count	Error count number pointer
- *
- */
-int
-qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
-		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
-
-void
-qat_asym_init_op_cookie(void *cookie);
-
-#endif /* _QAT_ASYM_H_ */
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 8fe3f0b061..203f74f46f 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min> <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 09c1e6f6a4..b78e6a6ef3 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,285 +11,107 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
+uint8_t qat_sym_driver_id;
 
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
-
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
  */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
-
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
+void
+qat_sym_init_op_cookie(void *op_cookie)
 {
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
 }
 
-/*
- * Sym build request refactor function template,
- * will be removed in the later patchset.
- */
 static __rte_always_inline int
-refactor_qat_sym_build_request(__rte_unused void *in_op,
-		__rte_unused uint8_t *out_msg, __rte_unused	void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	return 0;
-}
-
-/*
- * Sym enqueue burst refactor function template,
- * will be removed in the later patchset.
- */
-uint16_t
-refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request,
-			(void **)ops, nb_ops);
-}
-
-/*
- * Sym dequeue burst refactor function template,
- * will be removed in the later patchset.
- */
-uint16_t
-refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
-				refactor_qat_sym_process_response, nb_ops);
-}
-
-int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
 
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -305,463 +127,284 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use the same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value; typically they do, but they could differ if the binaries were
+	 * built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in cypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-			}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		}
-
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
-
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support ensabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
+	return 0;
 
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return ret;
+}
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
+	return 0;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
 	}
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
 	}
-#endif
-	return 0;
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
 }
+
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
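
A minimal sketch of how an application reaches the qat_sym_configure_dp_ctx() hook added above, going through the public cryptodev raw data-path API. The helper name setup_raw_dp_ctx and the dev_id/qp_id/sess inputs are placeholders supplied by the caller, and the context buffer size is assumed to come from rte_cryptodev_get_raw_dp_ctx_size(), which reports the public context plus the driver-private qat_sym_dp_ctx area:

#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* Placeholder inputs: a configured device/queue pair and a QAT symmetric
 * session already initialised by the application.
 */
static struct rte_crypto_raw_dp_ctx *
setup_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	int size;

	/* Total context size: public struct plus the driver-private
	 * qat_sym_dp_ctx reported by qat_sym_get_dp_ctx_size().
	 */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return NULL;	/* raw data path not supported */

	ctx = rte_zmalloc(NULL, size, 0);
	if (ctx == NULL)
		return NULL;

	/* is_update = 0: snapshot the queue pair head/tail and bind the
	 * session; pass 1 later to switch session without re-snapshotting.
	 */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		rte_free(ctx);
		return NULL;
	}

	return ctx;
}
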
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 4801bd50a7..278abbfd3a 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -15,15 +15,75 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
+#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
+
 #define BYTE_LENGTH    8
 /* bpi is only used for partial blocks of DES and AES
  * so AES block len can be assumed as max len for iv, src and dst
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Helper macros to add a sym capability
+ * to a QAT capability table, where:
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -62,27 +122,14 @@ struct qat_sym_dp_ctx {
 	uint16_t cached_dequeue;
 };
 
-static __rte_always_inline int
-refactor_qat_sym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count)
-{
-	return 0;
-}
-
 uint16_t
-refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
 uint16_t
-refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -237,17 +284,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -306,6 +347,8 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	return 1;
 }
 
 int
@@ -317,6 +360,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -331,5 +420,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
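
The QAT_SYM_*_CAP macros moved into qat_sym.h above fill the rte_cryptodev_capabilities fields positionally after .algo, so a capability table is declared by handing them initialiser fragments. A minimal sketch follows, assuming CAP_SET()/CAP_RNG()-style helpers (defined locally here for illustration; the per-generation headers in this series carry their own equivalents) and illustrative size ranges rather than the exact values any given QAT generation advertises:

#include <rte_cryptodev.h>
#include "qat_sym.h"

/* Assumed helpers expanding to designated initialisers for the positional
 * b/k/d/a/i slots of the capability macros.
 */
#define CAP_SET(n, v)		.n = v
#define CAP_RNG(n, l, r, i)	.n = {.min = l, .max = r, .increment = i}
#define CAP_RNG_ZERO(n)		.n = {.min = 0, .max = 0, .increment = 0}

static const struct rte_cryptodev_capabilities example_sym_capabilities[] = {
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

A generation's get_capabilities() callback would then return such a table (wrapped in qat_capabilities_info) for qat_sym_dev_create() to copy into the capability memzone.
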
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index af75ac2011..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,975 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym_refactor.h"
-#include "qat_sym_pmd.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-		cipher_param->spc_aad_addr = aad->iova;
-		cipher_param->spc_auth_res_addr = digest->iova;
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
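
The file removed above is the old raw data-path implementation; its enqueue/dequeue contract (requests are written to the ring immediately, but the tail/head CSRs only move in the *_done() calls, which must be passed the exact number of cached jobs) is carried over unchanged into the reworked per-generation code. A minimal caller-side sketch of that contract, assuming a context prepared with rte_cryptodev_configure_raw_dp_ctx() and job descriptors (data, ofs, iv, digest, aad_or_auth_iv, user_data) already filled in by the application:

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

static int
raw_dp_single_job(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_vec *data, uint16_t n_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	enum rte_crypto_op_status op_status;
	int deq_status;
	void *done;

	/* Writes one request at the cached tail; the HW is not told yet */
	if (rte_cryptodev_raw_enqueue(ctx, data, n_vecs, ofs,
			iv, digest, aad_or_auth_iv, user_data) < 0)
		return -1;

	/* Kick the tail CSR; the count must equal the cached enqueues */
	if (rte_cryptodev_raw_enqueue_done(ctx, 1) < 0)
		return -1;

	/* Poll the response ring, then advance the head by the same count */
	do {
		done = rte_cryptodev_raw_dequeue(ctx, &deq_status, &op_status);
	} while (done == NULL);
	rte_cryptodev_raw_dequeue_done(ctx, 1);

	return (done == user_data &&
		op_status == RTE_CRYPTO_OP_STATUS_SUCCESS) ? 0 : -1;
}
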
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index b835245f17..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 0dc0c6f0d9..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
deleted file mode 100644
index 82f078ff1e..0000000000
--- a/drivers/crypto/qat/qat_sym_refactor.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
- */
-
-#include <openssl/evp.h>
-
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_crypto_sym.h>
-#include <rte_bus_pci.h>
-#include <rte_byteorder.h>
-
-#include "qat_sym_refactor.h"
-#include "qat_crypto.h"
-#include "qat_qp.h"
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static __rte_always_inline int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
-{
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	void *sess = (void *)opaque[0];
-	qat_sym_build_request_t build_request = (void *)opaque[1];
-	struct qat_sym_session *ctx = NULL;
-
-	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
-		ctx = get_sym_session_private_data(op->sym->session,
-				qat_sym_driver_id);
-		if (unlikely(!ctx)) {
-			QAT_DP_LOG(ERR, "No session for this device");
-			return -EINVAL;
-		}
-		if (sess != ctx) {
-			struct rte_cryptodev *cdev;
-			struct qat_cryptodev_private *internals;
-			enum rte_proc_type_t proc_type;
-
-			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
-			internals = cdev->data->dev_private;
-			proc_type = rte_eal_process_type();
-
-			if (internals->qat_dev->qat_dev_gen != dev_gen) {
-				op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-				return -EINVAL;
-			}
-
-			if (unlikely(ctx->build_request[proc_type] == NULL)) {
-				int ret =
-				qat_sym_gen_dev_ops[dev_gen].set_session(
-					(void *)cdev, sess);
-				if (ret < 0) {
-					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-					return -EINVAL;
-				}
-			}
-
-			build_request = ctx->build_request[proc_type];
-			opaque[0] = (uintptr_t)ctx;
-			opaque[1] = (uintptr_t)build_request;
-		}
-	}
-
-#ifdef RTE_LIB_SECURITY
-	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-		if (sess != (void *)op->sym->sec_session) {
-			struct rte_cryptodev *cdev;
-			struct qat_cryptodev_private *internals;
-			enum rte_proc_type_t proc_type;
-
-			ctx = get_sec_session_private_data(
-					op->sym->sec_session);
-			if (unlikely(!ctx)) {
-				QAT_DP_LOG(ERR, "No session for this device");
-				return -EINVAL;
-			}
-			if (unlikely(ctx->bpi_ctx == NULL)) {
-				QAT_DP_LOG(ERR, "QAT PMD only supports security"
-						" operation requests for"
-						" DOCSIS, op (%p) is not for"
-						" DOCSIS.", op);
-				return -EINVAL;
-			} else if (unlikely(((op->sym->m_dst != NULL) &&
-					(op->sym->m_dst != op->sym->m_src)) ||
-					op->sym->m_src->nb_segs > 1)) {
-				QAT_DP_LOG(ERR, "OOP and/or multi-segment"
-						" buffers not supported for"
-						" DOCSIS security.");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
-			internals = cdev->data->dev_private;
-			proc_type = rte_eal_process_type();
-
-			if (internals->qat_dev->qat_dev_gen != dev_gen) {
-				op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-				return -EINVAL;
-			}
-
-			if (unlikely(ctx->build_request[proc_type] == NULL)) {
-				int ret =
-				qat_sym_gen_dev_ops[dev_gen].set_session(
-					(void *)cdev, sess);
-				if (ret < 0) {
-					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-					return -EINVAL;
-				}
-			}
-
-			sess = (void *)op->sym->sec_session;
-			build_request = ctx->build_request[proc_type];
-			opaque[0] = (uintptr_t)sess;
-			opaque[1] = (uintptr_t)build_request;
-		}
-	}
-#endif
-	else { /* RTE_CRYPTO_OP_SESSIONLESS */
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
-		return -1;
-	}
-
-	return build_request(op, (void *)ctx, out_msg, op_cookie);
-}
-
-uint16_t
-qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, qat_sym_build_request,
-			(void **)ops, nb_ops);
-}
-
-uint16_t
-qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
-}
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -(EFAULT);
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else {
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-	}
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
deleted file mode 100644
index 44feca8251..0000000000
--- a/drivers/crypto/qat/qat_sym_refactor.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_H_
-#define _QAT_SYM_H_
-
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_net_crc.h>
-#endif
-
-#include <openssl/evp.h>
-
-#include "qat_common.h"
-#include "qat_sym_session.h"
-#include "qat_crypto.h"
-#include "qat_logs.h"
-
-#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
-
-#define BYTE_LENGTH    8
-/* bpi is only used for partial blocks of DES and AES
- * so AES block len can be assumed as max len for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/* Macro to add a capability */
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SYM_SGL_MAX_NUMBER	16
-
-/* Maximum data length for single pass GMAC: 2^14-1 */
-#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
-
-struct qat_sym_session;
-
-struct qat_sym_sgl {
-	qat_sgl_hdr;
-	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-struct qat_sym_op_cookie {
-	struct qat_sym_sgl qat_sgl_src;
-	struct qat_sym_sgl qat_sgl_dst;
-	phys_addr_t qat_sgl_src_phys_addr;
-	phys_addr_t qat_sgl_dst_phys_addr;
-	union {
-		/* Used for Single-Pass AES-GMAC only */
-		struct {
-			struct icp_qat_hw_cipher_algo_blk cd_cipher
-					__rte_packed __rte_cache_aligned;
-			phys_addr_t cd_phys_addr;
-		} spc_gmac;
-	} opt;
-};
-
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
-uint16_t
-qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
-
-uint16_t
-qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
-
-/** Encrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt the IV, then XOR this with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_encrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_encrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
-	return -EINVAL;
-}
-
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len > 0 &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
-		/* Encrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset;
-
-		last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely(sym_op->m_dst != NULL))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = dst - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG,
-				"BPI: dst before post-process:",
-				dst, last_block_len);
-#endif
-		bpi_cipher_encrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
-				last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG,
-				"BPI: dst after post-process:",
-				dst, last_block_len);
-#endif
-	}
-	return sym_op->cipher.data.length - last_block_len;
-}
-
-#ifdef RTE_LIB_SECURITY
-static inline void
-qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
-{
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint32_t crc_data_ofs, crc_data_len, crc;
-	uint8_t *crc_data;
-
-	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
-			sym_op->auth.data.length != 0) {
-
-		crc_data_ofs = sym_op->auth.data.offset;
-		crc_data_len = sym_op->auth.data.length;
-		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
-				crc_data_ofs);
-
-		crc = rte_net_crc_calc(crc_data, crc_data_len,
-				RTE_NET_CRC32_ETH);
-
-		if (crc != *(uint32_t *)(crc_data + crc_data_len))
-			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	}
-}
-
-static inline void
-qat_crc_generate(struct qat_sym_session *ctx,
-			struct rte_crypto_op *op)
-{
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint32_t *crc, crc_data_len;
-	uint8_t *crc_data;
-
-	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
-			sym_op->auth.data.length != 0 &&
-			sym_op->m_src->nb_segs == 1) {
-
-		crc_data_len = sym_op->auth.data.length;
-		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
-				sym_op->auth.data.offset);
-		crc = (uint32_t *)(crc_data + crc_data_len);
-		*crc = rte_net_crc_calc(crc_data, crc_data_len,
-				RTE_NET_CRC32_ETH);
-	}
-}
-
-static inline void
-qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
-{
-	struct rte_crypto_op *op;
-	struct qat_sym_session *ctx;
-	uint16_t i;
-
-	for (i = 0; i < nb_ops; i++) {
-		op = (struct rte_crypto_op *)ops[i];
-
-		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-			ctx = (struct qat_sym_session *)
-				get_sec_session_private_data(
-					op->sym->sec_session);
-
-			if (ctx == NULL || ctx->bpi_ctx == NULL)
-				continue;
-
-			qat_crc_generate(ctx, op);
-		}
-	}
-}
-#endif
-
-static __rte_always_inline int
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
-		uint64_t *dequeue_err_count __rte_unused)
-{
-	struct icp_qat_fw_comn_resp *resp_msg =
-			(struct icp_qat_fw_comn_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque_data);
-	struct qat_sym_session *sess;
-	uint8_t is_docsis_sec;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
-			sizeof(struct icp_qat_fw_comn_resp));
-#endif
-
-#ifdef RTE_LIB_SECURITY
-	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-		/*
-		 * Assuming at this point that if it's a security
-		 * op, that this is for DOCSIS
-		 */
-		sess = (struct qat_sym_session *)
-				get_sec_session_private_data(
-				rx_op->sym->sec_session);
-		is_docsis_sec = 1;
-	} else
-#endif
-	{
-		sess = (struct qat_sym_session *)
-				get_sym_session_private_data(
-				rx_op->sym->session,
-				qat_sym_driver_id);
-		is_docsis_sec = 0;
-	}
-
-	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
-			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
-			resp_msg->comn_hdr.comn_status)) {
-
-		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	} else {
-		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-		if (sess->bpi_ctx) {
-			qat_bpicipher_postprocess(sess, rx_op);
-#ifdef RTE_LIB_SECURITY
-			if (is_docsis_sec)
-				qat_crc_verify(sess, rx_op);
-#endif
-		}
-	}
-
-	if (sess->is_single_pass_gmac) {
-		struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *) op_cookie;
-		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
-				sess->auth_key_length);
-	}
-
-	*op = (void *)rx_op;
-
-	return 1;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
-
-int
-qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
-
-void
-qat_sym_init_op_cookie(void *cookie);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-static __rte_always_inline void
-qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
-		struct qat_sym_session *ctx,
-		struct rte_crypto_vec *vec, uint32_t vec_len,
-		struct rte_crypto_va_iova_ptr *cipher_iv,
-		struct rte_crypto_va_iova_ptr *auth_iv,
-		struct rte_crypto_va_iova_ptr *aad,
-		struct rte_crypto_va_iova_ptr *digest)
-{
-	uint32_t i;
-
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	for (i = 0; i < vec_len; i++)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
-	if (cipher_iv && ctx->cipher_iv.length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
-				ctx->cipher_iv.length);
-	if (auth_iv && ctx->auth_iv.length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
-				ctx->auth_iv.length);
-	if (aad && ctx->aad_len > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
-				ctx->aad_len);
-	if (digest && ctx->digest_length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
-				ctx->digest_length);
-}
-#else
-static __rte_always_inline void
-qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
-		struct qat_sym_session *ctx __rte_unused,
-		struct rte_crypto_vec *vec __rte_unused,
-		uint32_t vec_len __rte_unused,
-		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
-		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
-		struct rte_crypto_va_iova_ptr *aad __rte_unused,
-		struct rte_crypto_va_iova_ptr *digest __rte_unused)
-{}
-#endif
-
-#endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 52837d7c9c..27cdeb1de2 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
@@ -600,11 +600,11 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 	session->is_auth = 1;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework
  2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                     ` (6 preceding siblings ...)
  2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 7/7] crypto/qat: qat driver rework clean up Kai Ji
@ 2021-11-02 13:49   ` Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 1/8] crypro/qat: qat driver refactor skeleton Kai Ji
                       ` (10 more replies)
  7 siblings, 11 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patchset reworks the QAT symmetric crypto datapath implementation so
that request building is separated per generation and the crypto operations
under the raw datapath API implementation are unified.
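
For illustration only, a minimal standalone sketch of the dispatch pattern
described above, using hypothetical demo_* names rather than the driver's
real types: each device generation supplies its own build-request callback,
so the generic enqueue loop stays generation-agnostic.

/*
 * Illustration only: hypothetical demo_* names, not the driver's types.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_op {                       /* stand-in for a crypto op */
	uint32_t data_len;
};

/* one build-request callback per device generation */
typedef int (*demo_build_request_t)(struct demo_op *op, uint8_t *msg);

static int
demo_build_request_gen1(struct demo_op *op, uint8_t *msg)
{
	msg[0] = 1;                    /* pretend GEN1 descriptor layout */
	printf("GEN1 request, len %u\n", (unsigned int)op->data_len);
	return 0;
}

static int
demo_build_request_gen4(struct demo_op *op, uint8_t *msg)
{
	msg[0] = 4;                    /* pretend GEN4 descriptor layout */
	printf("GEN4 request, len %u\n", (unsigned int)op->data_len);
	return 0;
}

/* generation-agnostic enqueue loop: no per-generation branches here */
static uint16_t
demo_enqueue_burst(demo_build_request_t build, struct demo_op **ops,
		uint16_t nb_ops)
{
	uint8_t ring_slot[64];
	uint16_t i;

	for (i = 0; i < nb_ops; i++)
		if (build(ops[i], ring_slot) != 0)
			break;
	return i;                      /* number of ops actually queued */
}

int
main(void)
{
	struct demo_op op = { .data_len = 64 };
	struct demo_op *ops[1] = { &op };

	demo_enqueue_burst(demo_build_request_gen1, ops, 1);
	demo_enqueue_burst(demo_build_request_gen4, ops, 1);
	return 0;
}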

In addition, this patchset also enables QAT out-of-place (OOP) support in
the raw datapath API implementation.

This patch depends on http://patchwork.dpdk.org/project/dpdk/cover/20211027155055.32264-1-kai.ji@intel.com/

v3:
- split the single patch 6 into two separate patches

v2:
- review comments addressed

Kai Ji (8):
  crypro/qat: qat driver refactor skeleton
  crypto/qat: qat driver sym op refactor
  crypto/qat: qat driver asym op refactor
  crypto/qat: qat driver session method rework
  crypto/qat: qat driver datapath rework
  crypto/qat: support sgl oop operation
  app/test: cryptodev test fix
  crypto/qat: qat driver rework clean up

 app/test/test_cryptodev.c                    |  58 +-
 drivers/common/qat/meson.build               |   4 +-
 drivers/common/qat/qat_device.c              |   2 +-
 drivers/common/qat/qat_qp.c                  |  40 +-
 drivers/common/qat/qat_qp.h                  |  70 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   7 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  90 ++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 487 +++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 253 +++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 911 +++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 939 ++++++++++++++++++
 drivers/crypto/qat/qat_asym.c                | 786 +++++++++------
 drivers/crypto/qat/qat_asym.h                |  77 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |  14 +-
 drivers/crypto/qat/qat_sym.c                 | 978 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 141 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 114 +--
 drivers/crypto/qat/qat_sym_session.h         |   8 +-
 25 files changed, 3861 insertions(+), 2745 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 1/8] crypro/qat: qat driver refactor skeleton
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
@ 2021-11-02 13:49     ` Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 2/8] crypto/qat: qat driver sym op refactor Kai Ji
                       ` (9 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

Add enqueue/dequeue op burst and build-request skeleton functions
for the QAT crypto driver refactor.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
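Note: below is an illustration-only, self-contained sketch of how the
two-slot opaque cache added to struct qat_qp in this patch may be used to
avoid re-resolving a session's build-request callback on every op. All
demo_* names are hypothetical, not the driver's real API.

/*
 * Illustration only: hypothetical demo_* types, not the driver's API.
 * Shows how a two-slot opaque cache on the queue pair can remember the
 * last session and its resolved build-request callback across ops.
 */
#include <stdint.h>
#include <stddef.h>

struct demo_session {
	int id;
};

typedef int (*demo_build_request_t)(void *op, struct demo_session *sess);

struct demo_qp {
	/* slot 0: last session pointer, slot 1: its build callback */
	uint64_t opaque[2];
};

static int
demo_build(void *op, struct demo_session *sess)
{
	(void)op;
	(void)sess;
	return 0;
}

/* pretend per-session (e.g. per device generation) callback lookup */
static demo_build_request_t
demo_resolve_cb(struct demo_session *sess)
{
	(void)sess;
	return demo_build;
}

static int
demo_build_one(struct demo_qp *qp, void *op, struct demo_session *sess)
{
	demo_build_request_t cb;

	if ((uint64_t)(uintptr_t)sess == qp->opaque[0]) {
		/* cache hit: reuse the callback resolved for the last op */
		cb = (demo_build_request_t)(uintptr_t)qp->opaque[1];
	} else {
		cb = demo_resolve_cb(sess);
		qp->opaque[0] = (uint64_t)(uintptr_t)sess;
		qp->opaque[1] = (uint64_t)(uintptr_t)cb;
	}
	return cb(op, sess);
}

int
main(void)
{
	struct demo_qp qp = { .opaque = { 0, 0 } };
	struct demo_session sess = { .id = 1 };

	demo_build_one(&qp, NULL, &sess);  /* cache miss: resolves callback */
	demo_build_one(&qp, NULL, &sess);  /* cache hit: reuses callback */
	return 0;
}
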
 drivers/common/qat/qat_qp.c          | 16 +++++++++
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++++
 drivers/crypto/qat/qat_asym.c        | 43 ++++++++++++++++++++++
 drivers/crypto/qat/qat_asym.h        | 15 ++++++++
 drivers/crypto/qat/qat_sym.c         | 37 +++++++++++++++++++
 drivers/crypto/qat/qat_sym.h         | 16 +++++++++
 drivers/crypto/qat/qat_sym_session.h |  6 ++++
 7 files changed, 187 insertions(+)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index cde421eb77..0fda890075 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -549,6 +549,22 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 	return data & modulo_mask;
 }
 
+uint16_t
+refactor_qat_enqueue_op_burst(__rte_unused void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		__rte_unused void **ops, __rte_unused uint16_t nb_ops)
+{
+	return 0;
+}
+
+uint16_t
+refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		__rte_unused uint16_t nb_ops)
+{
+	return 0;
+}
+
 uint16_t
 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 {
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..b1350cdaf4 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Typedef of the qat_op_build_request_t function pointer, passed as an
+ * argument to the enqueue op burst, where a build request is assigned based
+ * on the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_meg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context between two
+ *    enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - error code otherwise
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Typedef of the qat_op_dequeue_t function pointer, passed as an argument
+ * to the dequeue op burst, where a dequeue op is assigned based on the
+ * type of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - error code otherwise
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -77,6 +123,14 @@ struct qat_qp_config {
 	const char *service_str;
 };
 
+uint16_t
+refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
+
+uint16_t
+refactor_qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
+
 uint16_t
 qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 85973812a8..c3bbdb6eb8 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -456,6 +456,49 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
+/*
+ * Asym build request refactor function template,
+ * will be removed in the later patchset.
+ */
+static __rte_always_inline int
+refactor_qat_asym_build_request(__rte_unused void *in_op,
+		__rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	return 0;
+}
+
+/*
+ * Asym process response refactor function template,
+ * will be removed in the later patchset.
+ */
+int
+refactor_qat_asym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp,
+		__rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count)
+{
+	return 0;
+}
+
+uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_enqueue_op_burst(qp,
+					refactor_qat_asym_build_request,
+					(void **)ops, nb_ops);
+}
+
+uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
+				refactor_qat_asym_process_response, nb_ops);
+}
+
 int
 qat_asym_build_request(void *in_op,
 			uint8_t *out_msg,
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 308b6b2e0b..50c2641eba 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -92,4 +92,19 @@ void
 qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
 		void *op_cookie);
 
+int
+refactor_qat_asym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp,
+		__rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count);
+
+uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+
+uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 #endif /* _QAT_ASYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 93b257522b..576ef532a7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -210,6 +210,43 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 			ICP_QAT_FW_LA_NO_PROTO);
 }
 
+/*
+ * Sym build request refactor function template,
+ * will be removed in the later patchset.
+ */
+static __rte_always_inline int
+refactor_qat_sym_build_request(__rte_unused void *in_op,
+		__rte_unused uint8_t *out_msg, __rte_unused	void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	return 0;
+}
+
+/*
+ * Sym enqueue burst refactor function template,
+ * will be removed in the later patchset.
+ */
+uint16_t
+refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
+
+/*
+ * Sym dequeue burst refactor function template,
+ * will be removed in the later patchset.
+ */
+uint16_t
+refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
+				refactor_qat_sym_process_response, nb_ops);
+}
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen)
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..17b2c871bd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -54,6 +54,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+static __rte_always_inline int
+refactor_qat_sym_process_response(__rte_unused void **op,
+		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
+		__rte_unused uint64_t *dequeue_err_count)
+{
+	return 0;
+}
+
+uint16_t
+refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..73493ec864 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -55,6 +55,11 @@
 #define QAT_SESSION_IS_SLICE_SET(flags, flag)	\
 	(!!((flags) & (flag)))
 
+struct qat_sym_session;
+
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_NONE = 0,
 	QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -107,6 +112,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 2/8] crypto/qat: qat driver sym op refactor
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 1/8] crypro/qat: qat driver refactor skeleton Kai Ji
@ 2021-11-02 13:49     ` Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 3/8] crypto/qat: qat driver asym " Kai Ji
                       ` (8 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds the refactored QAT symmetric build request function
implementation (qat_sym_refactor.c & qat_sym_refactor.h).
It also adds the QAT sym build ops for auth, cipher, chain and AEAD
operations for QAT generation 1.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
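Note: below is a small self-contained sketch of the head/tail offset
convention used by the gen1 build functions in this patch. The demo union
only mimics the layout idea of rte_crypto_sym_ofs; per region the length is
computed as data_len - head - tail.

/*
 * Illustration only: a demo union mimicking the head/tail layout idea of
 * rte_crypto_sym_ofs. Each region's length is data_len - head - tail.
 */
#include <stdint.h>
#include <stdio.h>

union demo_sym_ofs {
	uint64_t raw;
	struct {
		struct { uint16_t head, tail; } cipher;
		struct { uint16_t head, tail; } auth;
	} ofs;
};

int
main(void)
{
	uint32_t data_len = 128;           /* whole job length in bytes */
	union demo_sym_ofs ofs = { .raw = 0 };

	/* cipher skips a 16-byte header, auth covers the whole buffer */
	ofs.ofs.cipher.head = 16;
	ofs.ofs.cipher.tail = 0;
	ofs.ofs.auth.head = 0;
	ofs.ofs.auth.tail = 0;

	printf("cipher_offset=%u cipher_length=%u\n",
		(unsigned int)ofs.ofs.cipher.head,
		(unsigned int)(data_len - ofs.ofs.cipher.head -
			ofs.ofs.cipher.tail));
	printf("auth_off=%u auth_len=%u\n",
		(unsigned int)ofs.ofs.auth.head,
		(unsigned int)(data_len - ofs.ofs.auth.head -
			ofs.ofs.auth.tail));
	return 0;
}
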
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 1071 ++++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  895 +++++++++++++++
 drivers/crypto/qat/qat_sym.h                 |    8 +
 drivers/crypto/qat/qat_sym_hw_dp.c           |    8 -
 drivers/crypto/qat/qat_sym_refactor.c        |  409 +++++++
 drivers/crypto/qat/qat_sym_refactor.h        |  402 +++++++
 6 files changed, 2785 insertions(+), 8 deletions(-)
 create mode 100644 drivers/crypto/qat/qat_sym_refactor.c
 create mode 100644 drivers/crypto/qat/qat_sym_refactor.h

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..07020741bd 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -8,14 +8,1082 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
+	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
+		CAP_RNG(digest_size, 1, 20, 1)), \
+	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
+		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
+	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
+			CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_XTS,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_CIPHER_CAP(KASUMI_F8,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(3DES_CBC,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(3DES_CTR,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(DES_CBC,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
+	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,  CAP_SET(block_size, 8), \
+		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0))
+
+#define QAT_BASE_GEN1_ASYM_CAPABILITIES				\
+	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),			\
+	QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),			\
+	QAT_ASYM_CAP(RSA,					\
+			((1 << RTE_CRYPTO_ASYM_OP_SIGN) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |	\
+			(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),	\
+			64, 512, 64)
+
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
+	QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \
+
+#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \
+	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 32, 32, 0), \
+		CAP_RNG(digest_size, 16, 16, 0), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0))
+
+#define QAT_BASE_GEN4_SYM_CAPABILITIES \
+	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
+		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
+		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
+		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
+		CAP_RNG(digest_size, 1, 20, 1)), \
+	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
+		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
+	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
+	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
+	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
+		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
+	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
+		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
+		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_auth(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_cipher(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_chk_len_in_bits_cipher(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_chk_len_in_bits_auth(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_chk_len_in_bits_cipher(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_chk_len_in_bits_auth(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* SGL: walk the vectors to find the IOVA just past the auth region */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
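+		/*
+		 * Build the CCM B0 block: the flags byte encodes AAD
+		 * presence, the tag length and q, the size of the
+		 * message length field.
+		 */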
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_denqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
@@ -23,6 +1091,9 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 struct qat_capabilities_info
 qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e156f194e2..e1fd14956b 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -269,6 +269,901 @@ qat_sym_create_security_gen1(void *cryptodev)
 
 #endif
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
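+	/* start from the request template pre-built at session init */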
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
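+	/* tail is a byte offset; shifting yields the cookie index */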
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
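+	/*
+	 * Limit the burst to the remaining in-flight capacity, counting
+	 * requests already cached but not yet submitted via enqueue_done().
+	 */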
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
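+	/* the first response was already fetched above to get the count */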
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* dequeue is already complete when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque data is not an array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
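+	/* n must match the number of ops cached since the last doorbell */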
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_denqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
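+	/* coalesce head CSR updates: write back only past the threshold */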
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
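+/*
+ * Typical raw data-path call sequence (sketch): configure the context once
+ * below, then per burst call enqueue()/enqueue_burst() followed by
+ * enqueue_done(n) to ring the tail doorbell, and dequeue()/dequeue_burst()
+ * followed by dequeue_done(n) to write back the head CSR.
+ */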
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_denqueue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (!build_request)
+		return 0;
+	ctx->build_request[proc_type] = build_request;
+
+	if (!handle_mixed)
+		return 0;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 17b2c871bd..4801bd50a7 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -54,6 +54,14 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
 static __rte_always_inline int
 refactor_qat_sym_process_response(__rte_unused void **op,
 		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 12825e448b..94589458d0 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -13,14 +13,6 @@
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
new file mode 100644
index 0000000000..0412902e70
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_refactor.c
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
+
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
+/* An rte_driver is needed in the registration of both the device and the
+ * driver with cryptodev.
+ * The QAT PCI device's own rte_driver can't be used, as its name represents
+ * the whole PCI device with all services. Think of this as a holder for a
+ * name for the crypto part of the PCI device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
+
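+	/*
+	 * opaque[] caches the last-seen session and its build_request
+	 * callback, so back-to-back ops on the same session skip the
+	 * device-generation check and callback resolution below.
+	 */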
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
+	}
+
+#ifdef RTE_LIB_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
+			if (unlikely(ctx->bpi_ctx == NULL)) {
+				QAT_DP_LOG(ERR, "QAT PMD only supports security"
+						" operation requests for"
+						" DOCSIS, op (%p) is not for"
+						" DOCSIS.", op);
+				return -EINVAL;
+			} else if (unlikely(((op->sym->m_dst != NULL) &&
+					(op->sym->m_dst != op->sym->m_src)) ||
+					op->sym->m_src->nb_segs > 1)) {
+				QAT_DP_LOG(ERR, "OOP and/or multi-segment"
+						" buffers not supported for"
+						" DOCSIS security.");
+				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+				return -EINVAL;
+			}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
+		}
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
+	}
+
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	/*
+	 * All processes must use the same driver id so they can share
+	 * sessions. Store driver_id so we can validate that all processes
+	 * have the same value; typically they do, but the values could
+	 * differ if the binaries were built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
+	} else {
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
+	}
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+
+	return 0;
+
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
+
+	return ret;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
+	return 0;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
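+		/* seed the shadow ring pointers from the queue pair state */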
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
new file mode 100644
index 0000000000..d4bfe8f364
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_refactor.h
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_net_crc.h>
+#endif
+
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_crypto.h"
+#include "qat_logs.h"
+
+#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
+
+#define BYTE_LENGTH    8
+/* bpi is only used for partial blocks of DES and AES
+ * so AES block len can be assumed as max len for iv, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/* Macro to add a capability */
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER	16
+
+/* Maximum data length for single pass GMAC: 2^14-1 */
+#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+	qat_sgl_hdr;
+	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+	struct qat_sym_sgl qat_sgl_src;
+	struct qat_sym_sgl qat_sgl_dst;
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
+	union {
+		/* Used for Single-Pass AES-GMAC only */
+		struct {
+			struct icp_qat_hw_cipher_algo_blk cd_cipher
+					__rte_packed __rte_cache_aligned;
+			phys_addr_t cd_phys_addr;
+		} spc_gmac;
+	} opt;
+};
+
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+/** Encrypt a single partial block
+ *  Depends on openssl libcrypto
+ *  Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt the IV, then XOR this with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_encrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_encrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+	return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len > 0 &&
+			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+		/* Encrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset;
+
+		last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely(sym_op->m_dst != NULL))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = dst - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG,
+				"BPI: dst before post-process:",
+				dst, last_block_len);
+#endif
+		bpi_cipher_encrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+				last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG,
+				"BPI: dst after post-process:",
+				dst, last_block_len);
+#endif
+	}
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+#ifdef RTE_LIB_SECURITY
+static inline void
+qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint32_t crc_data_ofs, crc_data_len, crc;
+	uint8_t *crc_data;
+
+	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
+			sym_op->auth.data.length != 0) {
+
+		crc_data_ofs = sym_op->auth.data.offset;
+		crc_data_len = sym_op->auth.data.length;
+		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+				crc_data_ofs);
+
+		crc = rte_net_crc_calc(crc_data, crc_data_len,
+				RTE_NET_CRC32_ETH);
+
+		if (crc != *(uint32_t *)(crc_data + crc_data_len))
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	}
+}
+
+static inline void
+qat_crc_generate(struct qat_sym_session *ctx,
+			struct rte_crypto_op *op)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint32_t *crc, crc_data_len;
+	uint8_t *crc_data;
+
+	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
+			sym_op->auth.data.length != 0 &&
+			sym_op->m_src->nb_segs == 1) {
+
+		crc_data_len = sym_op->auth.data.length;
+		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+				sym_op->auth.data.offset);
+		crc = (uint32_t *)(crc_data + crc_data_len);
+		*crc = rte_net_crc_calc(crc_data, crc_data_len,
+				RTE_NET_CRC32_ETH);
+	}
+}
+
+static inline void
+qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op *op;
+	struct qat_sym_session *ctx;
+	uint16_t i;
+
+	for (i = 0; i < nb_ops; i++) {
+		op = (struct rte_crypto_op *)ops[i];
+
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			ctx = (struct qat_sym_session *)
+				get_sec_session_private_data(
+					op->sym->sec_session);
+
+			if (ctx == NULL || ctx->bpi_ctx == NULL)
+				continue;
+
+			qat_crc_generate(ctx, op);
+		}
+	}
+}
+#endif
+
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+			(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque_data);
+	struct qat_sym_session *sess;
+	uint8_t is_docsis_sec;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+			sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+#ifdef RTE_LIB_SECURITY
+	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		/*
+		 * Assume at this point that if it's a security
+		 * op, it is for DOCSIS.
+		 */
+		sess = (struct qat_sym_session *)
+				get_sec_session_private_data(
+				rx_op->sym->sec_session);
+		is_docsis_sec = 1;
+	} else
+#endif
+	{
+		sess = (struct qat_sym_session *)
+				get_sym_session_private_data(
+				rx_op->sym->session,
+				qat_sym_driver_id);
+		is_docsis_sec = 0;
+	}
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+			resp_msg->comn_hdr.comn_status)) {
+
+		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+		if (sess->bpi_ctx) {
+			qat_bpicipher_postprocess(sess, rx_op);
+#ifdef RTE_LIB_SECURITY
+			if (is_docsis_sec)
+				qat_crc_verify(sess, rx_op);
+#endif
+		}
+	}
+
+	if (sess->is_single_pass_gmac) {
+		struct qat_sym_op_cookie *cookie =
+				(struct qat_sym_op_cookie *) op_cookie;
+		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
+				sess->auth_key_length);
+	}
+
+	*op = (void *)rx_op;
+
+	return 1;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
+
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{}
+#endif
+
+#endif /* _QAT_SYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 3/8] crypto/qat: qat driver asym op refactor
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 1/8] crypro/qat: qat driver refactor skeleton Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 2/8] crypto/qat: qat driver sym op refactor Kai Ji
@ 2021-11-02 13:49     ` Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 4/8] crypto/qat: qat driver session method rework Kai Ji
                       ` (7 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds the refactored QAT asymmetric crypto build request
function implementation (qat_asym_refactor.c & qat_asym_refactor.h).

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
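Note: the sketch below illustrates the callback-based enqueue flow this
refactor moves to. It assumes the driver-internal headers (qat_qp.h,
qat_asym.h, qat_device.h) are on the include path; qat_enqueue_op_burst()
and qat_asym_build_request() are taken from the diff, while the
qat_build_request_t typedef and the wrapper name are illustrative only.

/* Hypothetical name for the shape of the build-request callback that
 * the common enqueue routine consumes in this series.
 */
typedef int (*qat_build_request_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);

static uint16_t
asym_enqueue_sketch(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* The common ring code only moves descriptors; the service-specific
	 * callback converts each rte_crypto_op into a PKE firmware request,
	 * as qat_asym_build_request() does in the new file below.
	 */
	return qat_enqueue_op_burst(qp, qat_asym_build_request,
			(void **)ops, nb_ops);
}
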
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c |   7 +
 drivers/crypto/qat/qat_asym_refactor.c     | 994 +++++++++++++++++++++
 drivers/crypto/qat/qat_asym_refactor.h     | 125 +++
 3 files changed, 1126 insertions(+)
 create mode 100644 drivers/crypto/qat/qat_asym_refactor.c
 create mode 100644 drivers/crypto/qat/qat_asym_refactor.h

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..99d90fa56c 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
new file mode 100644
index 0000000000..8e789920cb
--- /dev/null
+++ b/drivers/crypto/qat/qat_asym_refactor.c
@@ -0,0 +1,994 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 - 2021 Intel Corporation
+ */
+
+
+#include <stdarg.h>
+
+#include <rte_cryptodev_pmd.h>
+
+#include "icp_qat_fw_pke.h"
+#include "icp_qat_fw.h"
+#include "qat_pke_functionality_arrays.h"
+
+#include "qat_device.h"
+
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
+{
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
+	}
+
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
+
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
+}
+
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+}
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
+
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
+static void
+qat_clear_arrays(struct qat_asym_op_cookie *cookie,
+		int in_count, int out_count, int in_size, int out_size)
+{
+	int i;
+
+	for (i = 0; i < in_count; i++)
+		memset(cookie->input_array[i], 0x0, in_size);
+	for (i = 0; i < out_count; i++)
+		memset(cookie->output_array[i], 0x0, out_size);
+}
+
+static void
+qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
+{
+	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
+		qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
+				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size,
+				out_size);
+	else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV)
+		qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
+				QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size,
+				out_size);
+}
+
+static void
+qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length - n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+						RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
+			alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
+{
+	if (n.length < 8) {
+		/* Not a case for any cryptographic function except for the DH
+		 * generator, which very often can be one byte long
+		 */
+		size_t i;
+
+		if (n.data[n.length - 1] == 0x0) {
+			for (i = 0; i < n.length - 1; i++)
+				if (n.data[i] != 0x0)
+					break;
+			if (i == n.length - 1)
+				return -(EINVAL);
+		}
+	} else if (*(uint64_t *)&n.data[
+				n.length - 8] == 0) {
+		/* Very likely it is zeroed modulus */
+		size_t i;
+
+		for (i = 0; i < n.length - 8; i++)
+			if (n.data[i] != 0x0)
+				break;
+		if (i == n.length - 8)
+			return -(EINVAL);
+	}
+
+	return 0;
+}
+
+static int
+qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
+		struct icp_qat_fw_pke_request *qat_req,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	int err = 0;
+	size_t alg_size;
+	size_t alg_size_in_bytes;
+	uint32_t func_id = 0;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		err = qat_asym_check_nonzero(xform->modex.modulus);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
+					" aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
+			       xform->modex.exponent.length,
+			       xform->modex.modulus.length);
+		alg_size = alg_size_in_bytes << 3;
+
+		if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
+				sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
+				&alg_size, &func_id)) {
+			return -(EINVAL);
+		}
+
+		alg_size_in_bytes = alg_size >> 3;
+		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
+			asym_op->modex.base.length
+			, asym_op->modex.base.data,
+			asym_op->modex.base.length);
+		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
+			xform->modex.exponent.length
+			, xform->modex.exponent.data,
+			xform->modex.exponent.length);
+		rte_memcpy(cookie->input_array[2]  + alg_size_in_bytes -
+			xform->modex.modulus.length,
+			xform->modex.modulus.data,
+			xform->modex.modulus.length);
+		cookie->alg_size = alg_size;
+		qat_req->pke_hdr.cd_pars.func_id = func_id;
+		qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
+		qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
+				cookie->input_array[0],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
+				cookie->input_array[1],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp modulus",
+				cookie->input_array[2],
+				alg_size_in_bytes);
+#endif
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		err = qat_asym_check_nonzero(xform->modinv.modulus);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in modular multiplicative"
+					" inverse, aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
+				xform->modinv.modulus.length);
+		alg_size = alg_size_in_bytes << 3;
+
+		if (xform->modinv.modulus.data[
+				xform->modinv.modulus.length - 1] & 0x01) {
+			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
+					sizeof(MOD_INV_IDS_ODD)/
+					sizeof(*MOD_INV_IDS_ODD),
+					&alg_size, &func_id)) {
+				return -(EINVAL);
+			}
+		} else {
+			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
+					sizeof(MOD_INV_IDS_EVEN)/
+					sizeof(*MOD_INV_IDS_EVEN),
+					&alg_size, &func_id)) {
+				return -(EINVAL);
+			}
+		}
+
+		alg_size_in_bytes = alg_size >> 3;
+		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
+			asym_op->modinv.base.length
+				, asym_op->modinv.base.data,
+				asym_op->modinv.base.length);
+		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
+				xform->modinv.modulus.length
+				, xform->modinv.modulus.data,
+				xform->modinv.modulus.length);
+		cookie->alg_size = alg_size;
+		qat_req->pke_hdr.cd_pars.func_id = func_id;
+		qat_req->input_param_count =
+				QAT_ASYM_MODINV_NUM_IN_PARAMS;
+		qat_req->output_param_count =
+				QAT_ASYM_MODINV_NUM_OUT_PARAMS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
+				cookie->input_array[0],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
+				cookie->input_array[1],
+				alg_size_in_bytes);
+#endif
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		err = qat_asym_check_nonzero(xform->rsa.n);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in RSA,"
+					" aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = xform->rsa.n.length;
+		alg_size = alg_size_in_bytes << 3;
+
+		qat_req->input_param_count =
+				QAT_ASYM_RSA_NUM_IN_PARAMS;
+		qat_req->output_param_count =
+				QAT_ASYM_RSA_NUM_OUT_PARAMS;
+
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+						RTE_CRYPTO_ASYM_OP_VERIFY) {
+
+			if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
+					sizeof(RSA_ENC_IDS)/
+					sizeof(*RSA_ENC_IDS),
+					&alg_size, &func_id)) {
+				err = -(EINVAL);
+				QAT_LOG(ERR,
+					"Not supported RSA parameter size (key)");
+				return err;
+			}
+			alg_size_in_bytes = alg_size >> 3;
+			if (asym_op->rsa.op_type ==
+				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0] +
+						alg_size_in_bytes -
+						asym_op->rsa.message.length
+						, asym_op->rsa.message.data,
+						asym_op->rsa.message.length);
+					break;
+				default:
+					err = -(EINVAL);
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Encryption)");
+					return err;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+#endif
+			} else {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0],
+						asym_op->rsa.sign.data,
+						alg_size_in_bytes);
+					break;
+				default:
+					err = -(EINVAL);
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Verify)");
+					return err;
+				}
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+#endif
+
+			}
+			rte_memcpy(cookie->input_array[1] +
+					alg_size_in_bytes -
+					xform->rsa.e.length
+					, xform->rsa.e.data,
+					xform->rsa.e.length);
+			rte_memcpy(cookie->input_array[2] +
+					alg_size_in_bytes -
+					xform->rsa.n.length,
+					xform->rsa.n.data,
+					xform->rsa.n.length);
+
+			cookie->alg_size = alg_size;
+			qat_req->pke_hdr.cd_pars.func_id = func_id;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
+					cookie->input_array[1],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
+					cookie->input_array[2],
+					alg_size_in_bytes);
+#endif
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0]
+						+ alg_size_in_bytes -
+						asym_op->rsa.cipher.length,
+						asym_op->rsa.cipher.data,
+						asym_op->rsa.cipher.length);
+					break;
+				default:
+					QAT_LOG(ERR,
+						"Invalid padding of RSA (Decrypt)");
+					return -(EINVAL);
+				}
+
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0]
+						+ alg_size_in_bytes -
+						asym_op->rsa.message.length,
+						asym_op->rsa.message.data,
+						asym_op->rsa.message.length);
+					break;
+				default:
+					QAT_LOG(ERR,
+						"Invalid padding of RSA (Signature)");
+					return -(EINVAL);
+				}
+			}
+			if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
+
+				qat_req->input_param_count =
+						QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
+				if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS,
+						sizeof(RSA_DEC_CRT_IDS)/
+						sizeof(*RSA_DEC_CRT_IDS),
+						&alg_size, &func_id)) {
+					return -(EINVAL);
+				}
+				alg_size_in_bytes = alg_size >> 3;
+
+				rte_memcpy(cookie->input_array[1] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.p.length
+						, xform->rsa.qt.p.data,
+						xform->rsa.qt.p.length);
+				rte_memcpy(cookie->input_array[2] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.q.length
+						, xform->rsa.qt.q.data,
+						xform->rsa.qt.q.length);
+				rte_memcpy(cookie->input_array[3] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.dP.length
+						, xform->rsa.qt.dP.data,
+						xform->rsa.qt.dP.length);
+				rte_memcpy(cookie->input_array[4] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.dQ.length
+						, xform->rsa.qt.dQ.data,
+						xform->rsa.qt.dQ.length);
+				rte_memcpy(cookie->input_array[5] +
+						(alg_size_in_bytes >> 1) -
+						xform->rsa.qt.qInv.length
+						, xform->rsa.qt.qInv.data,
+						xform->rsa.qt.qInv.length);
+				cookie->alg_size = alg_size;
+				qat_req->pke_hdr.cd_pars.func_id = func_id;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "C",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG, "p",
+						cookie->input_array[1],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG, "q",
+						cookie->input_array[2],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"dP", cookie->input_array[3],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"dQ", cookie->input_array[4],
+						alg_size_in_bytes);
+				QAT_DP_HEXDUMP_LOG(DEBUG,
+						"qInv", cookie->input_array[5],
+						alg_size_in_bytes);
+#endif
+			} else if (xform->rsa.key_type ==
+					RTE_RSA_KEY_TYPE_EXP) {
+				if (qat_asym_get_sz_and_func_id(
+						RSA_DEC_IDS,
+						sizeof(RSA_DEC_IDS)/
+						sizeof(*RSA_DEC_IDS),
+						&alg_size, &func_id)) {
+					return -(EINVAL);
+				}
+				alg_size_in_bytes = alg_size >> 3;
+				rte_memcpy(cookie->input_array[1] +
+						alg_size_in_bytes -
+						xform->rsa.d.length,
+						xform->rsa.d.data,
+						xform->rsa.d.length);
+				rte_memcpy(cookie->input_array[2] +
+						alg_size_in_bytes -
+						xform->rsa.n.length,
+						xform->rsa.n.data,
+						xform->rsa.n.length);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext",
+					cookie->input_array[0],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d",
+					cookie->input_array[1],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n",
+					cookie->input_array[2],
+					alg_size_in_bytes);
+#endif
+
+				cookie->alg_size = alg_size;
+				qat_req->pke_hdr.cd_pars.func_id = func_id;
+			} else {
+				QAT_LOG(ERR, "Invalid RSA key type");
+				return -(EINVAL);
+			}
+		}
+	} else {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		return -(EINVAL);
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
+{
+	struct qat_asym_session *ctx;
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	struct rte_crypto_asym_op *asym_op = op->asym;
+	struct icp_qat_fw_pke_request *qat_req =
+			(struct icp_qat_fw_pke_request *)out_msg;
+	struct qat_asym_op_cookie *cookie =
+				(struct qat_asym_op_cookie *)op_cookie;
+	int err = 0;
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)
+			get_asym_session_private_data(
+			op->asym->session, qat_asym_driver_id);
+		if (unlikely(ctx == NULL)) {
+			QAT_LOG(ERR, "Session has not been created for this device");
+			goto error;
+		}
+		rte_mov64((uint8_t *)qat_req,
+			(const uint8_t *)&(ctx->req_tmpl));
+		err = qat_asym_fill_arrays(asym_op, qat_req,
+				cookie, ctx->xform);
+		if (err) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			goto error;
+		}
+	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_fill_req_tmpl(qat_req);
+		err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
+				op->asym->xform);
+		if (err) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			goto error;
+		}
+	} else {
+		QAT_DP_LOG(ERR, "Invalid session/xform settings");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		goto error;
+	}
+
+	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+	qat_req->pke_mid.src_data_addr = cookie->input_addr;
+	qat_req->pke_mid.dest_data_addr = cookie->output_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_pke_request));
+#endif
+
+	return 0;
+error:
+
+	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+		sizeof(struct icp_qat_fw_pke_request));
+#endif
+
+	qat_req->output_param_count = 0;
+	qat_req->input_param_count = 0;
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+	cookie->error |= err;
+
+	return 0;
+}
+
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
new file mode 100644
index 0000000000..9ecabdbe8f
--- /dev/null
+++ b/drivers/crypto/qat/qat_asym_refactor.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _QAT_ASYM_H_
+#define _QAT_ASYM_H_
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto_asym.h>
+#include "icp_qat_fw_pke.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
+typedef uint64_t large_int_ptr;
+#define MAX_PKE_PARAMS	8
+#define QAT_PKE_MAX_LN_SIZE 512
+#define _PKE_ALIGN_ __rte_aligned(8)
+
+#define QAT_ASYM_MAX_PARAMS			8
+#define QAT_ASYM_MODINV_NUM_IN_PARAMS		2
+#define QAT_ASYM_MODINV_NUM_OUT_PARAMS		1
+#define QAT_ASYM_MODEXP_NUM_IN_PARAMS		3
+#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS		1
+#define QAT_ASYM_RSA_NUM_IN_PARAMS		3
+#define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
+#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
+
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
+struct qat_asym_op_cookie {
+	size_t alg_size;
+	uint64_t error;
+	rte_iova_t input_addr;
+	rte_iova_t output_addr;
+	large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+	large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+	union {
+		uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
+		uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
+	} _PKE_ALIGN_;
+	uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
+} _PKE_ALIGN_;
+
+struct qat_asym_session {
+	struct icp_qat_fw_pke_request req_tmpl;
+	struct rte_crypto_asym_xform *xform;
+};
+
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool);
+
+unsigned int
+qat_asym_session_get_private_size(struct rte_cryptodev *dev);
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess);
+
+/*
+ * Process PKE response received from outgoing queue of QAT
+ *
+ * @param	op		output parameter; the rte_crypto_op referred to
+ *				by the response message is returned through it
+ * @param	resp		icp_qat_fw_pke_resp message received from
+ *				outgoing fw message queue
+ * @param	op_cookie	Cookie pointer that holds private metadata
+ * @param	dequeue_err_count	Error count number pointer
+ *
+ */
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
+
+void
+qat_asym_init_op_cookie(void *cookie);
+
+#endif /* _QAT_ASYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 4/8] crypto/qat: qat driver session method rework
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                       ` (2 preceding siblings ...)
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 3/8] crypto/qat: qat driver asym " Kai Ji
@ 2021-11-02 13:49     ` Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 5/8] crypto/qat: qat driver datapath rework Kai Ji
                       ` (6 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces the set_session & set_raw_dp_ctx methods in the QAT
generation-specific dev ops. It also replaces min_qat_dev_gen_id with dev_id:
a session is treated as invalid if the device generation id does not match
during session init and subsequent ops.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
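Note: the sketch below illustrates the per-generation ops dispatch this
patch introduces, using a simplified stand-in structure. It assumes
QAT_N_GENS and enum qat_device_gen from the common QAT headers plus
errno.h; the set_session and set_raw_dp_ctx member names follow the
RTE_INIT assignments in the diff, the real struct qat_crypto_gen_dev_ops
lives in qat_crypto.h and is not reproduced here, and the error code used
as a fallback is illustrative only.

struct gen_dev_ops_sketch {
	int (*set_session)(void *cdev, void *session);
	int (*set_raw_dp_ctx)(void *qp, void *raw_dp_ctx);
};

static struct gen_dev_ops_sketch ops_sketch[QAT_N_GENS];

static int
set_session_dispatch_sketch(enum qat_device_gen gen, void *cdev, void *sess)
{
	/* Dispatch to the generation-specific handler registered at init,
	 * e.g. qat_sym_crypto_set_session_gen2() below. A session created
	 * against a different device generation is expected to be rejected
	 * by that handler; -ENOTSUP is an assumed fallback for a generation
	 * that registers no handler.
	 */
	if (ops_sketch[gen].set_session == NULL)
		return -ENOTSUP;
	return ops_sketch[gen].set_session(cdev, sess);
}

Each generation fills its slot at init time, mirroring the RTE_INIT
constructors in the diff, e.g. ops_sketch[QAT_GEN2].set_session =
qat_sym_crypto_set_session_gen2.
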
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  90 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 467 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 243 ++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |   5 +
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |   9 +
 drivers/crypto/qat/qat_sym.c                 |   8 +-
 drivers/crypto/qat/qat_sym_session.c         | 106 +----
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 9 files changed, 831 insertions(+), 100 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..72609703f9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == 0 || ret != -ENOTSUP)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * some of those are not supported by GEN2 either, so check here.
+	 */
+	if ((qat_private->internal_capabilities &
+			QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+		return -ENOTSUP;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +309,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..6494019050 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -143,6 +143,468 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * these cases are addressed by GEN3.
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +612,10 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +627,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..167c95abcf 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -103,11 +103,253 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+	 * this is addressed by GEN4
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +363,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e1fd14956b..b044c7b2d3 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -6,6 +6,7 @@
 #ifdef RTE_LIB_SECURITY
 #include <rte_security_driver.h>
 #endif
+#include <cryptodev_pmd.h>
 
 #include "adf_transport_access_macros.h"
 #include "icp_qat_fw.h"
@@ -1169,6 +1170,10 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 84c26a8062..ee878e47f1 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2021 Intel Corporation
  */
 
+#include <cryptodev_pmd.h>
 #include "qat_device.h"
 #include "qat_qp.h"
 #include "qat_crypto.h"
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..8fe3f0b061 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -48,15 +48,24 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 576ef532a7..09c1e6f6a4 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -249,7 +249,7 @@ refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -314,12 +314,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..52837d7c9c 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
 	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in,
+		uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 73493ec864..e1b5edb2f9 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -100,7 +100,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 5/8] crypto/qat: qat driver datapath rework
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                       ` (3 preceding siblings ...)
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 4/8] crypto/qat: qat driver session method rework Kai Ji
@ 2021-11-02 13:49     ` Kai Ji
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation Kai Ji
                       ` (5 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces an op_build_request function pointer into the QAT
enqueue op burst path and passes qat_dequeue_process_response into the
QAT dequeue op burst path, so the common queue-pair code no longer
switches on the service type at run time. The per-session build-request
op is enabled based on the crypto operation.
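
For illustration only (not part of the patch): with the reworked
prototypes, each service keeps a thin burst wrapper and hands its own
build-request and response-processing callbacks to the common queue
pair code, mirroring the qat_comp_dequeue_burst() wrapper added below
for compression. A symmetric-crypto wrapper would look roughly like the
sketch here; the callback names and exact signatures are assumptions
and may differ in the final code.

#include "qat_qp.h"
#include "qat_sym_refactor.h"

/* sketch: symmetric-crypto burst wrappers built on the reworked helpers */
static uint16_t
sym_enqueue_burst_sketch(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	/* per-op request building is delegated to the sym builder callback */
	return qat_enqueue_op_burst(qp, qat_sym_build_request,
			(void **)ops, nb_ops);
}

static uint16_t
sym_dequeue_burst_sketch(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	/* response handling is delegated instead of a service-type switch */
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_sym_process_response, nb_ops);
}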

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |  4 +-
 drivers/common/qat/qat_qp.c                  | 60 +++++---------------
 drivers/common/qat/qat_qp.h                  | 32 +++++++----
 drivers/compress/qat/qat_comp_pmd.c          | 12 +++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  4 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  2 +-
 drivers/crypto/qat/qat_asym_refactor.c       |  4 +-
 drivers/crypto/qat/qat_asym_refactor.h       |  2 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           |  2 +-
 drivers/crypto/qat/qat_sym_refactor.c        | 51 +----------------
 drivers/crypto/qat/qat_sym_refactor.h        |  2 +-
 12 files changed, 57 insertions(+), 122 deletions(-)

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index ce9959d103..b7b18b6c0f 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -70,8 +70,8 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+    foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c',
+            'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 0fda890075..82adda0698 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -15,8 +15,8 @@
 #include "qat_logs.h"
 #include "qat_device.h"
 #include "qat_qp.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_comp.h"
 
 #define QAT_CQ_MAX_DEQ_RETRIES 10
@@ -550,23 +550,8 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-refactor_qat_enqueue_op_burst(__rte_unused void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
-		__rte_unused void **ops, __rte_unused uint16_t nb_ops)
-{
-	return 0;
-}
-
-uint16_t
-refactor_qat_dequeue_op_burst(__rte_unused void *qp, __rte_unused void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		__rte_unused uint16_t nb_ops)
-{
-	return 0;
-}
-
-uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -616,29 +601,18 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -833,7 +807,8 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -851,19 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index b1350cdaf4..b1829e122b 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -14,6 +14,24 @@
 
 struct qat_pci_device;
 
+/* Default qp configuration for GEN4 devices */
+#define QAT_GEN4_QP_DEFCON	(QAT_SERVICE_SYMMETRIC |	\
+				QAT_SERVICE_SYMMETRIC << 8 |	\
+				QAT_SERVICE_SYMMETRIC << 16 |	\
+				QAT_SERVICE_SYMMETRIC << 24)
+
+/* QAT GEN 4 specific macros */
+#define QAT_GEN4_BUNDLE_NUM             4
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM     1
+
+/* Queue pair setup error codes */
+#define QAT_NOMEM		1
+#define QAT_QP_INVALID_DESC_NO	2
+#define QAT_QP_BUSY		3
+#define QAT_PCI_NO_RESOURCE	4
+
+#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C
+
 /**
  * Structure associated with each queue.
  */
@@ -124,21 +142,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-refactor_qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops);
 
-uint16_t
-refactor_qat_dequeue_op_burst(void *qp, void **ops,
-		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
-
-uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
-
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
@@ -150,7 +162,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev,
 
 int
 qat_qps_per_service(struct qat_pci_device *qat_dev,
-		enum qat_service_type service);
+			enum qat_service_type service);
 
 const struct qat_qp_hw_data *
 qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 9b24d46e97..2f17f8825e 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 72609703f9..86c124f6c8 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 167c95abcf..108e07ee7f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym.h"
-#include "qat_asym.h"
+#include "qat_sym_refactor.h"
+#include "qat_asym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index b044c7b2d3..abd36c7f1c 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -13,7 +13,7 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym_session.h"
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_sym_session.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
index 8e789920cb..3a9b1d4054 100644
--- a/drivers/crypto/qat/qat_asym_refactor.c
+++ b/drivers/crypto/qat/qat_asym_refactor.c
@@ -5,7 +5,7 @@
 
 #include <stdarg.h>
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
@@ -14,7 +14,7 @@
 #include "qat_device.h"
 
 #include "qat_logs.h"
-#include "qat_asym.h"
+#include "qat_asym_refactor.h"
 
 uint8_t qat_asym_driver_id;
 
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
index 9ecabdbe8f..6d3d991bc7 100644
--- a/drivers/crypto/qat/qat_asym_refactor.h
+++ b/drivers/crypto/qat/qat_asym_refactor.h
@@ -5,7 +5,7 @@
 #ifndef _QAT_ASYM_H_
 #define _QAT_ASYM_H_
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
 #include "qat_device.h"
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 94589458d0..af75ac2011 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -8,7 +8,7 @@
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
 
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
index 0412902e70..82f078ff1e 100644
--- a/drivers/crypto/qat/qat_sym_refactor.c
+++ b/drivers/crypto/qat/qat_sym_refactor.c
@@ -10,7 +10,7 @@
 #include <rte_bus_pci.h>
 #include <rte_byteorder.h>
 
-#include "qat_sym.h"
+#include "qat_sym_refactor.h"
 #include "qat_crypto.h"
 #include "qat_qp.h"
 
@@ -354,55 +354,6 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_cryptodev_private *internals = dev->data->dev_private;
-	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
-	struct qat_crypto_gen_dev_ops *gen_dev_ops =
-			&qat_sym_gen_dev_ops[qat_dev_gen];
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	if (!gen_dev_ops->set_raw_dp_ctx) {
-		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
-				qat_dev_gen);
-		return -ENOTSUP;
-	}
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
-}
-
-int
-qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
index d4bfe8f364..44feca8251 100644
--- a/drivers/crypto/qat/qat_sym_refactor.h
+++ b/drivers/crypto/qat/qat_sym_refactor.h
@@ -5,7 +5,7 @@
 #ifndef _QAT_SYM_H_
 #define _QAT_SYM_H_
 
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 #ifdef RTE_LIB_SECURITY
 #include <rte_net_crc.h>
 #endif
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                       ` (4 preceding siblings ...)
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 5/8] crypto/qat: qat driver datapath rework Kai Ji
@ 2021-11-02 13:49     ` Kai Ji
  2021-11-03 15:46       ` Zhang, Roy Fan
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix Kai Ji
                       ` (4 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds SGL out-of-place (OOP) operation support to the QAT
driver's raw data-path enqueue functions.
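
For context, not part of the patch: on the raw data path an application
requests out-of-place processing by filling dest_sgl next to src_sgl in
struct rte_crypto_sym_vec, while a NULL dest_sgl keeps the operation
in place, which is exactly the branch added below. A minimal sketch,
with all buffers and IOVAs assumed to be prepared by the caller:

#include <rte_crypto_sym.h>

/* sketch: describe one out-of-place job for the raw data path */
static void
fill_oop_vec_sketch(struct rte_crypto_sym_vec *vec,
	struct rte_crypto_sgl *src, struct rte_crypto_sgl *dst,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	int32_t *status)
{
	vec->num = 1;		/* one job in this descriptor */
	vec->src_sgl = src;	/* source scatter-gather list */
	vec->dest_sgl = dst;	/* NULL here would mean in-place */
	vec->iv = iv;
	vec->digest = digest;
	vec->aad = aad;		/* shares a union with auth_iv */
	vec->status = status;
}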

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index 6494019050..c59c25fe8f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -467,8 +467,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -564,8 +574,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 108e07ee7f..1b6cf10589 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -295,8 +295,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index abd36c7f1c..9894757657 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -529,9 +529,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -628,8 +637,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -728,8 +747,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -833,8 +862,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                       ` (5 preceding siblings ...)
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation Kai Ji
@ 2021-11-02 13:49     ` Kai Ji
  2021-11-03 15:45       ` Zhang, Roy Fan
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 8/8] crypto/qat: qat driver rework clean up Kai Ji
                       ` (3 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji, pablo.de.lara.guarch, adamx.dybkowski

test_mixed_auth_cipher: ensure enough space is allocated in ibuf and
obuf for the mbuf-to-vec conversion.
test_kasumi_decryption: update the cipher length.
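
A brief illustration of the two length changes (values hypothetical,
not taken from the test vectors): the mixed auth-cipher case now
appends the larger of the two padded lengths so the same mbuf has room
for either direction of the mbuf-to-vec conversion, and the KASUMI case
rounds the valid cipher bit-length up to a byte boundary before
building the operation.

#include <stdint.h>
#include <rte_common.h>

static unsigned int
mixed_case_data_len(unsigned int ciphertext_pad_len,
		unsigned int plaintext_pad_len)
{
	/* e.g. 48 and 64 -> append 64 bytes to both ibuf and obuf */
	return RTE_MAX(ciphertext_pad_len, plaintext_pad_len);
}

static uint32_t
kasumi_cipher_len_bits(uint32_t valid_cipher_len_bits)
{
	/* e.g. a 125-bit valid length is submitted as 128 bits */
	return RTE_ALIGN_CEIL(valid_cipher_len_bits, 8);
}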

Fixes: 681f540da52b ("cryptodev: do not use AAD in wireless algorithms")
Cc: pablo.de.lara.guarch@intel.com

Fixes: e847fc512817 ("test/crypto: add encrypted digest case for AES-CTR-CMAC")
Cc: adamx.dybkowski@intel.com

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 app/test/test_cryptodev.c | 58 +++++++++++++++++++++++++++++++--------
 1 file changed, 46 insertions(+), 12 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 814a0b401d..bfc6408eda 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -179,6 +179,10 @@ post_process_raw_dp_op(void *user_data,	uint32_t index __rte_unused,
 			RTE_CRYPTO_OP_STATUS_ERROR;
 }
 
+static struct crypto_testsuite_params testsuite_params = { NULL };
+struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
+static struct crypto_unittest_params unittest_params;
+
 void
 process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 		struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth,
@@ -193,6 +197,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	struct rte_crypto_sgl sgl, dest_sgl;
 	uint32_t max_len;
 	union rte_cryptodev_session_ctx sess;
+	uint64_t auth_end_iova;
 	uint32_t count = 0;
 	struct rte_crypto_raw_dp_ctx *ctx;
 	uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0,
@@ -202,6 +207,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	int ctx_service_size;
 	int32_t status = 0;
 	int enqueue_status, dequeue_status;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	int is_sgl = sop->m_src->nb_segs > 1;
+	int is_oop = 0;
 
 	ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 	if (ctx_service_size < 0) {
@@ -240,6 +248,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 
 	ofs.raw = 0;
 
+	if ((sop->m_dst != NULL) && (sop->m_dst != sop->m_src))
+		is_oop = 1;
+
 	if (is_cipher && is_auth) {
 		cipher_offset = sop->cipher.data.offset;
 		cipher_len = sop->cipher.data.length;
@@ -267,6 +278,31 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 		digest.va = (void *)sop->auth.digest.data;
 		digest.iova = sop->auth.digest.phys_addr;
 
+		if (is_sgl) {
+			uint32_t remaining_off = auth_offset + auth_len;
+			struct rte_mbuf *sgl_buf = sop->m_src;
+			if (is_oop)
+				sgl_buf = sop->m_dst;
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else {
+			auth_end_iova = rte_pktmbuf_iova(op->sym->m_src) +
+							 auth_offset + auth_len;
+		}
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_offset + auth_len < cipher_offset + cipher_len) &&
+				(digest.iova == auth_end_iova) && is_sgl)
+			max_len = RTE_MAX(max_len,
+				auth_offset + auth_len +
+				ut_params->auth_xform.auth.digest_length);
+
 	} else if (is_cipher) {
 		cipher_offset = sop->cipher.data.offset;
 		cipher_len = sop->cipher.data.length;
@@ -327,7 +363,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 
 	sgl.num = n;
 	/* Out of place */
-	if (sop->m_dst != NULL) {
+	if (is_oop) {
 		dest_sgl.vec = dest_data_vec;
 		vec.dest_sgl = &dest_sgl;
 		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
@@ -503,10 +539,6 @@ process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
 	return op;
 }
 
-static struct crypto_testsuite_params testsuite_params = { NULL };
-struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
-static struct crypto_unittest_params unittest_params;
-
 static int
 testsuite_setup(void)
 {
@@ -4077,9 +4109,9 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
 
 	/* Create KASUMI operation */
 	retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
-					tdata->cipher_iv.len,
-					tdata->ciphertext.len,
-					tdata->validCipherOffsetInBits.len);
+			tdata->cipher_iv.len,
+			RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+			tdata->validCipherOffsetInBits.len);
 	if (retval < 0)
 		return retval;
 
@@ -7310,6 +7342,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
 	unsigned int plaintext_len;
 	unsigned int ciphertext_pad_len;
 	unsigned int ciphertext_len;
+	unsigned int data_len;
 
 	struct rte_cryptodev_info dev_info;
 	struct rte_crypto_op *op;
@@ -7370,21 +7403,22 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
 	plaintext_len = ceil_byte_length(tdata->plaintext.len_bits);
 	ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
 	plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+	data_len = RTE_MAX(ciphertext_pad_len, plaintext_pad_len);
 
 	if (verify) {
 		ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-				ciphertext_pad_len);
+				data_len);
 		memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
 		if (op_mode == OUT_OF_PLACE)
-			rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len);
+			rte_pktmbuf_append(ut_params->obuf, data_len);
 		debug_hexdump(stdout, "ciphertext:", ciphertext,
 				ciphertext_len);
 	} else {
 		plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-				plaintext_pad_len);
+				data_len);
 		memcpy(plaintext, tdata->plaintext.data, plaintext_len);
 		if (op_mode == OUT_OF_PLACE)
-			rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
+			rte_pktmbuf_append(ut_params->obuf, data_len);
 		debug_hexdump(stdout, "plaintext:", plaintext, plaintext_len);
 	}
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v3 8/8] crypto/qat: qat driver rework clean up
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                       ` (6 preceding siblings ...)
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix Kai Ji
@ 2021-11-02 13:49     ` Kai Ji
  2021-11-03 15:46     ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
                       ` (2 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-02 13:49 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch renames the refactored code back to qat_sym.c and qat_asym.c
and updates the meson build file accordingly.
The QAT HW DP APIs and the QAT PMD functions are integrated into
qat_sym.c and qat_asym.c.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |    4 +-
 drivers/common/qat/qat_device.c              |    2 +-
 drivers/common/qat/qat_qp.c                  |    4 +-
 drivers/common/qat/qat_qp.h                  |    2 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |    4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |    4 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  164 +--
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |    2 +-
 drivers/crypto/qat/qat_asym.c                |  811 ++++++++------
 drivers/crypto/qat/qat_asym.h                |   90 +-
 drivers/crypto/qat/qat_asym_pmd.c            |  231 ----
 drivers/crypto/qat/qat_asym_pmd.h            |   54 -
 drivers/crypto/qat/qat_asym_refactor.c       |  994 -----------------
 drivers/crypto/qat/qat_asym_refactor.h       |  125 ---
 drivers/crypto/qat/qat_crypto.h              |    5 +-
 drivers/crypto/qat/qat_sym.c                 | 1009 ++++++------------
 drivers/crypto/qat/qat_sym.h                 |  141 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           |  975 -----------------
 drivers/crypto/qat/qat_sym_pmd.c             |  251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |   95 --
 drivers/crypto/qat/qat_sym_refactor.c        |  360 -------
 drivers/crypto/qat/qat_sym_refactor.h        |  402 -------
 drivers/crypto/qat/qat_sym_session.c         |    8 +-
 23 files changed, 997 insertions(+), 4740 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_asym_refactor.c
 delete mode 100644 drivers/crypto/qat/qat_asym_refactor.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_refactor.c
 delete mode 100644 drivers/crypto/qat/qat_sym_refactor.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index b7b18b6c0f..e53b51dcac 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -70,8 +70,8 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_refactor.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_refactor.c', 'qat_crypto.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
+            'qat_asym.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 437996f2e8..e1fc1c37ec 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 82adda0698..56facf0fbe 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -15,8 +15,8 @@
 #include "qat_logs.h"
 #include "qat_device.h"
 #include "qat_qp.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_comp.h"
 
 #define QAT_CQ_MAX_DEQ_RETRIES 10
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index b1829e122b..3b58786bd8 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -162,7 +162,7 @@ qat_qp_setup(struct qat_pci_device *qat_dev,
 
 int
 qat_qps_per_service(struct qat_pci_device *qat_dev,
-			enum qat_service_type service);
+		enum qat_service_type service);
 
 const struct qat_qp_hw_data *
 qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 86c124f6c8..72609703f9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 1b6cf10589..529878e3e7 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -5,8 +5,8 @@
 #include <rte_cryptodev.h>
 #include <cryptodev_pmd.h>
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
-#include "qat_asym_refactor.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 07020741bd..b69d4d9091 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -10,166 +10,6 @@
 #include "qat_sym_session.h"
 #include "qat_sym.h"
 
-#define QAT_BASE_GEN1_SYM_CAPABILITIES \
-	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
-		CAP_RNG(digest_size, 1, 20, 1)), \
-	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
-		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
-	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
-			CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(MD5_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SNOW3G_UIA2, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(KASUMI_F9, CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_XTS,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_CIPHER_CAP(KASUMI_F8,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(3DES_CBC,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(3DES_CTR,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(DES_CBC,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)), \
-	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,  CAP_SET(block_size, 8), \
-		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0))
-
-#define QAT_BASE_GEN1_ASYM_CAPABILITIES				\
-	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),			\
-	QAT_ASYM_CAP(MODINV, 0, 1, 512, 1),			\
-	QAT_ASYM_CAP(RSA,					\
-			((1 << RTE_CRYPTO_ASYM_OP_SIGN) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |	\
-			(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),	\
-			64, 512, 64)
-
-#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
-	QAT_SYM_CIPHER_CAP(ZUC_EEA3, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(ZUC_EIA3, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)) \
-
-#define QAT_EXTRA_GEN3_SYM_CAPABILITIES \
-	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 32, 32, 0), \
-		CAP_RNG(digest_size, 16, 16, 0), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0))
-
-#define QAT_BASE_GEN4_SYM_CAPABILITIES \
-	QAT_SYM_CIPHER_CAP(AES_CBC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(SHA1_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA224_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256_HMAC, CAP_SET(block_size, 64), \
-		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512_HMAC, CAP_SET(block_size, 128), \
-		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_XCBC_MAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(AES_CMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AUTH_CAP(NULL, CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(NULL,  CAP_SET(block_size, 1), \
-		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_PLAIN_AUTH_CAP(SHA1, CAP_SET(block_size, 64), \
-		CAP_RNG(digest_size, 1, 20, 1)), \
-	QAT_SYM_AUTH_CAP(SHA224, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA256, CAP_SET(block_size, 64), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA384, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_AUTH_CAP(SHA512, CAP_SET(block_size, 128), \
-		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)), \
-	QAT_SYM_CIPHER_CAP(AES_CTR,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)), \
-	QAT_SYM_AEAD_CAP(AES_GCM, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)), \
-	QAT_SYM_AEAD_CAP(AES_CCM,  CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2), \
-		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)), \
-	QAT_SYM_AUTH_CAP(AES_GMAC, CAP_SET(block_size, 16), \
-		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4), \
-		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)) \
-
 #define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
 	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
 
@@ -432,7 +272,7 @@ qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
 				cipher_len, out_sgl->vec,
 				QAT_SYM_SGL_MAX_NUMBER);
 
-		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			return UINT64_MAX;
 		}
@@ -506,7 +346,7 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
 				auth_ofs + auth_len, out_sgl->vec,
 				QAT_SYM_SGL_MAX_NUMBER);
 
-		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			return UINT64_MAX;
 		}
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 9894757657..4bbdfdf367 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -13,7 +13,7 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym_session.h"
-#include "qat_sym_refactor.h"
+#include "qat_sym.h"
 #include "qat_sym_session.h"
 #include "qat_crypto.h"
 #include "qat_crypto_pmd_gens.h"
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index c3bbdb6eb8..b80b81e8da 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,70 +1,147 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2021 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+#include "qat_device.h"
 
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_asym_init_op_cookie(void *op_cookie)
 {
-	size_t i;
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
 
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
 	}
-	return -1;
 }
 
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
 {
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
+	}
+
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
 
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
 }
 
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
 {
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
 }
 
-static size_t max_of(int n, ...)
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
 {
-	va_list args;
-	size_t len = 0, num;
-	int i;
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
 	}
-	va_end(args);
-
-	return len;
 }
 
-static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
+static void
+qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int in_size, int out_size)
 {
 	int i;
@@ -75,7 +152,8 @@ static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		memset(cookie->output_array[i], 0x0, out_size);
 }
 
-static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
+static void
+qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
 {
 	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
@@ -88,7 +166,229 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 				out_size);
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+static void
+qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length - n.length),
+				cookie->output_array[0] +
+				alg_size_in_bytes - n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
+			alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptograpic function except for DH
@@ -452,58 +752,14 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	} else {
 		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
 		return -(EINVAL);
-	}
-	return 0;
-}
-
-/*
- * Asym build request refactor function template,
- * will be removed in the later patchset.
- */
-static __rte_always_inline int
-refactor_qat_asym_build_request(__rte_unused void *in_op,
-		__rte_unused uint8_t *out_msg, __rte_unused void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	return 0;
-}
-
-/*
- * Asym process response refactor function template,
- * will be removed in the later patchset.
- */
-int
-refactor_qat_asym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp,
-		__rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count)
-{
+	}
 	return 0;
 }
 
-uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_enqueue_op_burst(qp,
-					refactor_qat_asym_build_request,
-					(void **)ops, nb_ops);
-}
-
-uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
-				refactor_qat_asym_process_response, nb_ops);
-}
-
-int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+static __rte_always_inline int
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -570,262 +826,161 @@ qat_asym_build_request(void *in_op,
 	return 0;
 }
 
-static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result + (asym_op->modinv.result.length
-				- n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
-			alg_size_in_bytes);
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
 }
 
-void
-qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
 }
 
 int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
 {
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
 		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
 		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
 	}
 
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
 
-unsigned int qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
 }
 
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 {
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
 
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
+	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 50c2641eba..55b1e1b2cc 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * Helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -58,26 +104,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
@@ -88,23 +114,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
-void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
-
 int
-refactor_qat_asym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp,
-		__rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count);
-
-uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
 
-
-uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
+void
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index addee384e3..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index fd6b406248..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
deleted file mode 100644
index 3a9b1d4054..0000000000
--- a/drivers/crypto/qat/qat_asym_refactor.c
+++ /dev/null
@@ -1,994 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 - 2021 Intel Corporation
- */
-
-
-#include <stdarg.h>
-
-#include <cryptodev_pmd.h>
-
-#include "icp_qat_fw_pke.h"
-#include "icp_qat_fw.h"
-#include "qat_pke_functionality_arrays.h"
-
-#include "qat_device.h"
-
-#include "qat_logs.h"
-#include "qat_asym_refactor.h"
-
-uint8_t qat_asym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
-{
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
-	}
-
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
-
-unsigned int
-qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
-}
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-
-static void
-qat_clear_arrays(struct qat_asym_op_cookie *cookie,
-		int in_count, int out_count, int in_size, int out_size)
-{
-	int i;
-
-	for (i = 0; i < in_count; i++)
-		memset(cookie->input_array[i], 0x0, in_size);
-	for (i = 0; i < out_count; i++)
-		memset(cookie->output_array[i], 0x0, out_size);
-}
-
-static void
-qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
-		enum rte_crypto_asym_xform_type alg, int in_size, int out_size)
-{
-	if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX)
-		qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS,
-				QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size,
-				out_size);
-	else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV)
-		qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS,
-				QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size,
-				out_size);
-}
-
-static void
-qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result +
-				(asym_op->modinv.result.length - n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-						RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes,
-			alg_size_in_bytes);
-}
-
-int
-qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
-		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
-{
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
-
-	return 1;
-}
-
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int
-qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
-{
-	size_t i;
-
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
-	}
-	return -1;
-}
-
-static size_t
-max_of(int n, ...)
-{
-	va_list args;
-	size_t len = 0, num;
-	int i;
-
-	va_start(args, n);
-	len = va_arg(args, size_t);
-
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
-	}
-	va_end(args);
-
-	return len;
-}
-
-static int
-qat_asym_check_nonzero(rte_crypto_param n)
-{
-	if (n.length < 8) {
-		/* Not a case for any cryptograpic function except for DH
-		 * generator which very often can be of one byte length
-		 */
-		size_t i;
-
-		if (n.data[n.length - 1] == 0x0) {
-			for (i = 0; i < n.length - 1; i++)
-				if (n.data[i] != 0x0)
-					break;
-			if (i == n.length - 1)
-				return -(EINVAL);
-		}
-	} else if (*(uint64_t *)&n.data[
-				n.length - 8] == 0) {
-		/* Very likely it is zeroed modulus */
-		size_t i;
-
-		for (i = 0; i < n.length - 8; i++)
-			if (n.data[i] != 0x0)
-				break;
-		if (i == n.length - 8)
-			return -(EINVAL);
-	}
-
-	return 0;
-}
-
-static int
-qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
-		struct icp_qat_fw_pke_request *qat_req,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	int err = 0;
-	size_t alg_size;
-	size_t alg_size_in_bytes;
-	uint32_t func_id = 0;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		err = qat_asym_check_nonzero(xform->modex.modulus);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
-					" aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
-			       xform->modex.exponent.length,
-			       xform->modex.modulus.length);
-		alg_size = alg_size_in_bytes << 3;
-
-		if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
-				sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
-				&alg_size, &func_id)) {
-			return -(EINVAL);
-		}
-
-		alg_size_in_bytes = alg_size >> 3;
-		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
-			asym_op->modex.base.length
-			, asym_op->modex.base.data,
-			asym_op->modex.base.length);
-		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
-			xform->modex.exponent.length
-			, xform->modex.exponent.data,
-			xform->modex.exponent.length);
-		rte_memcpy(cookie->input_array[2]  + alg_size_in_bytes -
-			xform->modex.modulus.length,
-			xform->modex.modulus.data,
-			xform->modex.modulus.length);
-		cookie->alg_size = alg_size;
-		qat_req->pke_hdr.cd_pars.func_id = func_id;
-		qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
-		qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
-				cookie->input_array[0],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
-				cookie->input_array[1],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, " ModExpmodulus",
-				cookie->input_array[2],
-				alg_size_in_bytes);
-#endif
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		err = qat_asym_check_nonzero(xform->modinv.modulus);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in modular multiplicative"
-					" inverse, aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
-				xform->modinv.modulus.length);
-		alg_size = alg_size_in_bytes << 3;
-
-		if (xform->modinv.modulus.data[
-				xform->modinv.modulus.length - 1] & 0x01) {
-			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
-					sizeof(MOD_INV_IDS_ODD)/
-					sizeof(*MOD_INV_IDS_ODD),
-					&alg_size, &func_id)) {
-				return -(EINVAL);
-			}
-		} else {
-			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
-					sizeof(MOD_INV_IDS_EVEN)/
-					sizeof(*MOD_INV_IDS_EVEN),
-					&alg_size, &func_id)) {
-				return -(EINVAL);
-			}
-		}
-
-		alg_size_in_bytes = alg_size >> 3;
-		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
-			asym_op->modinv.base.length
-				, asym_op->modinv.base.data,
-				asym_op->modinv.base.length);
-		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
-				xform->modinv.modulus.length
-				, xform->modinv.modulus.data,
-				xform->modinv.modulus.length);
-		cookie->alg_size = alg_size;
-		qat_req->pke_hdr.cd_pars.func_id = func_id;
-		qat_req->input_param_count =
-				QAT_ASYM_MODINV_NUM_IN_PARAMS;
-		qat_req->output_param_count =
-				QAT_ASYM_MODINV_NUM_OUT_PARAMS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
-				cookie->input_array[0],
-				alg_size_in_bytes);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
-				cookie->input_array[1],
-				alg_size_in_bytes);
-#endif
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		err = qat_asym_check_nonzero(xform->rsa.n);
-		if (err) {
-			QAT_LOG(ERR, "Empty modulus in RSA"
-					" inverse, aborting this operation");
-			return err;
-		}
-
-		alg_size_in_bytes = xform->rsa.n.length;
-		alg_size = alg_size_in_bytes << 3;
-
-		qat_req->input_param_count =
-				QAT_ASYM_RSA_NUM_IN_PARAMS;
-		qat_req->output_param_count =
-				QAT_ASYM_RSA_NUM_OUT_PARAMS;
-
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-						RTE_CRYPTO_ASYM_OP_VERIFY) {
-
-			if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
-					sizeof(RSA_ENC_IDS)/
-					sizeof(*RSA_ENC_IDS),
-					&alg_size, &func_id)) {
-				err = -(EINVAL);
-				QAT_LOG(ERR,
-					"Not supported RSA parameter size (key)");
-				return err;
-			}
-			alg_size_in_bytes = alg_size >> 3;
-			if (asym_op->rsa.op_type ==
-				RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0] +
-						alg_size_in_bytes -
-						asym_op->rsa.message.length
-						, asym_op->rsa.message.data,
-						asym_op->rsa.message.length);
-					break;
-				default:
-					err = -(EINVAL);
-					QAT_LOG(ERR,
-						"Invalid RSA padding (Encryption)");
-					return err;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-#endif
-			} else {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0],
-						asym_op->rsa.sign.data,
-						alg_size_in_bytes);
-					break;
-				default:
-					err = -(EINVAL);
-					QAT_LOG(ERR,
-						"Invalid RSA padding (Verify)");
-					return err;
-				}
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-#endif
-
-			}
-			rte_memcpy(cookie->input_array[1] +
-					alg_size_in_bytes -
-					xform->rsa.e.length
-					, xform->rsa.e.data,
-					xform->rsa.e.length);
-			rte_memcpy(cookie->input_array[2] +
-					alg_size_in_bytes -
-					xform->rsa.n.length,
-					xform->rsa.n.data,
-					xform->rsa.n.length);
-
-			cookie->alg_size = alg_size;
-			qat_req->pke_hdr.cd_pars.func_id = func_id;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
-					cookie->input_array[1],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
-					cookie->input_array[2],
-					alg_size_in_bytes);
-#endif
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0]
-						+ alg_size_in_bytes -
-						asym_op->rsa.cipher.length,
-						asym_op->rsa.cipher.data,
-						asym_op->rsa.cipher.length);
-					break;
-				default:
-					QAT_LOG(ERR,
-						"Invalid padding of RSA (Decrypt)");
-					return -(EINVAL);
-				}
-
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_SIGN) {
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(cookie->input_array[0]
-						+ alg_size_in_bytes -
-						asym_op->rsa.message.length,
-						asym_op->rsa.message.data,
-						asym_op->rsa.message.length);
-					break;
-				default:
-					QAT_LOG(ERR,
-						"Invalid padding of RSA (Signature)");
-					return -(EINVAL);
-				}
-			}
-			if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
-
-				qat_req->input_param_count =
-						QAT_ASYM_RSA_QT_NUM_IN_PARAMS;
-				if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS,
-						sizeof(RSA_DEC_CRT_IDS)/
-						sizeof(*RSA_DEC_CRT_IDS),
-						&alg_size, &func_id)) {
-					return -(EINVAL);
-				}
-				alg_size_in_bytes = alg_size >> 3;
-
-				rte_memcpy(cookie->input_array[1] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.p.length
-						, xform->rsa.qt.p.data,
-						xform->rsa.qt.p.length);
-				rte_memcpy(cookie->input_array[2] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.q.length
-						, xform->rsa.qt.q.data,
-						xform->rsa.qt.q.length);
-				rte_memcpy(cookie->input_array[3] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.dP.length
-						, xform->rsa.qt.dP.data,
-						xform->rsa.qt.dP.length);
-				rte_memcpy(cookie->input_array[4] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.dQ.length
-						, xform->rsa.qt.dQ.data,
-						xform->rsa.qt.dQ.length);
-				rte_memcpy(cookie->input_array[5] +
-						(alg_size_in_bytes >> 1) -
-						xform->rsa.qt.qInv.length
-						, xform->rsa.qt.qInv.data,
-						xform->rsa.qt.qInv.length);
-				cookie->alg_size = alg_size;
-				qat_req->pke_hdr.cd_pars.func_id = func_id;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "C",
-						cookie->input_array[0],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG, "p",
-						cookie->input_array[1],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG, "q",
-						cookie->input_array[2],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"dP", cookie->input_array[3],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"dQ", cookie->input_array[4],
-						alg_size_in_bytes);
-				QAT_DP_HEXDUMP_LOG(DEBUG,
-						"qInv", cookie->input_array[5],
-						alg_size_in_bytes);
-#endif
-			} else if (xform->rsa.key_type ==
-					RTE_RSA_KEY_TYPE_EXP) {
-				if (qat_asym_get_sz_and_func_id(
-						RSA_DEC_IDS,
-						sizeof(RSA_DEC_IDS)/
-						sizeof(*RSA_DEC_IDS),
-						&alg_size, &func_id)) {
-					return -(EINVAL);
-				}
-				alg_size_in_bytes = alg_size >> 3;
-				rte_memcpy(cookie->input_array[1] +
-						alg_size_in_bytes -
-						xform->rsa.d.length,
-						xform->rsa.d.data,
-						xform->rsa.d.length);
-				rte_memcpy(cookie->input_array[2] +
-						alg_size_in_bytes -
-						xform->rsa.n.length,
-						xform->rsa.n.data,
-						xform->rsa.n.length);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext",
-					cookie->input_array[0],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d",
-					cookie->input_array[1],
-					alg_size_in_bytes);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n",
-					cookie->input_array[2],
-					alg_size_in_bytes);
-#endif
-
-				cookie->alg_size = alg_size;
-				qat_req->pke_hdr.cd_pars.func_id = func_id;
-			} else {
-				QAT_LOG(ERR, "Invalid RSA key type");
-				return -(EINVAL);
-			}
-		}
-	} else {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		return -(EINVAL);
-	}
-	return 0;
-}
-
-static __rte_always_inline int
-qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	struct qat_asym_session *ctx;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct rte_crypto_asym_op *asym_op = op->asym;
-	struct icp_qat_fw_pke_request *qat_req =
-			(struct icp_qat_fw_pke_request *)out_msg;
-	struct qat_asym_op_cookie *cookie =
-				(struct qat_asym_op_cookie *)op_cookie;
-	int err = 0;
-
-	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)
-			get_asym_session_private_data(
-			op->asym->session, qat_asym_driver_id);
-		if (unlikely(ctx == NULL)) {
-			QAT_LOG(ERR, "Session has not been created for this device");
-			goto error;
-		}
-		rte_mov64((uint8_t *)qat_req,
-			(const uint8_t *)&(ctx->req_tmpl));
-		err = qat_asym_fill_arrays(asym_op, qat_req,
-				cookie, ctx->xform);
-		if (err) {
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			goto error;
-		}
-	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_fill_req_tmpl(qat_req);
-		err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
-				op->asym->xform);
-		if (err) {
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			goto error;
-		}
-	} else {
-		QAT_DP_LOG(ERR, "Invalid session/xform settings");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		goto error;
-	}
-
-	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
-	qat_req->pke_mid.src_data_addr = cookie->input_addr;
-	qat_req->pke_mid.dest_data_addr = cookie->output_addr;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_pke_request));
-#endif
-
-	return 0;
-error:
-
-	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-		sizeof(struct icp_qat_fw_pke_request));
-#endif
-
-	qat_req->output_param_count = 0;
-	qat_req->input_param_count = 0;
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
-	cookie->error |= err;
-
-	return 0;
-}
-
-static uint16_t
-qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
-			nb_ops);
-}
-
-static uint16_t
-qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
-				nb_ops);
-}
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	struct qat_cryptodev_private *internals;
-	struct rte_cryptodev *cryptodev;
-	struct qat_device_info *qat_dev_instance =
-		&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	uint64_t capa_size;
-	int i = 0;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -(EFAULT);
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
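
A note on the operand layout used throughout the removed qat_asym_fill_arrays()
and qat_asym_collect_response() code above: every big-number parameter is copied
into a fixed-size, zero-initialised PKE buffer so that it ends up right-aligned,
i.e. big-endian with leading zero padding. A minimal standalone sketch of that
convention follows; the helper name is hypothetical and not part of the patch.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Copy a big-endian operand into a zero-filled buffer of buf_len bytes so
 * that its least significant byte lands at the end of the buffer, mirroring
 * how the removed helpers fill cookie->input_array[]/output_array[].
 */
static inline void
pke_copy_right_aligned(uint8_t *buf, size_t buf_len,
		const uint8_t *param, size_t param_len)
{
	memset(buf, 0, buf_len);
	if (param_len > buf_len)	/* defensive clamp, illustration only */
		param_len = buf_len;
	memcpy(buf + buf_len - param_len, param, param_len);
}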
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
deleted file mode 100644
index 6d3d991bc7..0000000000
--- a/drivers/crypto/qat/qat_asym_refactor.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#ifndef _QAT_ASYM_H_
-#define _QAT_ASYM_H_
-
-#include <cryptodev_pmd.h>
-#include <rte_crypto_asym.h>
-#include "icp_qat_fw_pke.h"
-#include "qat_device.h"
-#include "qat_crypto.h"
-#include "icp_qat_fw.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-typedef uint64_t large_int_ptr;
-#define MAX_PKE_PARAMS	8
-#define QAT_PKE_MAX_LN_SIZE 512
-#define _PKE_ALIGN_ __rte_aligned(8)
-
-#define QAT_ASYM_MAX_PARAMS			8
-#define QAT_ASYM_MODINV_NUM_IN_PARAMS		2
-#define QAT_ASYM_MODINV_NUM_OUT_PARAMS		1
-#define QAT_ASYM_MODEXP_NUM_IN_PARAMS		3
-#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS		1
-#define QAT_ASYM_RSA_NUM_IN_PARAMS		3
-#define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
-#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
-
-/**
- * helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-struct qat_asym_op_cookie {
-	size_t alg_size;
-	uint64_t error;
-	rte_iova_t input_addr;
-	rte_iova_t output_addr;
-	large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
-	large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
-	union {
-		uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
-		uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
-	} _PKE_ALIGN_;
-	uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
-} _PKE_ALIGN_;
-
-struct qat_asym_session {
-	struct icp_qat_fw_pke_request req_tmpl;
-	struct rte_crypto_asym_xform *xform;
-};
-
-static inline void
-qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void
-qat_asym_build_req_tmpl(void *sess_private_data)
-{
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool);
-
-unsigned int
-qat_asym_session_get_private_size(struct rte_cryptodev *dev);
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess);
-
-/*
- * Process PKE response received from outgoing queue of QAT
- *
- * @param	op		a ptr to the rte_crypto_op referred to by
- *				the response message is returned in this param
- * @param	resp		icp_qat_fw_pke_resp message received from
- *				outgoing fw message queue
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	dequeue_err_count	Error count number pointer
- *
- */
-int
-qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
-		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
-
-void
-qat_asym_init_op_cookie(void *cookie);
-
-#endif /* _QAT_ASYM_H_ */
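
For context on the QAT_ASYM_CAP() helper defined in the header removed above:
it expands to one rte_cryptodev_capabilities entry for an asymmetric xform.
A hedged usage sketch is shown below; the op_types value and modlen bounds are
illustrative only and not taken from the driver's capability tables, and the
snippet assumes rte_cryptodev.h and rte_crypto_asym.h are included.

static const struct rte_cryptodev_capabilities asym_caps_example[] = {
	/* MODEX capability with an illustrative 1..512 byte modulus range */
	QAT_ASYM_CAP(MODEX, 0, 1, 512, 1),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};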
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 8fe3f0b061..203f74f46f 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min> <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
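
As a quick illustration of how the CAP_RNG()/CAP_RNG_ZERO() helpers documented
above are used when declaring a symmetric capability entry: the AES-CBC sizes
below are a plausible example, not copied from the driver's capability tables.

static const struct rte_cryptodev_capabilities aes_cbc_cap_example = {
	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	{.sym = {
		.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		{.cipher = {
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.block_size = 16,
			/* 128/192/256-bit keys, fixed 16-byte IV */
			CAP_RNG(key_size, 16, 32, 8),
			CAP_RNG(iv_size, 16, 16, 0)
		}, }
	}, }
};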
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 09c1e6f6a4..b78e6a6ef3 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,285 +11,107 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
+uint8_t qat_sym_driver_id;
 
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
-
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
  */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
-
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
+void
+qat_sym_init_op_cookie(void *op_cookie)
 {
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
 }
 
-/*
- * Sym build request refactor function template,
- * will be removed in the later patchset.
- */
 static __rte_always_inline int
-refactor_qat_sym_build_request(__rte_unused void *in_op,
-		__rte_unused uint8_t *out_msg, __rte_unused	void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		__rte_unused enum qat_device_gen dev_gen)
-{
-	return 0;
-}
-
-/*
- * Sym enqueue burst refactor function template,
- * will be removed in the later patchset.
- */
-uint16_t
-refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_enqueue_op_burst(qp, refactor_qat_sym_build_request,
-			(void **)ops, nb_ops);
-}
-
-/*
- * Sym dequeue burst refactor function template,
- * will be removed in the later patchset.
- */
-uint16_t
-refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return refactor_qat_dequeue_op_burst(qp, (void **)ops,
-				refactor_qat_sym_process_response, nb_ops);
-}
-
-int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
 
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -305,463 +127,284 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
+
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use the same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value; typically they do, but they could differ if the binaries
+	 * were built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in cypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-			}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		}
-
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
-
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support ensabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
+	return 0;
 
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return ret;
+}
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
+	return 0;
+}
+
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
 	}
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
 	}
-#endif
-	return 0;
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
 }
+
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 4801bd50a7..278abbfd3a 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -15,15 +15,75 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
+#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
+
 #define BYTE_LENGTH    8
 /* bpi is only used for partial blocks of DES and AES
  * so AES block len can be assumed as max len for iv, src and dst
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macros to add a sym capability.
+ * Parameters:
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -62,27 +122,14 @@ struct qat_sym_dp_ctx {
 	uint16_t cached_dequeue;
 };
 
-static __rte_always_inline int
-refactor_qat_sym_process_response(__rte_unused void **op,
-		__rte_unused uint8_t *resp, __rte_unused void *op_cookie,
-		__rte_unused uint64_t *dequeue_err_count)
-{
-	return 0;
-}
-
 uint16_t
-refactor_qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
 uint16_t
-refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -237,17 +284,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -306,6 +347,8 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	return 1;
 }
 
 int
@@ -317,6 +360,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -331,5 +420,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index af75ac2011..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,975 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym_refactor.h"
-#include "qat_sym_pmd.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-		cipher_param->spc_aad_addr = aad->iova;
-		cipher_param->spc_auth_res_addr = digest->iova;
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index b835245f17..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 0dc0c6f0d9..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_refactor.c b/drivers/crypto/qat/qat_sym_refactor.c
deleted file mode 100644
index 82f078ff1e..0000000000
--- a/drivers/crypto/qat/qat_sym_refactor.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
- */
-
-#include <openssl/evp.h>
-
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_crypto_sym.h>
-#include <rte_bus_pci.h>
-#include <rte_byteorder.h>
-
-#include "qat_sym_refactor.h"
-#include "qat_crypto.h"
-#include "qat_qp.h"
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static __rte_always_inline int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
-{
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	void *sess = (void *)opaque[0];
-	qat_sym_build_request_t build_request = (void *)opaque[1];
-	struct qat_sym_session *ctx = NULL;
-
-	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
-		ctx = get_sym_session_private_data(op->sym->session,
-				qat_sym_driver_id);
-		if (unlikely(!ctx)) {
-			QAT_DP_LOG(ERR, "No session for this device");
-			return -EINVAL;
-		}
-		if (sess != ctx) {
-			struct rte_cryptodev *cdev;
-			struct qat_cryptodev_private *internals;
-			enum rte_proc_type_t proc_type;
-
-			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
-			internals = cdev->data->dev_private;
-			proc_type = rte_eal_process_type();
-
-			if (internals->qat_dev->qat_dev_gen != dev_gen) {
-				op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-				return -EINVAL;
-			}
-
-			if (unlikely(ctx->build_request[proc_type] == NULL)) {
-				int ret =
-				qat_sym_gen_dev_ops[dev_gen].set_session(
-					(void *)cdev, sess);
-				if (ret < 0) {
-					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-					return -EINVAL;
-				}
-			}
-
-			build_request = ctx->build_request[proc_type];
-			opaque[0] = (uintptr_t)ctx;
-			opaque[1] = (uintptr_t)build_request;
-		}
-	}
-
-#ifdef RTE_LIB_SECURITY
-	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-		if (sess != (void *)op->sym->sec_session) {
-			struct rte_cryptodev *cdev;
-			struct qat_cryptodev_private *internals;
-			enum rte_proc_type_t proc_type;
-
-			ctx = get_sec_session_private_data(
-					op->sym->sec_session);
-			if (unlikely(!ctx)) {
-				QAT_DP_LOG(ERR, "No session for this device");
-				return -EINVAL;
-			}
-			if (unlikely(ctx->bpi_ctx == NULL)) {
-				QAT_DP_LOG(ERR, "QAT PMD only supports security"
-						" operation requests for"
-						" DOCSIS, op (%p) is not for"
-						" DOCSIS.", op);
-				return -EINVAL;
-			} else if (unlikely(((op->sym->m_dst != NULL) &&
-					(op->sym->m_dst != op->sym->m_src)) ||
-					op->sym->m_src->nb_segs > 1)) {
-				QAT_DP_LOG(ERR, "OOP and/or multi-segment"
-						" buffers not supported for"
-						" DOCSIS security.");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
-			internals = cdev->data->dev_private;
-			proc_type = rte_eal_process_type();
-
-			if (internals->qat_dev->qat_dev_gen != dev_gen) {
-				op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-				return -EINVAL;
-			}
-
-			if (unlikely(ctx->build_request[proc_type] == NULL)) {
-				int ret =
-				qat_sym_gen_dev_ops[dev_gen].set_session(
-					(void *)cdev, sess);
-				if (ret < 0) {
-					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-					return -EINVAL;
-				}
-			}
-
-			sess = (void *)op->sym->sec_session;
-			build_request = ctx->build_request[proc_type];
-			opaque[0] = (uintptr_t)sess;
-			opaque[1] = (uintptr_t)build_request;
-		}
-	}
-#endif
-	else { /* RTE_CRYPTO_OP_SESSIONLESS */
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
-		return -1;
-	}
-
-	return build_request(op, (void *)ctx, out_msg, op_cookie);
-}
-
-uint16_t
-qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, qat_sym_build_request,
-			(void **)ops, nb_ops);
-}
-
-uint16_t
-qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops,
-				qat_sym_process_response, nb_ops);
-}
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -(EFAULT);
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else {
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-	}
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_refactor.h b/drivers/crypto/qat/qat_sym_refactor.h
deleted file mode 100644
index 44feca8251..0000000000
--- a/drivers/crypto/qat/qat_sym_refactor.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_H_
-#define _QAT_SYM_H_
-
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_net_crc.h>
-#endif
-
-#include <openssl/evp.h>
-
-#include "qat_common.h"
-#include "qat_sym_session.h"
-#include "qat_crypto.h"
-#include "qat_logs.h"
-
-#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
-
-#define BYTE_LENGTH    8
-/* bpi is only used for partial blocks of DES and AES
- * so AES block len can be assumed as max len for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/* Macro to add a capability */
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SYM_SGL_MAX_NUMBER	16
-
-/* Maximum data length for single pass GMAC: 2^14-1 */
-#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
-
-struct qat_sym_session;
-
-struct qat_sym_sgl {
-	qat_sgl_hdr;
-	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-struct qat_sym_op_cookie {
-	struct qat_sym_sgl qat_sgl_src;
-	struct qat_sym_sgl qat_sgl_dst;
-	phys_addr_t qat_sgl_src_phys_addr;
-	phys_addr_t qat_sgl_dst_phys_addr;
-	union {
-		/* Used for Single-Pass AES-GMAC only */
-		struct {
-			struct icp_qat_hw_cipher_algo_blk cd_cipher
-					__rte_packed __rte_cache_aligned;
-			phys_addr_t cd_phys_addr;
-		} spc_gmac;
-	} opt;
-};
-
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
-uint16_t
-qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
-
-uint16_t
-qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops);
-
-/** Encrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt the IV, then XOR this with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_encrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_encrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
-	return -EINVAL;
-}
-
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len > 0 &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
-		/* Encrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset;
-
-		last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely(sym_op->m_dst != NULL))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = dst - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG,
-				"BPI: dst before post-process:",
-				dst, last_block_len);
-#endif
-		bpi_cipher_encrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
-				last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG,
-				"BPI: dst after post-process:",
-				dst, last_block_len);
-#endif
-	}
-	return sym_op->cipher.data.length - last_block_len;
-}
-
-#ifdef RTE_LIB_SECURITY
-static inline void
-qat_crc_verify(struct qat_sym_session *ctx, struct rte_crypto_op *op)
-{
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint32_t crc_data_ofs, crc_data_len, crc;
-	uint8_t *crc_data;
-
-	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT &&
-			sym_op->auth.data.length != 0) {
-
-		crc_data_ofs = sym_op->auth.data.offset;
-		crc_data_len = sym_op->auth.data.length;
-		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
-				crc_data_ofs);
-
-		crc = rte_net_crc_calc(crc_data, crc_data_len,
-				RTE_NET_CRC32_ETH);
-
-		if (crc != *(uint32_t *)(crc_data + crc_data_len))
-			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	}
-}
-
-static inline void
-qat_crc_generate(struct qat_sym_session *ctx,
-			struct rte_crypto_op *op)
-{
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint32_t *crc, crc_data_len;
-	uint8_t *crc_data;
-
-	if (ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT &&
-			sym_op->auth.data.length != 0 &&
-			sym_op->m_src->nb_segs == 1) {
-
-		crc_data_len = sym_op->auth.data.length;
-		crc_data = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
-				sym_op->auth.data.offset);
-		crc = (uint32_t *)(crc_data + crc_data_len);
-		*crc = rte_net_crc_calc(crc_data, crc_data_len,
-				RTE_NET_CRC32_ETH);
-	}
-}
-
-static inline void
-qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
-{
-	struct rte_crypto_op *op;
-	struct qat_sym_session *ctx;
-	uint16_t i;
-
-	for (i = 0; i < nb_ops; i++) {
-		op = (struct rte_crypto_op *)ops[i];
-
-		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-			ctx = (struct qat_sym_session *)
-				get_sec_session_private_data(
-					op->sym->sec_session);
-
-			if (ctx == NULL || ctx->bpi_ctx == NULL)
-				continue;
-
-			qat_crc_generate(ctx, op);
-		}
-	}
-}
-#endif
-
-static __rte_always_inline int
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
-		uint64_t *dequeue_err_count __rte_unused)
-{
-	struct icp_qat_fw_comn_resp *resp_msg =
-			(struct icp_qat_fw_comn_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque_data);
-	struct qat_sym_session *sess;
-	uint8_t is_docsis_sec;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
-			sizeof(struct icp_qat_fw_comn_resp));
-#endif
-
-#ifdef RTE_LIB_SECURITY
-	if (rx_op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-		/*
-		 * Assuming at this point that if it's a security
-		 * op, that this is for DOCSIS
-		 */
-		sess = (struct qat_sym_session *)
-				get_sec_session_private_data(
-				rx_op->sym->sec_session);
-		is_docsis_sec = 1;
-	} else
-#endif
-	{
-		sess = (struct qat_sym_session *)
-				get_sym_session_private_data(
-				rx_op->sym->session,
-				qat_sym_driver_id);
-		is_docsis_sec = 0;
-	}
-
-	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
-			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
-			resp_msg->comn_hdr.comn_status)) {
-
-		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	} else {
-		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-		if (sess->bpi_ctx) {
-			qat_bpicipher_postprocess(sess, rx_op);
-#ifdef RTE_LIB_SECURITY
-			if (is_docsis_sec)
-				qat_crc_verify(sess, rx_op);
-#endif
-		}
-	}
-
-	if (sess->is_single_pass_gmac) {
-		struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *) op_cookie;
-		memset(cookie->opt.spc_gmac.cd_cipher.key, 0,
-				sess->auth_key_length);
-	}
-
-	*op = (void *)rx_op;
-
-	return 1;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
-
-int
-qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
-
-void
-qat_sym_init_op_cookie(void *cookie);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-static __rte_always_inline void
-qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
-		struct qat_sym_session *ctx,
-		struct rte_crypto_vec *vec, uint32_t vec_len,
-		struct rte_crypto_va_iova_ptr *cipher_iv,
-		struct rte_crypto_va_iova_ptr *auth_iv,
-		struct rte_crypto_va_iova_ptr *aad,
-		struct rte_crypto_va_iova_ptr *digest)
-{
-	uint32_t i;
-
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	for (i = 0; i < vec_len; i++)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
-	if (cipher_iv && ctx->cipher_iv.length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
-				ctx->cipher_iv.length);
-	if (auth_iv && ctx->auth_iv.length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
-				ctx->auth_iv.length);
-	if (aad && ctx->aad_len > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
-				ctx->aad_len);
-	if (digest && ctx->digest_length > 0)
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
-				ctx->digest_length);
-}
-#else
-static __rte_always_inline void
-qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
-		struct qat_sym_session *ctx __rte_unused,
-		struct rte_crypto_vec *vec __rte_unused,
-		uint32_t vec_len __rte_unused,
-		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
-		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
-		struct rte_crypto_va_iova_ptr *aad __rte_unused,
-		struct rte_crypto_va_iova_ptr *digest __rte_unused)
-{}
-#endif
-
-#endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 52837d7c9c..27cdeb1de2 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
@@ -600,11 +600,11 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 	session->is_auth = 1;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix Kai Ji
@ 2021-11-03 15:45       ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-11-03 15:45 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai, De Lara Guarch, Pablo, adamx.dybkowski

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, November 2, 2021 1:49 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; adamx.dybkowski@intel.com
> Subject: [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix
> 
> test_mixed_auth_cipher: ensure enough space is allocated in ibuf & obuf
> for the mbuf to vec conversion
> test_kasumi_decryption: cipher length update.
> 
> Fixes: 681f540da52b ("cryptodev: do not use AAD in wireless algorithms")
> Cc: pablo.de.lara.guarch@intel.com
> 
> Fixes: e847fc512817 ("test/crypto: add encrypted digest case for AES-CTR-
> CMAC")
> Cc: adamx.dybkowski@intel.com
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation Kai Ji
@ 2021-11-03 15:46       ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-11-03 15:46 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, November 2, 2021 1:49 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation
> 
> This patch adds SGL OOP support to the qat driver
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                       ` (7 preceding siblings ...)
  2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 8/8] crypto/qat: qat driver rework clean up Kai Ji
@ 2021-11-03 15:46     ` Zhang, Roy Fan
  2021-11-03 18:49     ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
  10 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2021-11-03 15:46 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: Ji, Kai

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Kai Ji
> Sent: Tuesday, November 2, 2021 1:49 PM
> To: dev@dpdk.org
> Cc: Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto
> datapatch rework
> 
> This patch reworks the QAT symmetric crypto datapath implementation so
> that request building is separated per generation and the crypto
> operations under the raw datapath API implementation are unified.
> 
> In addition this patchset also enables QAT OOP support in the raw
> datapath API implementation.
> 
> This patch depends on
> http://patchwork.dpdk.org/project/dpdk/cover/20211027155055.32264-1-
> kai.ji@intel.com/
> 
> v3:
> - separate a single patch 6 into two different patches
> 
> v2:
> - review comments addressed
> 
Series-acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* Re: [dpdk-dev] [EXT] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                       ` (8 preceding siblings ...)
  2021-11-03 15:46     ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
@ 2021-11-03 18:49     ` Akhil Goyal
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
  10 siblings, 0 replies; 156+ messages in thread
From: Akhil Goyal @ 2021-11-03 18:49 UTC (permalink / raw)
  To: Kai Ji, dev

> This patch reworks the QAT symmetric crypto datapath implementation so
> that request building is separated per generation and the crypto
> operations under the raw datapath API implementation are unified.
> 
> In addition this patchset also enables QAT OOP support in the raw
> datapath API implementation.
> 

The patches do not apply cleanly.
Please rebase into a single series.
Rework patch titles as follows
[v7,1/9] common/qat: define gen specific structs and functions
[v7,2/9] common/qat: add gen specific device implementation
[v7,3/9] common/qat: add gen specific queue pair function
[v7,4/9] common/qat: add gen specific queue implementation
[v7,5/9] compress/qat: define gen specific structs and functions
[v7,6/9] compress/qat: add gen specific implementation
[v7,7/9] crypto/qat: unify device private data structure
[v7,8/9] crypto/qat: define gen specific structs and functions
[v7,9/9] crypto/qat: add gen specific implementation

[v3,1/8] crypto/qat: refactor driver skeleton
[v3,2/8] crypto/qat: refactor symmetric ops
[v3,3/8] crypto/qat: refactor asymmetric ops
[v3,4/8] crypto/qat: rework session APIs
[v3,5/8] crypto/qat: rework data path
[v3,6/8] crypto/qat: support out of place SG list
[v3,7/8] test/crypto: fix ----
The title and description do not show what is happening and what the patch is fixing.
[v3,8/8] crypto/qat: rework ----
Can you think of a better title for this one? Also, the description does not specify
on what basis the rework is done.


> http://patchwork.dpdk.org/project/dpdk/cover/20211027155055.32264-1-kai.ji@intel.com/
> 
> v3:
> - sperate a single patch 6 to two different patches
> 
> v2:
> - review comments addressed
> 
> Kai Ji (8):
>   crypro/qat: qat driver refactor skeleton
>   crypto/qat: qat driver sym op refactor
>   crypto/qat: qat driver asym op refactor
>   crypto/qat: qat driver session method rework
>   crypto/qat: qat driver datapath rework
>   crypto/qat: support sgl oop operation
>   app/test: cryptodev test fix
>   crypto/qat: qat driver rework clean up
> 
>  app/test/test_cryptodev.c                    |  58 +-
>  drivers/common/qat/meson.build               |   4 +-
>  drivers/common/qat/qat_device.c              |   2 +-
>  drivers/common/qat/qat_qp.c                  |  40 +-
>  drivers/common/qat/qat_qp.h                  |  70 +-
>  drivers/compress/qat/qat_comp_pmd.c          |  12 +-
>  drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   7 +
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  90 ++
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 487 +++++++++
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 253 +++++
>  drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 911 +++++++++++++++++
>  drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 939 ++++++++++++++++++
>  drivers/crypto/qat/qat_asym.c                | 786 +++++++++------
>  drivers/crypto/qat/qat_asym.h                |  77 +-
>  drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
>  drivers/crypto/qat/qat_asym_pmd.h            |  54 -
>  drivers/crypto/qat/qat_crypto.c              |   1 +
>  drivers/crypto/qat/qat_crypto.h              |  14 +-
>  drivers/crypto/qat/qat_sym.c                 | 978 ++++++------------
>  drivers/crypto/qat/qat_sym.h                 | 141 ++-
>  drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
>  drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
>  drivers/crypto/qat/qat_sym_pmd.h             |  95 --
>  drivers/crypto/qat/qat_sym_session.c         | 114 +--
>  drivers/crypto/qat/qat_sym_session.h         |   8 +-
>  25 files changed, 3861 insertions(+), 2745 deletions(-)
>  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
>  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
>  delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
>  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
>  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
> 
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 00/11] drivers/qat: QAT symmetric crypto datapatch rework
  2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                       ` (9 preceding siblings ...)
  2021-11-03 18:49     ` [dpdk-dev] [EXT] " Akhil Goyal
@ 2021-11-05  0:19     ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 01/11] common/qat: define build op request and dequeue op Kai Ji
                         ` (10 more replies)
  10 siblings, 11 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch reworks the QAT symmetric crypto datapath implementation so that
request building is separated per generation and the crypto operations under
the raw datapath API implementation are unified.

In addition this patchset also enables QAT OOP support in the raw datapath
API implementation.

This patch depends on http://patchwork.dpdk.org/project/dpdk/cover/20211027155055.32264-1-kai.ji@intel.com/
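
For illustration, a minimal sketch of the dispatch model this rework moves
to (placeholder names only, not the driver's actual symbols):

#include <stdint.h>

/* Minimal sketch: each device generation provides its own request-building
 * routine and the session caches the pointer, so the common enqueue path
 * stays generation-agnostic.
 */
typedef int (*example_build_request_t)(void *op, uint8_t *out_msg,
		void *op_cookie);

struct example_session {
	example_build_request_t build_request; /* chosen at session init */
};

static inline int
example_enqueue_one(struct example_session *sess, void *op,
		uint8_t *out_msg, void *op_cookie)
{
	/* generation-specific request building happens behind the pointer */
	return sess->build_request(op, out_msg, op_cookie);
}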

v4:
- patchset broken down and reconstructed

v3:
- separate a single patch 6 into two different patches

v2:
- review comments addressed

Kai Ji (11):
  common/qat: define build op request and dequeue op
  crypto/qat: sym build op request specific implementation
  crypto/qat: rework session APIs
  crypto/qat: asym build op request specific implementation
  crypto/qat: unify sym pmd apis
  crypto/qat: unify qat asym pmd apis
  crypto/qat: op burst data path rework
  compress/qat: comp dequeue burst update
  crypto/qat: raw dp api integration
  crypto/qat: support out of place SG list
  test/cryptodev: fix incomplete data length

 app/test/test_cryptodev.c                    |  27 +-
 drivers/common/qat/meson.build               |   4 +-
 drivers/common/qat/qat_device.c              |   2 +-
 drivers/common/qat/qat_qp.c                  |  40 +-
 drivers/common/qat/qat_qp.h                  |  70 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   7 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 487 +++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 253 +++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 911 +++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 939 ++++++++++++++++++
 drivers/crypto/qat/qat_asym.c                | 762 ++++++++------
 drivers/crypto/qat/qat_asym.h                |  77 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |  14 +-
 drivers/crypto/qat/qat_sym.c                 | 978 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 141 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 114 +--
 drivers/crypto/qat/qat_sym_session.h         |   8 +-
 25 files changed, 3820 insertions(+), 2732 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

--
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 01/11] common/qat: define build op request and dequeue op
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 02/11] crypto/qat: sym build op request specific implementation Kai Ji
                         ` (9 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch introduces build request op and dequeue op function
pointers to the qat queue pair implementation. These two functions
are assigned during qat session generation, based on the type of
crypto operation.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
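
A rough usage sketch of the reworked burst API (illustrative only; the
builder below is a placeholder, and the PMDs in this patch still pass NULL
until real build functions are wired in by later patches). It assumes the
qat_qp.h declarations added by this patch:

#include <stdint.h>

#include "qat_qp.h"

static int
example_sym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
		uint64_t *opaque, enum qat_device_gen dev_gen)
{
	/* translate the crypto op into a firmware request here */
	(void)in_op;
	(void)out_msg;
	(void)op_cookie;
	(void)opaque;
	(void)dev_gen;
	return 0;
}

static uint16_t
example_sym_enqueue_burst(void *qp, void **ops, uint16_t nb_ops)
{
	/* pass the service-specific builder into the generic burst call */
	return qat_enqueue_op_burst(qp, example_sym_build_request,
			ops, nb_ops);
}
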
 drivers/common/qat/qat_qp.c          |  8 +++--
 drivers/common/qat/qat_qp.h          | 52 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  2 +-
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h |  6 ++++
 6 files changed, 67 insertions(+), 9 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index cde421eb77..a026b90ca7 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -550,7 +550,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -817,7 +819,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..f2ca0173de 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Typedef for the qat_op_build_request_t function pointer, passed in as an
+ * argument to enqueue op burst, where a build request is assigned based on
+ * the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_meg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context, which may be useful
+ *    between 2 enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - error code otherwise
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Typedef for the qat_op_dequeue_t function pointer, passed in as an
+ * argument to dequeue op burst, where a dequeue op is assigned based on
+ * the type of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - error code otherwise
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 9b24d46e97..31de526056 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..33f977a4e3 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -63,6 +63,11 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +112,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 02/11] crypto/qat: sym build op request specific implementation
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 01/11] common/qat: define build op request and dequeue op Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 03/11] crypto/qat: rework session APIs Kai Ji
                         ` (8 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds the symmetric crypto build op request function
pointer implementations specific to QAT generation 1.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
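
For orientation, a condensed sketch of how a gen1 cipher-only build routine
is expected to compose the helpers added below (illustrative only, error
handling trimmed; not the literal function added by this series). It
assumes the declarations from qat_crypto_pmd_gens.h in this patch:

static int
example_build_cipher_gen1(struct rte_crypto_op *op,
		struct qat_sym_session *ctx,
		struct icp_qat_fw_la_bulk_req *req,
		struct qat_sym_op_cookie *cookie)
{
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_vec out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_sgl in_sgl = { .vec = in_vec, .num = 0 };
	struct rte_crypto_sgl out_sgl = { .vec = out_vec, .num = 0 };
	struct rte_crypto_va_iova_ptr iv = {0}, aad = {0}, digest = {0};
	union rte_crypto_sym_ofs ofs;
	int32_t data_len;

	/* translate the op into src/dst vectors plus IV/AAD/digest ptrs */
	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl,
			&out_sgl, &iv, &aad, &digest);
	if (ofs.raw == UINT64_MAX)
		return -1;

	/* fill the firmware request's source/destination descriptors */
	data_len = qat_sym_build_req_set_data(req, op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (data_len < 0)
		return -1;

	/* set cipher offset, length and IV in the request */
	enqueue_one_cipher_job_gen1(ctx, req, &iv, ofs, (uint32_t)data_len);
	return 0;
}
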
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 830 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 186 +++++
 2 files changed, 1016 insertions(+)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..783fdcaa34 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_auth(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_chk_len_in_bits_cipher(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_chk_len_in_bits_cipher(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_chk_len_in_bits_auth(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_chk_len_in_bits_cipher(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_chk_len_in_bits_auth(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e156f194e2..5b96e626f8 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -180,6 +180,192 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread
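
The four gen1 build-op helpers above share a single prototype, which is what lets a session store one builder pointer per process type and the generic enqueue path dispatch through it. The snippet below is a minimal, self-contained sketch of that dispatch pattern; the names demo_session, demo_build_fn and demo_enqueue_one are stand-ins for illustration and are not part of the patch.

/* Dispatch sketch with stand-in types; illustrative only, not driver code. */
#include <stdint.h>

struct demo_session;

/* Same shape as the qat_sym_build_op_*_gen1() helpers above. */
typedef int (*demo_build_fn)(void *in_op, struct demo_session *ctx,
		uint8_t *out_msg, void *op_cookie);

struct demo_session {
	demo_build_fn build_request[2]; /* indexed by process type */
};

static inline int
demo_enqueue_one(struct demo_session *ctx, void *op, uint8_t *ring_slot,
		void *cookie, unsigned int proc_type)
{
	/* The enqueue loop only forwards the op, the ring slot and the
	 * cookie; the builder chosen at session-setup time does the rest.
	 */
	return ctx->build_request[proc_type](op, ctx, ring_slot, cookie);
}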

* [dpdk-dev] [dpdk-dev v4 03/11] crypto/qat: rework session APIs
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 01/11] common/qat: define build op request and dequeue op Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 02/11] crypto/qat: sym build op request specific implementation Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 04/11] crypto/qat: asym build op request specific implementation Kai Ji
                         ` (7 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch introduces set_session methods to the QAT generation dev ops and
replaces min_qat_dev_gen with dev_id; the session becomes invalid if the
device generation ID does not match during session init or crypto ops.
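
As a rough, self-contained sketch of the dispatch this introduces (stand-in names demo_gen_ops, demo_set_session_t and demo_session_finalize; not driver code): session finalisation looks up the set_session hook registered for the device generation and lets it reject unsupported algorithm mixes.

/* Per-generation set_session dispatch sketch; illustrative only. */
#include <errno.h>
#include <stddef.h>

typedef int (*demo_set_session_t)(void *cryptodev, void *session);

struct demo_gen_ops {
	demo_set_session_t set_session;
};

/* One slot per device generation, filled by per-generation init code. */
static struct demo_gen_ops demo_gen_ops[4];

static int
demo_session_finalize(unsigned int dev_gen, void *dev, void *sess)
{
	/* A generation that never registered a hook cannot validate the
	 * session, so treat it as unsupported here.
	 */
	if (dev_gen >= 4 || demo_gen_ops[dev_gen].set_session == NULL)
		return -ENOTSUP;

	return demo_gen_ops[dev_gen].set_session(dev, sess);
}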

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  87 ++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 253 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 121 +++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  62 +++++
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |   5 +
 drivers/crypto/qat/qat_sym.c                 |   8 +-
 drivers/crypto/qat/qat_sym_session.c         | 106 +-------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 9 files changed, 544 insertions(+), 101 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..621ff85638 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == 0 || ret != -ENOTSUP)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP for mixed algorithms it cannot handle,
+	 * but some of those are not supported by GEN2 either, so check here
+	 */
+	if ((qat_private->internal_capabilities &
+			QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+		return -ENOTSUP;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,9 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
-
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN2].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..6baf1810ed 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -143,6 +143,256 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * this is addressed by GEN3
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +400,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +413,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..fa6a7ee2fd 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -103,11 +103,131 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * this is addressed by GEN4
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +241,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 5b96e626f8..bcdf0172fe 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -6,6 +6,7 @@
 #ifdef RTE_LIB_SECURITY
 #include <rte_security_driver.h>
 #endif
+#include <cryptodev_pmd.h>
 
 #include "adf_transport_access_macros.h"
 #include "icp_qat_fw.h"
@@ -454,12 +455,73 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (!build_request)
+		return 0;
+	ctx->build_request[proc_type] = build_request;
+
+	if (!handle_mixed)
+		return 0;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 84c26a8062..ee878e47f1 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2021 Intel Corporation
  */
 
+#include <cryptodev_pmd.h>
 #include "qat_device.h"
 #include "qat_qp.h"
 #include "qat_crypto.h"
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..a32c716d28 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -48,15 +48,20 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 93b257522b..7481607ff8 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -212,7 +212,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -277,12 +277,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..52837d7c9c 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
 	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in,
+		uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 33f977a4e3..581bce3790 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -100,7 +100,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 04/11] crypto/qat: asym build op request specific implementation
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
                         ` (2 preceding siblings ...)
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 03/11] crypto/qat: rework session APIs Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 05/11] crypto/qat: unify sym pmd apis Kai Ji
                         ` (6 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds the QAT generation specific asymmetric crypto session and
build op request implementations.
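
The asym builder and response handler now take the same widened argument lists as the sym path (an opaque pointer and a dequeue error counter that the asym code does not use yet), so the common queue-pair code passes NULL for them. Below is a self-contained sketch of that call shape using stand-in names (demo_dev_gen, demo_asym_build, demo_enqueue_asym); it is illustrative only and not part of the patch.

/* Stand-in sketch of the widened asym build-request call; illustrative only. */
#include <stddef.h>
#include <stdint.h>

enum demo_dev_gen { DEMO_GEN1, DEMO_GEN2, DEMO_GEN3, DEMO_GEN4 };

/* Mirrors the new five-argument prototype; opaque is unused by asym so far. */
static int
demo_asym_build(void *in_op, uint8_t *out_msg, void *op_cookie,
		uint64_t *opaque, enum demo_dev_gen dev_gen)
{
	(void)in_op; (void)out_msg; (void)op_cookie;
	(void)opaque; (void)dev_gen;
	return 0;
}

static int
demo_enqueue_asym(void *op, uint8_t *ring_base, uint32_t tail, void *cookie,
		enum demo_dev_gen gen)
{
	/* The queue-pair layer passes NULL for the opaque slot, matching
	 * the qat_qp.c change in this patch.
	 */
	return demo_asym_build(op, ring_base + tail, cookie, NULL, gen);
}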

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c                  |   5 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   7 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/qat_asym.c                | 620 ++++++++++---------
 drivers/crypto/qat/qat_asym.h                |  65 +-
 5 files changed, 389 insertions(+), 311 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index a026b90ca7..a3db053c82 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -622,7 +622,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -850,7 +850,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..99d90fa56c 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 783fdcaa34..6277211b35 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -853,6 +853,9 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 struct qat_capabilities_info
 qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index f893508030..e2a5fb9260 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,69 +1,115 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2021 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+#include "qat_device.h"
 
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
 {
-	size_t i;
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
 
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
 		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
 	}
-	return -1;
-}
 
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
 
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
 }
 
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
 {
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
 }
 
-static size_t max_of(int n, ...)
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
 {
-	va_list args;
-	size_t len = 0, num;
-	int i;
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
 	}
-	va_end(args);
-
-	return len;
 }
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
 {
@@ -106,7 +152,230 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length
+				- n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptograpic function except for DH
@@ -475,10 +744,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -545,263 +813,7 @@ qat_asym_build_request(void *in_op,
 	return 0;
 }
 
-static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result + (asym_op->modinv.result.length
-				- n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
-}
-
-void
-qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
-{
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
-{
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
-	}
-
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
-
-unsigned int qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
-}
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 308b6b2e0b..655a835c7f 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,32 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +74,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -76,7 +126,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -88,8 +140,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 05/11] crypto/qat: unify sym pmd apis
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
                         ` (3 preceding siblings ...)
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 04/11] crypto/qat: asym build op request specific implementation Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 06/11] crypto/qat: unify qat asym " Kai Ji
                         ` (5 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch integrates all the functions from qat_sym_pmd.c into qat_sym.c.
The integration unifies the symmetric crypto PMD APIs and should make
future maintenance easier (a stand-alone sketch of the cookie setup
pattern follows the diffstat below).

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build       |   2 +-
 drivers/common/qat/qat_device.c      |   2 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_sym.c         |  24 +++
 drivers/crypto/qat/qat_sym.h         | 141 +++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |   9 -
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 9 files changed, 158 insertions(+), 371 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
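
Illustrative sketch of the cookie setup pattern moved into qat_sym.c by this
patch: the IOVA of each field embedded in a per-op cookie is pre-computed as
the cookie's IOVA plus the field's offsetof(). Everything below is a
self-contained stand-in, not driver code; in particular mempool_virt2iova()
only mimics DPDK's rte_mempool_virt2iova() so the sketch compiles without
DPDK headers, and the struct layouts are heavily simplified.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sgl { uint64_t entries[16]; };           /* simplified SGL          */

struct op_cookie {                              /* simplified op cookie    */
	uint64_t sgl_src_phys_addr;
	uint64_t sgl_dst_phys_addr;
	struct sgl sgl_src;
	struct sgl sgl_dst;
};

/* stand-in for rte_mempool_virt2iova(); the driver uses the mempool IOVA */
static uint64_t
mempool_virt2iova(const void *obj)
{
	return (uint64_t)(uintptr_t)obj;
}

static void
init_op_cookie(void *op_cookie)
{
	struct op_cookie *cookie = op_cookie;

	/* IOVA of an embedded field = IOVA of the cookie + field offset */
	cookie->sgl_src_phys_addr = mempool_virt2iova(cookie) +
			offsetof(struct op_cookie, sgl_src);
	cookie->sgl_dst_phys_addr = mempool_virt2iova(cookie) +
			offsetof(struct op_cookie, sgl_dst);
}

int
main(void)
{
	struct op_cookie c;

	init_op_cookie(&c);
	printf("sgl_src offset inside cookie: %zu bytes\n",
			(size_t)(c.sgl_src_phys_addr - mempool_virt2iova(&c)));
	return 0;
}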

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index ce9959d103..7f0ff68946 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -70,7 +70,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 437996f2e8..e1fc1c37ec 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index a3db053c82..7faa5a48fd 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -841,7 +841,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 7481607ff8..8b1a49c690 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -12,6 +12,30 @@
 
 #include "qat_sym.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
 
 /** Decrypt a single partial block
  *  Depends on openssl libcrypto
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..87e23bb1d1 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -15,15 +15,75 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
+#define CRYPTODEV_NAME_QAT_SYM_PMD   crypto_qat
+
 #define BYTE_LENGTH    8
 /* bpi is only used for partial blocks of DES and AES
  * so AES block len can be assumed as max len for iv, src and dst
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,11 +114,26 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
 
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -213,17 +288,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +351,8 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	return 1;
 }
 
 int
@@ -293,6 +364,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +424,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 12825e448b..6bcc30d92d 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 28a26260fb..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 0dc0c6f0d9..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 52837d7c9c..933ac89569 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 06/11] crypto/qat: unify qat asym pmd apis
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
                         ` (4 preceding siblings ...)
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 05/11] crypto/qat: unify sym pmd apis Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 07/11] crypto/qat: op burst data path rework Kai Ji
                         ` (4 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch integrates all the functions from qat_asym_pmd.c
into qat_asym.c. The integration unifies the asymmetric crypto PMD APIs
and should make future maintenance easier (a small wrapper sketch
follows the diffstat below).

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
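
As a rough illustration of the shape the unified entry points take (see
qat_asym_crypto_enqueue_op_burst() in the diff), the sketch below shows a
cryptodev burst function reduced to a thin shim that hands a service-specific
build callback to a shared queue-pair helper. All names in the sketch
(qp_enqueue_burst, build_asym_request, ...) are illustrative stand-ins, not
the driver's symbols, and the ring handling is omitted.

#include <stdint.h>
#include <stdio.h>

/* same shape as the driver's build-request callback, heavily simplified */
typedef int (*build_request_t)(void *op, uint8_t *out_msg, void *cookie);

/* shared ring helper: service-agnostic, driven entirely by the callback */
static uint16_t
qp_enqueue_burst(void *qp, build_request_t build, void **ops, uint16_t n)
{
	uint8_t msg[64];
	uint16_t i;

	(void)qp;
	for (i = 0; i < n; i++)
		if (build(ops[i], msg, NULL) != 0)
			break;          /* stop at the first op that cannot be built */
	return i;
}

static int
build_asym_request(void *op, uint8_t *out_msg, void *cookie)
{
	(void)op; (void)cookie;
	out_msg[0] = 0x42;              /* pretend to fill a PKE descriptor */
	return 0;
}

/* what the unified asym enqueue burst boils down to: a one-line shim */
static uint16_t
asym_enqueue_burst(void *qp, void **ops, uint16_t n)
{
	return qp_enqueue_burst(qp, build_asym_request, ops, n);
}

int
main(void)
{
	int dummy;
	void *ops[4] = { &dummy, &dummy, &dummy, &dummy };

	printf("enqueued %u ops\n", (unsigned)asym_enqueue_burst(NULL, ops, 4));
	return 0;
}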

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 7f0ff68946..25c15bebea 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -71,7 +71,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index e2a5fb9260..83747118a5 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -15,6 +15,32 @@
 #include "qat_logs.h"
 #include "qat_asym.h"
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -813,6 +839,160 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 	return 0;
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 9a7596b227..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index fd6b406248..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD driver name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 07/11] crypto/qat: op burst data path rework
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
                         ` (5 preceding siblings ...)
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 06/11] crypto/qat: unify qat asym " Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 08/11] compress/qat: comp dequeue burst update Kai Ji
                         ` (3 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch enables the op_build_request function in the qat enqueue op
burst path and qat_dequeue_process_response in the qat dequeue op burst
path. The op_build_request callback invoked when building a crypto
request is selected according to the crypto operations set up during
session init (a minimal sketch of this pattern follows the diffstat
below).

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  18 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 180 ++--
 drivers/crypto/qat/qat_asym.c                |   6 +-
 drivers/crypto/qat/qat_asym.h                |  26 -
 drivers/crypto/qat/qat_crypto.h              |   6 +-
 drivers/crypto/qat/qat_sym.c                 | 914 ++++++-------------
 drivers/crypto/qat/qat_sym.h                 |   4 -
 drivers/crypto/qat/qat_sym_session.c         |   6 +-
 drivers/crypto/qat/qat_sym_session.h         |  10 +-
 11 files changed, 395 insertions(+), 819 deletions(-)
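
A minimal self-contained sketch (illustrative names only, not the driver's
symbols) of the idea described above: the session stores a build-request
function pointer chosen once at session init, so the enqueue burst simply
calls through it for each op instead of branching on the algorithm in the
hot path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct session;
typedef int (*build_request_t)(void *op, const struct session *s,
		uint8_t *out_msg);

struct session {
	int is_aead;                    /* stand-in for the real xform parse */
	build_request_t build_request;  /* picked once at session init       */
};

static int
build_cipher_req(void *op, const struct session *s, uint8_t *msg)
{
	(void)op; (void)s;
	memset(msg, 0xc1, 8);           /* pretend cipher-only descriptor */
	return 0;
}

static int
build_aead_req(void *op, const struct session *s, uint8_t *msg)
{
	(void)op; (void)s;
	memset(msg, 0xae, 8);           /* pretend AEAD descriptor */
	return 0;
}

/* "session init": decide the datapath builder once, outside the hot path */
static void
session_configure(struct session *s, int is_aead)
{
	s->is_aead = is_aead;
	s->build_request = is_aead ? build_aead_req : build_cipher_req;
}

/* "enqueue burst": no per-op branching on the algorithm any more */
static uint16_t
enqueue_burst(struct session *s, void **ops, uint16_t n)
{
	uint8_t ring_slot[8];
	uint16_t i;

	for (i = 0; i < n; i++)
		if (s->build_request(ops[i], s, ring_slot) != 0)
			break;
	return i;
}

int
main(void)
{
	struct session s;
	int dummy;
	void *ops[2] = { &dummy, &dummy };

	session_configure(&s, 1);
	printf("enqueued %u AEAD ops\n", (unsigned)enqueue_burst(&s, ops, 2));
	return 0;
}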

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 7faa5a48fd..56facf0fbe 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -550,8 +550,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -602,29 +601,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -820,8 +808,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -839,21 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index f2ca0173de..3b58786bd8 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -14,6 +14,24 @@
 
 struct qat_pci_device;
 
+/* Default qp configuration for GEN4 devices */
+#define QAT_GEN4_QP_DEFCON	(QAT_SERVICE_SYMMETRIC |	\
+				QAT_SERVICE_SYMMETRIC << 8 |	\
+				QAT_SERVICE_SYMMETRIC << 16 |	\
+				QAT_SERVICE_SYMMETRIC << 24)
+
+/* QAT GEN 4 specific macros */
+#define QAT_GEN4_BUNDLE_NUM             4
+#define QAT_GEN4_QPS_PER_BUNDLE_NUM     1
+
+/* Queue pair setup error codes */
+#define QAT_NOMEM		1
+#define QAT_QP_INVALID_DESC_NO	2
+#define QAT_QP_BUSY		3
+#define QAT_PCI_NO_RESOURCE	4
+
+#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C
+
 /**
  * Structure associated with each queue.
  */
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 621ff85638..9320eb5adf 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -306,4 +306,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index bcdf0172fe..14fc9d7f50 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -148,10 +148,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -181,6 +177,94 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+#ifdef RTE_LIB_SECURITY
+
+#define QAT_SECURITY_SYM_CAPABILITIES					\
+	{	/* AES DOCSIS BPI */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 32,			\
+					.increment = 16			\
+				},					\
+				.iv_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				}					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SECURITY_CAPABILITIES(sym)					\
+	[0] = {	/* DOCSIS Uplink */					\
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,	\
+		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,		\
+		.docsis = {						\
+			.direction = RTE_SECURITY_DOCSIS_UPLINK		\
+		},							\
+		.crypto_capabilities = (sym)				\
+	},								\
+	[1] = {	/* DOCSIS Downlink */					\
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,	\
+		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,		\
+		.docsis = {						\
+			.direction = RTE_SECURITY_DOCSIS_DOWNLINK	\
+		},							\
+		.crypto_capabilities = (sym)				\
+	}
+
+static const struct rte_cryptodev_capabilities
+					qat_security_sym_capabilities[] = {
+	QAT_SECURITY_SYM_CAPABILITIES,
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability qat_security_capabilities_gen1[] = {
+	QAT_SECURITY_CAPABILITIES(qat_security_sym_capabilities),
+	{
+		.action = RTE_SECURITY_ACTION_TYPE_NONE
+	}
+};
+
+static const struct rte_security_capability *
+qat_security_cap_get_gen1(void *dev __rte_unused)
+{
+	return qat_security_capabilities_gen1;
+}
+
+struct rte_security_ops security_qat_ops_gen1 = {
+		.session_create = qat_security_session_create,
+		.session_update = NULL,
+		.session_stats_get = NULL,
+		.session_destroy = qat_security_session_destroy,
+		.set_pkt_metadata = NULL,
+		.capabilities_get = qat_security_cap_get_gen1
+};
+
+void *
+qat_sym_create_security_gen1(void *cryptodev)
+{
+	struct rte_security_ctx *security_instance;
+
+	security_instance = rte_malloc(NULL, sizeof(struct rte_security_ctx),
+			RTE_CACHE_LINE_SIZE);
+	if (security_instance == NULL)
+		return NULL;
+
+	security_instance->device = cryptodev;
+	security_instance->ops = &security_qat_ops_gen1;
+	security_instance->sess_cnt = 0;
+
+	return (void *)security_instance;
+}
+
+#endif
 
 int
 qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
@@ -367,94 +451,6 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 	return 0;
 }
 
-#ifdef RTE_LIB_SECURITY
-
-#define QAT_SECURITY_SYM_CAPABILITIES					\
-	{	/* AES DOCSIS BPI */					\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
-				.block_size = 16,			\
-				.key_size = {				\
-					.min = 16,			\
-					.max = 32,			\
-					.increment = 16			\
-				},					\
-				.iv_size = {				\
-					.min = 16,			\
-					.max = 16,			\
-					.increment = 0			\
-				}					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SECURITY_CAPABILITIES(sym)					\
-	[0] = {	/* DOCSIS Uplink */					\
-		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,	\
-		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,		\
-		.docsis = {						\
-			.direction = RTE_SECURITY_DOCSIS_UPLINK		\
-		},							\
-		.crypto_capabilities = (sym)				\
-	},								\
-	[1] = {	/* DOCSIS Downlink */					\
-		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,	\
-		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,		\
-		.docsis = {						\
-			.direction = RTE_SECURITY_DOCSIS_DOWNLINK	\
-		},							\
-		.crypto_capabilities = (sym)				\
-	}
-
-static const struct rte_cryptodev_capabilities
-					qat_security_sym_capabilities[] = {
-	QAT_SECURITY_SYM_CAPABILITIES,
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static const struct rte_security_capability qat_security_capabilities_gen1[] = {
-	QAT_SECURITY_CAPABILITIES(qat_security_sym_capabilities),
-	{
-		.action = RTE_SECURITY_ACTION_TYPE_NONE
-	}
-};
-
-static const struct rte_security_capability *
-qat_security_cap_get_gen1(void *dev __rte_unused)
-{
-	return qat_security_capabilities_gen1;
-}
-
-struct rte_security_ops security_qat_ops_gen1 = {
-		.session_create = qat_security_session_create,
-		.session_update = NULL,
-		.session_stats_get = NULL,
-		.session_destroy = qat_security_session_destroy,
-		.set_pkt_metadata = NULL,
-		.capabilities_get = qat_security_cap_get_gen1
-};
-
-void *
-qat_sym_create_security_gen1(void *cryptodev)
-{
-	struct rte_security_ctx *security_instance;
-
-	security_instance = rte_malloc(NULL, sizeof(struct rte_security_ctx),
-			RTE_CACHE_LINE_SIZE);
-	if (security_instance == NULL)
-		return NULL;
-
-	security_instance->device = cryptodev;
-	security_instance->ops = &security_qat_ops_gen1;
-	security_instance->sess_cnt = 0;
-
-	return (void *)security_instance;
-}
-
-#endif
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 83747118a5..a25131e4d9 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -15,6 +15,10 @@
 #include "qat_logs.h"
 #include "qat_asym.h"
 
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
 void
 qat_asym_init_op_cookie(void *op_cookie)
 {
@@ -769,7 +773,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 655a835c7f..55b1e1b2cc 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -29,10 +29,6 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
-uint8_t qat_asym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
 /**
  * helper function to add an asym capability
  * <name> <op type> <modlen (min, max, increment)>
@@ -108,28 +104,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index a32c716d28..3ac6c90e1c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
@@ -61,6 +64,7 @@ struct qat_crypto_gen_dev_ops {
 };
 
 extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
 
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 8b1a49c690..2bbb6615f3 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,11 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -37,246 +51,67 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -292,463 +127,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value, typically they have, but could differ if binaries built
+	 * separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in cypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
-
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
 
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 87e23bb1d1..278abbfd3a 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -130,10 +130,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 933ac89569..27cdeb1de2 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -600,11 +600,11 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 	session->is_auth = 1;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 581bce3790..e1b5edb2f9 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -55,6 +55,11 @@
 #define QAT_SESSION_IS_SLICE_SET(flags, flag)	\
 	(!!((flags) & (flag)))
 
+struct qat_sym_session;
+
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_NONE = 0,
 	QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -63,11 +68,6 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
-struct qat_sym_session;
-
-typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
-		uint8_t *out_msg, void *op_cookie);
-
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 08/11] compress/qat: comp dequeue burst update
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
                         ` (6 preceding siblings ...)
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 07/11] crypto/qat: op burst data path rework Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 09/11] crypto/qat: raw dp api integration Kai Ji
                         ` (2 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch updates the compression PMD to use the response-processing
function pointer newly introduced in the generic dequeue burst path.
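
For reference, a minimal sketch of the callback pattern this change plugs
into (the qat_op_dequeue_t signature is inferred from the call site in
qat_qp.c; the sketch_* wrappers are illustrative and not part of this
series):

typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp,
		void *op_cookie, uint64_t *dequeue_err_count);

/* Each service now hands its own response handler to the shared dequeue,
 * so the common queue-pair code no longer switches on the service type.
 */
static uint16_t
sketch_comp_dequeue_burst(void *qp, struct rte_comp_op **ops, uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_comp_process_response, nb_ops);
}

static uint16_t
sketch_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_sym_process_response, nb_ops);
}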

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/compress/qat/qat_comp_pmd.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 31de526056..2f17f8825e 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 09/11] crypto/qat: raw dp api integration
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
                         ` (7 preceding siblings ...)
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 08/11] compress/qat: comp dequeue burst update Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 10/11] crypto/qat: support out of place SG list Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 11/11] test/cryptodev: fix incomplete data length Kai Ji
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch unifies the raw datapath API implementation into the QAT PMD
driver, with generation-specific variants implemented where required.
The file qat_sym_hw_dp.c is removed as it is no longer needed.
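
The integration reuses the per-generation hook table: each generation
registers a set_raw_dp_ctx callback, and GEN3/GEN4 start from the GEN1
configuration before overriding their single-pass enqueue paths. A hedged
sketch of the dispatch follows (the wrapper name is illustrative; only the
set_raw_dp_ctx member and the per-generation configure functions come from
this patch):

static int
sketch_configure_raw_dp_ctx(struct qat_cryptodev_private *internals,
		void *raw_dp_ctx, void *session)
{
	enum qat_device_gen gen = internals->qat_dev->qat_dev_gen;

	if (qat_sym_gen_dev_ops[gen].set_raw_dp_ctx == NULL)
		return -ENOTSUP;

	/* GEN2 reuses the GEN1 routine as-is; GEN3 and GEN4 call it first
	 * and then replace only the single-pass enqueue handlers.
	 */
	return qat_sym_gen_dev_ops[gen].set_raw_dp_ctx(raw_dp_ctx, session);
}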

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |   2 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 +++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  50 +
 drivers/crypto/qat/qat_sym_hw_dp.c           | 974 -------------------
 9 files changed, 1126 insertions(+), 975 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 25c15bebea..e53b51dcac 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -71,7 +71,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
 	    'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 9320eb5adf..d88f17f0db 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -290,6 +290,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index 6baf1810ed..6494019050 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -393,6 +393,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return 0;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -402,6 +614,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index fa6a7ee2fd..167c95abcf 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -221,6 +221,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return 0;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -228,6 +348,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 6277211b35..b69d4d9091 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 14fc9d7f50..b044c7b2d3 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -148,6 +148,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -451,6 +455,656 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 	return 0;
 }
 
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 3ac6c90e1c..203f74f46f 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 2bbb6615f3..b78e6a6ef3 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -354,6 +354,56 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 6bcc30d92d..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,974 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-		cipher_param->spc_aad_addr = aad->iova;
-		cipher_param->spc_auth_res_addr = digest->iova;
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 10/11] crypto/qat: support out of place SG list
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
                         ` (8 preceding siblings ...)
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 09/11] crypto/qat: raw dp api integration Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 11/11] test/cryptodev: fix incomplete data length Kai Ji
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds out-of-place (OOP) SGL support to the QAT driver.
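
For illustration, a minimal sketch of how an application driving the raw
data-path API supplies a separate destination SGL to request an
out-of-place operation. The helper name fill_oop_vectors and the MAX_SEGS
bound are hypothetical; rte_crypto_mbuf_to_vec() and the dest_sgl field
are the ones exercised by the test changes later in this series.

#include <rte_mbuf.h>
#include <rte_crypto_sym.h>

#define MAX_SEGS 16	/* hypothetical per-op segment bound */

static int
fill_oop_vectors(struct rte_mbuf *m_src, struct rte_mbuf *m_dst,
		uint32_t max_len, struct rte_crypto_sym_vec *vec,
		struct rte_crypto_sgl *sgl, struct rte_crypto_sgl *dest_sgl,
		struct rte_crypto_vec *src_vec, struct rte_crypto_vec *dst_vec)
{
	int n;

	/* gather the source mbuf (chain) into a crypto vector array */
	n = rte_crypto_mbuf_to_vec(m_src, 0, max_len, src_vec, MAX_SEGS);
	if (n < 0)
		return -1;
	sgl->vec = src_vec;
	sgl->num = n;
	vec->src_sgl = sgl;
	vec->num = 1;

	if (m_dst != NULL && m_dst != m_src) {
		/* out-of-place: results are written to m_dst's segments */
		n = rte_crypto_mbuf_to_vec(m_dst, 0, max_len, dst_vec,
				MAX_SEGS);
		if (n < 0)
			return -1;
		dest_sgl->vec = dst_vec;
		dest_sgl->num = n;
		vec->dest_sgl = dest_sgl;
	} else {
		/* in-place: no destination SGL */
		vec->dest_sgl = NULL;
	}

	return 0;
}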

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index 6494019050..c59c25fe8f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -467,8 +467,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -564,8 +574,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 167c95abcf..529878e3e7 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -295,8 +295,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index b044c7b2d3..4bbdfdf367 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -529,9 +529,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -628,8 +637,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -728,8 +747,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -833,8 +862,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev] [dpdk-dev v4 11/11] test/cryptodev: fix incomplete data length
  2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
                         ` (9 preceding siblings ...)
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 10/11] crypto/qat: support out of place SG list Kai Ji
@ 2021-11-05  0:19       ` Kai Ji
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2021-11-05  0:19 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, pablo.de.lara.guarch, adamx.dybkowski

This patch fixes incorrect data length computation in the cryptodev
unit tests. Previously some data lengths were set incorrectly; this did
not affect the crypto op unit tests but is critical for the raw data
path API unit tests. The patch addresses the issue by setting the
correct data lengths for the affected tests.
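
As a worked illustration (identifiers follow the test code below; the
120-bit value is hypothetical), the corrected sizing amounts to appending
the larger of the two padded lengths so the raw data-path API never reads
past the mbuf data length:

unsigned int plaintext_len = ceil_byte_length(tdata->plaintext.len_bits);
unsigned int ciphertext_len = ceil_byte_length(tdata->ciphertext.len_bits);
unsigned int plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
unsigned int ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
/* e.g. 120 bits -> 15 bytes -> padded to 16 */
unsigned int data_len = RTE_MAX(ciphertext_pad_len, plaintext_pad_len);

/* append the same, larger size for both directions, and for the
 * out-of-place destination buffer as well */
rte_pktmbuf_append(ut_params->ibuf, data_len);
if (op_mode == OUT_OF_PLACE)
	rte_pktmbuf_append(ut_params->obuf, data_len);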

Fixes: 681f540da52b ("cryptodev: do not use AAD in wireless algorithms")
Cc: pablo.de.lara.guarch@intel.com

Fixes: e847fc512817 ("test/crypto: add encrypted digest case for AES-CTR-CMAC")
Cc: adamx.dybkowski@intel.com

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 app/test/test_cryptodev.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index efd8bfd7a0..e1f7c6454d 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -209,6 +209,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 	int enqueue_status, dequeue_status;
 	struct crypto_unittest_params *ut_params = &unittest_params;
 	int is_sgl = sop->m_src->nb_segs > 1;
+	int is_oop = 0;
 
 	ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 	if (ctx_service_size < 0) {
@@ -247,6 +248,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 
 	ofs.raw = 0;
 
+	if ((sop->m_dst != NULL) && (sop->m_dst != sop->m_src))
+		is_oop = 1;
+
 	if (is_cipher && is_auth) {
 		cipher_offset = sop->cipher.data.offset;
 		cipher_len = sop->cipher.data.length;
@@ -277,6 +281,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 		if (is_sgl) {
 			uint32_t remaining_off = auth_offset + auth_len;
 			struct rte_mbuf *sgl_buf = sop->m_src;
+			if (is_oop)
+				sgl_buf = sop->m_dst;
 
 			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
 					&& sgl_buf->next != NULL) {
@@ -293,7 +299,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 		/* Then check if digest-encrypted conditions are met */
 		if ((auth_offset + auth_len < cipher_offset + cipher_len) &&
 				(digest.iova == auth_end_iova) && is_sgl)
-			max_len = RTE_MAX(max_len, auth_offset + auth_len +
+			max_len = RTE_MAX(max_len,
+				auth_offset + auth_len +
 				ut_params->auth_xform.auth.digest_length);
 
 	} else if (is_cipher) {
@@ -356,7 +363,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 
 	sgl.num = n;
 	/* Out of place */
-	if (sop->m_dst != NULL) {
+	if (is_oop) {
 		dest_sgl.vec = dest_data_vec;
 		vec.dest_sgl = &dest_sgl;
 		n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
@@ -4102,9 +4109,9 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
 
 	/* Create KASUMI operation */
 	retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
-					tdata->cipher_iv.len,
-					tdata->ciphertext.len,
-					tdata->validCipherOffsetInBits.len);
+			tdata->cipher_iv.len,
+			RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+			tdata->validCipherOffsetInBits.len);
 	if (retval < 0)
 		return retval;
 
@@ -7335,6 +7342,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
 	unsigned int plaintext_len;
 	unsigned int ciphertext_pad_len;
 	unsigned int ciphertext_len;
+	unsigned int data_len;
 
 	struct rte_cryptodev_info dev_info;
 	struct rte_crypto_op *op;
@@ -7395,21 +7403,22 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
 	plaintext_len = ceil_byte_length(tdata->plaintext.len_bits);
 	ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
 	plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+	data_len = RTE_MAX(ciphertext_pad_len, plaintext_pad_len);
 
 	if (verify) {
 		ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-				ciphertext_pad_len);
+				data_len);
 		memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
 		if (op_mode == OUT_OF_PLACE)
-			rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len);
+			rte_pktmbuf_append(ut_params->obuf, data_len);
 		debug_hexdump(stdout, "ciphertext:", ciphertext,
 				ciphertext_len);
 	} else {
 		plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-				plaintext_pad_len);
+				data_len);
 		memcpy(plaintext, tdata->plaintext.data, plaintext_len);
 		if (op_mode == OUT_OF_PLACE)
-			rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
+			rte_pktmbuf_append(ut_params->obuf, data_len);
 		debug_hexdump(stdout, "plaintext:", plaintext, plaintext_len);
 	}
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework
  2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 11/11] test/cryptodev: fix incomplete data length Kai Ji
@ 2022-01-28 18:23         ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 01/10] common/qat: define build op request and dequeue op Kai Ji
                             ` (10 more replies)
  0 siblings, 11 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patchset reworks the QAT symmetric crypto datapath implementation so
that request building is separated per generation and the crypto
operations under the raw datapath API implementation are unified.

In addition, this patchset also enables QAT out-of-place (OOP) support in
the raw datapath API implementation.

v5: 
- rebase to the latest for-main
- patchset reconstruct

v4:
- patchset break down and reconstruct

v3:
- separate patch 6 into two different patches

v2:
- review comments addressed

Kai Ji (10):
  common/qat: define build op request and dequeue op
  crypto/qat: sym build op request specific implementation
  crypto/qat: qat generation specific enqueue
  crypto/qat: rework session APIs
  crypto/qat: rework asymmetric crypto build operation
  crypto/qat: unify qat sym pmd apis
  crypto/qat: unify qat asym pmd apis
  crypto/qat: op burst data path rework
  crypto/qat: raw dp api integration
  crypto/qat: support out of place SG list

 drivers/common/qat/meson.build               |   6 +-
 drivers/common/qat/qat_device.c              |   4 +-
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  54 +-
 drivers/compress/qat/qat_comp_pmd.c          |  14 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 762 ++++++++------
 drivers/crypto/qat/qat_asym.h                |  79 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.h              |  16 +-
 drivers/crypto/qat/qat_sym.c                 | 978 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 148 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 115 +--
 drivers/crypto/qat/qat_sym_session.h         |  15 +-
 23 files changed, 3812 insertions(+), 2739 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 01/10] common/qat: define build op request and dequeue op
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 02/10] crypto/qat: sym build op request specific implementation Kai Ji
                             ` (9 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces build-request and dequeue-op function
pointers to the QAT queue pair implementation. These two functions
are assigned during QAT session generation, based on the type of
crypto operation.
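
For orientation, a hedged sketch of how the new hooks are meant to fit
together once later patches in this series supply real callbacks;
dummy_build_request below is a hypothetical placeholder whose signature
matches the qat_op_build_request_t typedef added by this patch.

/* hypothetical callback matching qat_op_build_request_t: translate one
 * crypto op into a firmware request written to out_msg; a real
 * implementation arrives in later patches of this series */
static int
dummy_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
		uint64_t *opaque, enum qat_device_gen dev_gen)
{
	RTE_SET_USED(in_op);
	RTE_SET_USED(out_msg);
	RTE_SET_USED(op_cookie);
	RTE_SET_USED(opaque);
	RTE_SET_USED(dev_gen);
	return 0;
}

/* the enqueue burst now takes the callback as its second argument; this
 * patch still passes NULL so the legacy path is kept unchanged */
static uint16_t
enqueue_burst_sketch(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, dummy_build_request,
			(void **)ops, nb_ops);
}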

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c          | 10 ++++--
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  4 +--
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h | 13 ++++++-
 6 files changed, 76 insertions(+), 13 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index cde421eb77..ed632b5ebe 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -550,7 +550,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -817,7 +819,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..66f00943a5 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Typedef of the qat_op_build_request_t function pointer, passed as an
+ * argument to the enqueue op burst, where a build request is assigned
+ * based on the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_meg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context between
+ *    two enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is build successfully,
+ *   - EINVAL if error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Typedef of the qat_op_dequeue_t function pointer, passed as an
+ * argument to the dequeue op burst, where a dequeue op is assigned
+ * based on the type of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - EINVAL if error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index da6404c017..8e497e7a09 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..fe875a7fd0 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * Typedef of the qat_op_build_request_t function pointer, passed as an
+ * argument to the enqueue op burst, where a build request is assigned
+ * based on the type of crypto op.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 02/10] crypto/qat: sym build op request specific implementation
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 01/10] common/qat: define build op request and dequeue op Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 03/10] crypto/qat: qat generation specific enqueue Kai Ji
                             ` (8 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds common inline functions for the QAT symmetric
crypto driver to process crypto ops, and the build-op-request
function pointer implementations for QAT generation 1.
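
For illustration only, the kind of per-command dispatch that
qat_sym_crypto_set_session_gen1() is expected to perform when it stores a
build-request callback in the session; pick_gen1_build_request is a
hypothetical helper, and the real driver also inspects the hash/cipher
algorithms to tell AEAD sessions apart from cipher-hash chains.

static qat_sym_build_request_t
pick_gen1_build_request(const struct qat_sym_session *ctx)
{
	switch (ctx->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		return qat_sym_build_op_cipher_gen1;
	case ICP_QAT_FW_LA_CMD_AUTH:
		return qat_sym_build_op_auth_gen1;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		/* AEAD sessions would map to qat_sym_build_op_aead_gen1() */
		return qat_sym_build_op_chain_gen1;
	default:
		return NULL;
	}
}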

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..c429825a67 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 03/10] crypto/qat: qat generation specific enqueue
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 01/10] common/qat: define build op request and dequeue op Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 02/10] crypto/qat: sym build op request specific implementation Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 04/10] crypto/qat: rework session APIs Kai Ji
                             ` (7 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds the generation-specific AEAD and auth build op enqueue
functions for QAT Gen3 & Gen4.
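
As a rough illustration of the pattern the new functions follow (all
names below are placeholders, not the driver's types), a
generation-specific enqueue helper checks a session capability flag
and falls back to the common baseline routine when the single-pass
path does not apply:

#include <stdint.h>

/* illustrative placeholders only */
struct sketch_session {
	int is_single_pass;
};

struct sketch_req {
	uint32_t cipher_offset;
	uint32_t cipher_length;
	uint64_t aad_iova;
	uint64_t digest_iova;
};

/* baseline request build shared by all generations */
static void
sketch_enqueue_aead_baseline(struct sketch_session *ctx,
		struct sketch_req *req, uint32_t ofs, uint32_t data_len)
{
	(void)ctx;
	req->cipher_offset = ofs;
	req->cipher_length = data_len - ofs;
}

static void
sketch_enqueue_aead_gen_n(struct sketch_session *ctx,
		struct sketch_req *req, uint32_t ofs, uint32_t data_len,
		uint64_t aad_iova, uint64_t digest_iova)
{
	if (ctx->is_single_pass) {
		/* newer generation: AEAD is programmed as a single-pass
		 * cipher request with AAD/digest pointers set directly
		 */
		req->cipher_offset = ofs;
		req->cipher_length = data_len - ofs;
		req->aad_iova = aad_iova;
		req->digest_iova = digest_iova;
		return;
	}
	/* otherwise fall back to the baseline (gen1-style) routine */
	sketch_enqueue_aead_baseline(ctx, req, ofs, data_len);
}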

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 117 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  34 +++++-
 2 files changed, 149 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..fca7af2b7e 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,121 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..8462c0b9b1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,6 +103,38 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 04/10] crypto/qat: rework session APIs
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                             ` (2 preceding siblings ...)
  2022-01-28 18:23           ` [dpdk-dev v5 03/10] crypto/qat: qat generation specific enqueue Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
                             ` (6 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces the set_session methods for different
generations of QAT. In addition, the patch replaces
'min_qat_dev_gen_id' with 'qat_dev_gen'. As a result, a session
created for one generation of QAT can no longer be used by another
generation.
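
A minimal sketch of the per-generation set_session dispatch described
above (names are illustrative, not the driver's API): each generation
registers a handler that, among other things, picks the build_request
callback for the session, so the hot path never has to re-check the
device generation per operation.

#include <stddef.h>

enum sketch_gen { SKETCH_GEN1, SKETCH_GEN2, SKETCH_GEN3, SKETCH_GEN4,
		SKETCH_N_GENS };

typedef int (*sketch_set_session_t)(void *cryptodev, void *session);

/* filled in by per-generation init code */
static sketch_set_session_t sketch_set_session_ops[SKETCH_N_GENS];

static int
sketch_session_configure(enum sketch_gen gen, void *cryptodev,
		void *session)
{
	if (sketch_set_session_ops[gen] == NULL)
		return -1;	/* no handler registered for this generation */

	/* the handler selects the generation-specific build_request
	 * callback and rejects unsupported algorithm combinations
	 */
	return sketch_set_session_ops[gen](cryptodev, session);
}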

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 +++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 139 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  91 ++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 +++------------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 10 files changed, 425 insertions(+), 107 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * but some are not supported by GEN2, so checking here
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index fca7af2b7e..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -258,6 +258,142 @@ enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
 			ICP_QAT_FW_LA_NO_PROTO);
 }
 
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN3
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -265,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -276,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 8462c0b9b1..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -135,11 +135,101 @@ enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
 	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
 }
 
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN4
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -153,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c429825a67..501132a448 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* Check none supported algs if mixed */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 05/10] crypto/qat: rework asymmetric crypto build operation
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                             ` (3 preceding siblings ...)
  2022-01-28 18:23           ` [dpdk-dev v5 04/10] crypto/qat: rework session APIs Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
                             ` (5 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch reworks the asymmetric crypto data path
implementation in the QAT driver. The change separates the
asymmetric crypto data path implementations of the different
QAT generations and shrinks the device capabilities
declaration code size.
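
For illustration, the unified enqueue/dequeue callback shape that both
the symmetric and asymmetric services plug into after this rework can
be pictured roughly as below; these are simplified, hypothetical
prototypes, not the exact driver ones.

#include <stdint.h>

/* build one firmware request from a crypto op into the TX ring slot;
 * returns 0 on success or a negative errno-style value
 */
typedef int (*sketch_build_request_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, int dev_gen);

/* turn one firmware response back into a completed op; returns the
 * number of ops produced and may bump a dequeue error counter
 */
typedef int (*sketch_process_response_t)(void **op, uint8_t *resp,
		void *op_cookie, uint64_t *dequeue_err_count);

struct sketch_service_ops {
	sketch_build_request_t    build_request;
	sketch_process_response_t process_response;
};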

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c   |   5 +-
 drivers/crypto/qat/qat_asym.c | 624 +++++++++++++++++-----------------
 drivers/crypto/qat/qat_asym.h |  63 +++-
 3 files changed, 380 insertions(+), 312 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index ed632b5ebe..c3265241a3 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -622,7 +622,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -850,7 +850,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 09d8761c5f..3d7aecd7c0 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,69 +1,119 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+#include "qat_device.h"
 
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
 {
-	size_t i;
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
 
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
 		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
 	}
-	return -1;
-}
 
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
 
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
 }
 
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
 {
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
 }
 
-static size_t max_of(int n, ...)
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
 {
-	va_list args;
-	size_t len = 0, num;
-	int i;
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
 	}
-	va_end(args);
-
-	return len;
 }
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
 {
@@ -106,7 +156,230 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length
+				- n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -475,10 +748,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -545,263 +817,7 @@ qat_asym_build_request(void *in_op,
 	return 0;
 }
 
-static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result + (asym_op->modinv.result.length
-				- n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
-}
-
-void
-qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
-{
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
-{
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
-	}
-
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
-
-unsigned int qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
-}
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 308b6b2e0b..aba49d57cb 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_ASYM_H_
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -76,7 +122,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -88,8 +136,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 06/10] crypto/qat: unify qat sym pmd apis
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                             ` (4 preceding siblings ...)
  2022-01-28 18:23           ` [dpdk-dev v5 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 07/10] crypto/qat: unify qat asym " Kai Ji
                             ` (4 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch removes qat_sym_pmd.c and integrates all of its APIs into
qat_sym.c, making the unified QAT symmetric crypto PMD APIs easier to
maintain.
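
For reference, the qat_sym.h chunk in this patch only declares the new
qat_sym_enqueue_burst()/qat_sym_dequeue_burst() entry points; their
bodies land in qat_sym.c as the series progresses. Below is a minimal
sketch of what those wrappers are expected to look like, modelled on
the asym wrappers added in patch 07/10 and on the callback-taking
qat_enqueue_op_burst()/qat_dequeue_op_burst() prototypes shown in
patch 08/10 (the exact bodies are an assumption for illustration, not
part of this diff):

/* Illustrative sketch only, not part of this patch. Assumes the
 * callback-taking QP prototypes introduced later in this series.
 */
uint16_t
qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	/* hand the sym-specific request builder to the generic QP code */
	return qat_enqueue_op_burst(qp, qat_sym_build_request,
			(void **)ops, nb_ops);
}

uint16_t
qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	/* hand the sym-specific response handler to the generic QP code */
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_sym_process_response, nb_ops);
}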

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build       |   4 +-
 drivers/common/qat/qat_device.c      |   4 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_crypto.h      |   5 +-
 drivers/crypto/qat/qat_sym.c         |  21 +++
 drivers/crypto/qat/qat_sym.h         | 147 ++++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |  11 +-
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 10 files changed, 168 insertions(+), 375 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index af92271a75..1bf6896a7e 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017-2018 Intel Corporation
+# Copyright(c) 2017-2022 Intel Corporation
 
 if is_windows
     build = false
@@ -73,7 +73,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 437996f2e8..e5459fdfd1 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2020 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 
 #include <rte_string_fns.h>
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index c3265241a3..dd9056650d 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -841,7 +841,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 5ca76fcaa6..c01266f81c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 83bf55c933..aad4b243b7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -17,6 +17,27 @@ uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..f4ff2ce4cd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
@@ -24,6 +24,67 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,6 +115,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -213,17 +290,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +353,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
 }
 
 int
@@ -293,6 +370,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +430,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 12825e448b..2576cb1be7 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #include <cryptodev_pmd.h>
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 28a26260fb..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 59fbdefa12..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 3a880096c4..9d6a19c0be 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 07/10] crypto/qat: unify qat asym pmd apis
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                             ` (5 preceding siblings ...)
  2022-01-28 18:23           ` [dpdk-dev v5 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 08/10] crypto/qat: op burst data path rework Kai Ji
                             ` (3 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch removes qat_asym_pmd.c and integrates all of its
functions into qat_asym.c, making the unified QAT asymmetric crypto
PMD APIs easier to maintain.
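
On the hot path, the visible change is that the asym burst wrappers
stop passing NULL callbacks to the generic queue-pair code and instead
hand it the asym-specific build/response handlers. Condensed from the
removed qat_asym_pmd.c and the new wrappers in the diff below (call
sites only, shown side by side for comparison):

/* old wrappers (qat_asym_pmd.c, removed): no per-service callbacks */
return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);

/* new wrappers (qat_asym.c): service-specific callbacks plugged in */
return qat_enqueue_op_burst(qp, qat_asym_build_request,
		(void **)ops, nb_ops);
return qat_dequeue_op_burst(qp, (void **)ops,
		qat_asym_process_response, nb_ops);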

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 1bf6896a7e..f687f5c9d8 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 3d7aecd7c0..da8d7e965c 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -19,6 +19,32 @@ uint8_t qat_asym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -817,6 +843,160 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 	return 0;
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 9a7596b227..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index f988d646e5..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 08/10] crypto/qat: op burst data path rework
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                             ` (6 preceding siblings ...)
  2022-01-28 18:23           ` [dpdk-dev v5 07/10] crypto/qat: unify qat asym " Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 09/10] crypto/qat: raw dp api integration Kai Ji
                             ` (2 subsequent siblings)
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch enables the op_build_request function in
qat_enqueue_op_burst and the qat_dequeue_process_response function in
qat_dequeue_op_burst. The op_build_request used to build each crypto
request is selected according to the crypto operations set up during
session init.
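
The effect of this rework is that the generic queue-pair loops no
longer branch on the service type; they simply dispatch through the
two function pointers handed in by each service (see
qat_op_build_request_t/qat_op_dequeue_t in the hunks below). The
following is a self-contained toy sketch of that dispatch pattern
using stand-in types; it illustrates the mechanism only and is not
the actual qat_qp.c code:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the per-service callbacks (cf. qat_op_build_request_t
 * and qat_op_dequeue_t in the real driver).
 */
typedef int (*build_request_t)(void *op, uint8_t *out_msg, void *cookie);
typedef int (*process_response_t)(void **op, uint8_t *resp, void *cookie);

/* Generic enqueue: no service-type switch, just call the builder per op. */
static uint16_t
toy_enqueue_burst(build_request_t build_request, void **ops,
		uint8_t ring[][64], void *cookies[], uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++)
		if (build_request(ops[i], ring[i], cookies[i]) != 0)
			break;	/* stop at the first op that cannot be built */
	return i;
}

/* Generic dequeue: hand every firmware response to the service handler. */
static uint16_t
toy_dequeue_burst(process_response_t process_response, void **ops,
		uint8_t ring[][64], void *cookies[], uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++)
		process_response(&ops[i], ring[i], cookies[i]);
	return i;
}

/* Trivial "service" callbacks used only for the demonstration. */
static int
toy_build(void *op, uint8_t *msg, void *cookie)
{
	(void)cookie;
	msg[0] = (uint8_t)(uintptr_t)op;
	return 0;
}

static int
toy_process(void **op, uint8_t *resp, void *cookie)
{
	(void)cookie;
	*op = (void *)(uintptr_t)resp[0];
	return 1;
}

int
main(void)
{
	uint8_t ring[4][64];
	void *cookies[4] = { NULL, NULL, NULL, NULL };
	void *ops[4] = { (void *)1, (void *)2, (void *)3, (void *)4 };
	uint16_t n;

	n = toy_enqueue_burst(toy_build, ops, ring, cookies, 4);
	n = toy_dequeue_burst(toy_process, ops, ring, cookies, n);
	printf("processed %u ops\n", (unsigned int)n);
	return 0;
}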

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 829 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 270 insertions(+), 634 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index dd9056650d..9bbadc8f8e 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -550,8 +550,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -602,29 +601,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -820,8 +808,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -839,21 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 501132a448..c58a628915 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index da8d7e965c..07e3baa172 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -773,7 +773,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index aba49d57cb..72e62120c5 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -104,28 +104,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..0b1ab0b000 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,67 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +127,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value, typically they have, but could differ if binaries built
+	 * separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 09/10] crypto/qat: raw dp api integration
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                             ` (7 preceding siblings ...)
  2022-01-28 18:23           ` [dpdk-dev v5 08/10] crypto/qat: op burst data path rework Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-01-28 18:23           ` [dpdk-dev v5 10/10] crypto/qat: support out of place SG list Kai Ji
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch "unifies" QAT's raw dp api implementations
to share the same enqueue/dequeue methods as the crypto
operation enqueue/dequeue methods. In addition, different QAT
generation specific implementations are done respectively.
The qat_sym_hw_dp.c is removed as no longer required.
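
For context, the following is a minimal application-side sketch of the
raw data-path API flow that this rework serves on the PMD side. It uses
only the public rte_cryptodev raw DP calls; the helper names
raw_dp_send_receive() and post_deq_cb() are made up for illustration,
and the device, queue pair, session and rte_crypto_sym_vec are assumed
to have been prepared by the caller (error handling is kept minimal):

#include <errno.h>

#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* Trivial per-op completion callback; a real application would record
 * the per-operation status here. (Hypothetical helper, not in the patch.)
 */
static void
post_deq_cb(void *user_data __rte_unused, uint32_t index __rte_unused,
		uint8_t is_op_success __rte_unused)
{
}

/* Hypothetical helper: enqueue one prepared rte_crypto_sym_vec on a
 * configured queue pair through the raw DP API, then collect responses.
 */
static int
raw_dp_send_receive(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_vec *vec,
		union rte_crypto_sym_ofs ofs, void **user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	uint32_t n_enq, n_deq, n_success = 0;
	int ctx_size, enq_status = 0, deq_status = 0;

	/* Context size is PMD specific; this patch provides the QAT
	 * per-generation answer behind this call.
	 */
	ctx_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (ctx_size < 0)
		return -ENOTSUP;
	ctx = rte_zmalloc(NULL, ctx_size, 0);
	if (ctx == NULL)
		return -ENOMEM;

	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		rte_free(ctx);
		return -EINVAL;
	}

	/* Enqueue the whole vector, then advance the TX ring once. */
	n_enq = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, user_data,
			&enq_status);
	rte_cryptodev_raw_enqueue_done(ctx, n_enq);

	/* Collect responses. A NULL count callback means the burst size
	 * comes from the max_nb_to_dequeue argument; a real application
	 * would poll in a loop until all responses have arrived.
	 */
	n_deq = rte_cryptodev_raw_dequeue_burst(ctx, NULL, n_enq,
			post_deq_cb, user_data, 1, &n_success, &deq_status);
	rte_cryptodev_raw_dequeue_done(ctx, n_deq);

	rte_free(ctx);
	return n_success == n_enq ? 0 : -EIO;
}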

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |   2 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 +++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  56 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 974 -------------------
 10 files changed, 1137 insertions(+), 982 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index f687f5c9d8..b7027f3164 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 8e497e7a09..dc8db84a68 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 64e6ae66ec..0c64c1e43f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -291,6 +291,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index db864d973a..ffa093a7a3 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -394,6 +394,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -403,6 +615,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 7642a87d55..f803bc1459 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -223,6 +223,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -230,6 +350,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 96cdb97a26..50a9c5ad5b 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c58a628915..fee6507512 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,6 +146,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -448,6 +452,656 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, &iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c01266f81c..cf386d0ed0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 0b1ab0b000..284a65a9c6 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -88,7 +88,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -143,7 +143,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -291,8 +291,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (internals->capa_mz == NULL) {
 			QAT_LOG(DEBUG,
 				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
+				"destroying PMD for %s", name);
 			ret = -EFAULT;
 			goto error;
 		}
@@ -354,6 +353,55 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 2576cb1be7..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,974 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-		cipher_param->spc_aad_addr = aad->iova;
-		cipher_param->spc_auth_res_addr = digest->iova;
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v5 10/10] crypto/qat: support out of place SG list
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                             ` (8 preceding siblings ...)
  2022-01-28 18:23           ` [dpdk-dev v5 09/10] crypto/qat: raw dp api integration Kai Ji
@ 2022-01-28 18:23           ` Kai Ji
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  10 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-01-28 18:23 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds out-of-place scatter-gather list (SGL) support to the QAT PMD.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)
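
For context, the user-visible effect of this change is that a raw data-path
caller may now supply a separate destination SGL per element; leaving
vec->dest_sgl as NULL keeps the previous in-place behaviour. The fragment
below is only an illustrative sketch of that usage, assuming an already
configured struct rte_crypto_raw_dp_ctx and caller-prepared buffers (session
setup, queue-pair configuration and error handling are omitted); it is not
part of this patch.

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Enqueue one job through the raw data-path API: out of place when a
 * destination SGL is given, in place when dst == NULL.
 */
static int
enqueue_raw_dp_job(struct rte_crypto_raw_dp_ctx *dp_ctx,
		struct rte_crypto_sgl *src, struct rte_crypto_sgl *dst,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *digest,
		struct rte_crypto_va_iova_ptr *aad,
		union rte_crypto_sym_ofs ofs, void *user_data)
{
	int32_t op_status;
	int enq_status;
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = src,
		.dest_sgl = dst,	/* NULL selects the in-place path */
		.iv = iv,
		.digest = digest,
		.aad = aad,
		.status = &op_status,
	};

	if (rte_cryptodev_raw_enqueue_burst(dp_ctx, &vec, ofs,
			&user_data, &enq_status) != 1)
		return -1;

	return rte_cryptodev_raw_enqueue_done(dp_ctx, 1);
}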

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index ffa093a7a3..5084a5fcd1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -468,8 +468,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -565,8 +575,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index f803bc1459..bd7f3785df 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -297,8 +297,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index fee6507512..83d9b66f34 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -526,9 +526,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -625,8 +634,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -725,8 +744,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -830,8 +859,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework
  2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                             ` (9 preceding siblings ...)
  2022-01-28 18:23           ` [dpdk-dev v5 10/10] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-04 18:50           ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 01/10] common/qat: define build op request and dequeue op Kai Ji
                               ` (9 more replies)
  10 siblings, 10 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch reworks the QAT symmetric crypto datapath implementation so that
request building is separated per QAT generation and the crypto operations
under the raw datapath API implementation are unified.

In addition, this patchset also enables QAT out-of-place (OOP) support in the
raw datapath API implementation.

v6:
- fix a pointer cast error on x86
- rebase to the latest for-main

v5:
- rebase to the latest for-main
- patchset restructured

v4:
- patchset broken down and restructured

v3:
- separate patch 6 into two different patches

v2:
- review comments addressed

Kai Ji (10):
  common/qat: define build op request and dequeue op
  crypto/qat: sym build op request specific implementation
  crypto/qat: qat generation specific enqueue
  crypto/qat: rework session APIs
  crypto/qat: rework asymmetric crypto build operation
  crypto/qat: unify qat sym pmd apis
  crypto/qat: unify qat asym pmd apis
  crypto/qat: op burst data path rework
  crypto/qat: raw dp api integration
  crypto/qat: support out of place SG list

 drivers/common/qat/meson.build               |   6 +-
 drivers/common/qat/qat_device.c              |   4 +-
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  54 +-
 drivers/compress/qat/qat_comp_pmd.c          |  14 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 762 ++++++++------
 drivers/crypto/qat/qat_asym.h                |  79 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.h              |  16 +-
 drivers/crypto/qat/qat_sym.c                 | 978 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 148 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 115 +--
 drivers/crypto/qat/qat_sym_session.h         |  15 +-
 23 files changed, 3812 insertions(+), 2739 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

--
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 01/10] common/qat: define build op request and dequeue op
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 02/10] crypto/qat: sym build op request specific implementation Kai Ji
                               ` (8 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces build-request and dequeue-op function pointers to the
QAT queue pair implementation. These two function pointers are assigned
during QAT session generation, based on the type of crypto operation.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c          | 10 ++++--
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  4 +--
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h | 13 ++++++-
 6 files changed, 76 insertions(+), 13 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index cde421eb77..ed632b5ebe 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -550,7 +550,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -817,7 +819,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..66f00943a5 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Type of the qat_op_build_request_t function pointer, passed as an
+ * argument to the enqueue op burst; a build request is assigned based
+ * on the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    output message pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context between
+ *    two enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - -EINVAL on error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Type of the qat_op_dequeue_t function pointer, passed as an argument
+ * to the dequeue op burst; a dequeue op is assigned based on the type
+ * of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if the dequeue op is successful
+ *    - -EINVAL on error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index da6404c017..8e497e7a09 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..fe875a7fd0 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * Type of the qat_sym_build_request_t function pointer, passed as an
+ * argument to the enqueue op burst; a build request is assigned based
+ * on the type of crypto op.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 02/10] crypto/qat: sym build op request specific implementation
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 01/10] common/qat: define build op request and dequeue op Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 03/10] crypto/qat: qat generation specific enqueue Kai Ji
                               ` (7 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds common inline functions for the QAT symmetric
crypto driver to process crypto ops, and the build op request
function pointer implementation for QAT generation 1.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
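For context, the per-session dispatch these helpers enable looks
roughly like the sketch below. The exact selection logic lands with the
session rework later in the series, so treat the switch and the AEAD
test as an approximation rather than the patch's own code:

/* Sketch: pick a GEN1 build-request helper from the session's firmware
 * command id; 'proc_type' indexes the per-process build_request[] slot
 * added to struct qat_sym_session earlier in the series.
 */
static void
select_gen1_build_request(struct qat_sym_session *ctx, int proc_type)
{
	switch (ctx->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ctx->build_request[proc_type] = qat_sym_build_op_cipher_gen1;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen1;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)
			/* AEAD (GCM/CCM) uses the combined request */
			ctx->build_request[proc_type] =
					qat_sym_build_op_aead_gen1;
		else
			ctx->build_request[proc_type] =
					qat_sym_build_op_chain_gen1;
		break;
	default:
		break;
	}
}
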
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..c429825a67 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 03/10] crypto/qat: qat generation specific enqueue
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 01/10] common/qat: define build op request and dequeue op Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 02/10] crypto/qat: sym build op request specific implementation Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 04/10] crypto/qat: rework session APIs Kai Ji
                               ` (6 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds the generation-specific AEAD and auth build op
enqueue functions for QAT Gen3 and Gen4.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
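A brief usage note: both helpers keep the GEN1 call shape and decide
internally whether the single-pass (SPC) layout applies, so a
build-request function can call them unconditionally. A minimal sketch
(the caller below is hypothetical; the real build ops arrive in a later
patch of this series):

/* Hypothetical caller filling an AEAD request on GEN3: the helper
 * writes the SPC layout when ctx->is_single_pass is set and otherwise
 * defers to enqueue_one_aead_job_gen1(), so no extra check is needed.
 */
static inline void
fill_aead_request_gen3(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs, data_len);
}
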
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 117 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  34 +++++-
 2 files changed, 149 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..fca7af2b7e 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,121 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..8462c0b9b1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,6 +103,38 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 04/10] crypto/qat: rework session APIs
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                               ` (2 preceding siblings ...)
  2022-02-04 18:50             ` [dpdk-dev v6 03/10] crypto/qat: qat generation specific enqueue Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
                               ` (5 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch introduces the set_session methods for different
generations of QAT. In addition, the patch replaces
'min_qat_dev_gen_id' with 'qat_dev_gen'. Thus, a session
created on one generation of QAT can no longer be used by
another generation.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
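A minimal sketch of the dispatch this enables (the wrapper name is
hypothetical; signatures follow the set_session hook registered by this
patch): session configuration asks the generation-specific ops table for
its hook, so a session prepared for one device generation is not
silently reused on another:

/* Sketch: route session setup through the per-generation table;
 * -ENOTSUP from the hook means the algorithm combination is not
 * supported on that generation.
 */
static int
set_session_for_gen(struct rte_cryptodev *dev, struct qat_sym_session *ctx,
		enum qat_device_gen qat_dev_gen)
{
	if (qat_sym_gen_dev_ops[qat_dev_gen].set_session == NULL)
		return -ENOTSUP;
	return qat_sym_gen_dev_ops[qat_dev_gen].set_session(dev, ctx);
}
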
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 +++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 139 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  91 ++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 +++------------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 10 files changed, 425 insertions(+), 107 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * but some are not supported by GEN2, so checking here
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index fca7af2b7e..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -258,6 +258,142 @@ enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
 			ICP_QAT_FW_LA_NO_PROTO);
 }
 
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN3
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -265,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -276,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 8462c0b9b1..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -135,11 +135,101 @@ enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
 	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
 }
 
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN4
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -153,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c429825a67..501132a448 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* Check none supported algs if mixed */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 05/10] crypto/qat: rework asymmetric crypto build operation
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                               ` (3 preceding siblings ...)
  2022-02-04 18:50             ` [dpdk-dev v6 04/10] crypto/qat: rework session APIs Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
                               ` (4 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch reworks the asymmetric crypto data path
implementation in the QAT driver. The change separates the
asymmetric crypto data path implementations of the different
QAT generations and shrinks the device capabilities
declaration code.
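
For context, the per-generation split follows the same dispatch-table
pattern already used on the symmetric side: each generation fills its
slot in qat_asym_gen_dev_ops[] and the common session code calls
through it. A minimal sketch of that pattern, assuming the driver's
internal headers; the "_sketch" names are illustrative and not part of
the patch:

#include <rte_common.h>
#include <rte_cryptodev.h>

#include "qat_common.h"                  /* enum qat_device_gen */
#include "qat_crypto.h"                  /* qat_asym_gen_dev_ops[] */
#include "dev/qat_crypto_pmd_gens.h"     /* GEN1 asym callbacks */

/* Each generation registers its asym callbacks at constructor time. */
RTE_INIT(qat_asym_crypto_gen1_init_sketch)
{
	qat_asym_gen_dev_ops[QAT_GEN1].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN1].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_GEN1].set_session =
			qat_asym_crypto_set_session_gen1;
}

/* Common code then dispatches on the probed device generation. */
static int
qat_asym_set_session_sketch(struct rte_cryptodev *dev,
		enum qat_device_gen qat_dev_gen, void *session)
{
	return qat_asym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
			session);
}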

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c   |   5 +-
 drivers/crypto/qat/qat_asym.c | 624 +++++++++++++++++-----------------
 drivers/crypto/qat/qat_asym.h |  63 +++-
 3 files changed, 380 insertions(+), 312 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index ed632b5ebe..c3265241a3 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -622,7 +622,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -850,7 +850,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 09d8761c5f..3d7aecd7c0 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,69 +1,119 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+#include "qat_device.h"
 
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
 {
-	size_t i;
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
 
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
 		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
 	}
-	return -1;
-}
 
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
 
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
 }
 
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
 {
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
 }
 
-static size_t max_of(int n, ...)
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
 {
-	va_list args;
-	size_t len = 0, num;
-	int i;
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
 	}
-	va_end(args);
-
-	return len;
 }
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
 {
@@ -106,7 +156,230 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length
+				- n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -475,10 +748,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -545,263 +817,7 @@ qat_asym_build_request(void *in_op,
 	return 0;
 }
 
-static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result + (asym_op->modinv.result.length
-				- n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
-}
-
-void
-qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
-{
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
-{
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
-	}
-
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
-
-unsigned int qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
-}
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 308b6b2e0b..aba49d57cb 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_ASYM_H_
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -76,7 +122,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -88,8 +136,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 06/10] crypto/qat: unify qat sym pmd apis
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                               ` (4 preceding siblings ...)
  2022-02-04 18:50             ` [dpdk-dev v6 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 07/10] crypto/qat: unify qat asym " Kai Ji
                               ` (3 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch removes qat_sym_pmd.c and integrates all of its APIs into
qat_sym.c. Keeping the QAT symmetric crypto PMD APIs in one place
makes them easier to maintain.
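
As an illustration of the capability helper macros this patch adds to
qat_sym.h, a capability table can be composed as below. This is a
hypothetical example only (the algorithms, ranges and header set are
assumptions, not the driver's real capability list); the
per-generation files under dev/ use the same helpers for their actual
capability tables.

#include <rte_cryptodev.h>

#include "qat_crypto.h"	/* CAP_RNG, CAP_RNG_ZERO */
#include "qat_sym.h"	/* QAT_SYM_*_CAP helpers */

/* Example capability table built from the new helper macros. */
static const struct rte_cryptodev_capabilities qat_sym_caps_example[] = {
	QAT_SYM_CIPHER_CAP(AES_CBC,
		.block_size = 16,
		CAP_RNG(key_size, 16, 32, 8),
		CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		.block_size = 64,
		CAP_RNG(key_size, 1, 64, 1),
		CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size),
		CAP_RNG_ZERO(iv_size)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};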

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build       |   4 +-
 drivers/common/qat/qat_device.c      |   4 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_crypto.h      |   5 +-
 drivers/crypto/qat/qat_sym.c         |  21 +++
 drivers/crypto/qat/qat_sym.h         | 147 ++++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |  11 +-
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 10 files changed, 168 insertions(+), 375 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index af92271a75..1bf6896a7e 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017-2018 Intel Corporation
+# Copyright(c) 2017-2022 Intel Corporation
 
 if is_windows
     build = false
@@ -73,7 +73,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 437996f2e8..e5459fdfd1 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2020 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 
 #include <rte_string_fns.h>
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index c3265241a3..dd9056650d 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -841,7 +841,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 5ca76fcaa6..c01266f81c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 83bf55c933..aad4b243b7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -17,6 +17,27 @@ uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..f4ff2ce4cd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
@@ -24,6 +24,67 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,6 +115,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -213,17 +290,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +353,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
 }
 
 int
@@ -293,6 +370,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +430,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 12825e448b..2576cb1be7 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #include <cryptodev_pmd.h>
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 28a26260fb..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 59fbdefa12..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 3a880096c4..9d6a19c0be 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread
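
A minimal usage sketch of the capability macros added to qat_sym.h above,
assuming the QAT_SYM_CIPHER_CAP() definition shown in the hunk is in scope.
The SKETCH_VAL()/SKETCH_RNG() helpers are hypothetical stand-ins for the
driver's own value/range helpers, which live elsewhere in the tree:

#include <rte_cryptodev.h>

/* Hypothetical helpers for this sketch only; each call expands to a single
 * designated initializer so it survives as one macro argument. */
#define SKETCH_VAL(field, v)           .field = (v)
#define SKETCH_RNG(field, lo, hi, inc) \
	.field = {.min = (lo), .max = (hi), .increment = (inc)}

/* Capability table built with QAT_SYM_CIPHER_CAP() from the hunk above. */
static const struct rte_cryptodev_capabilities sketch_sym_caps[] = {
	QAT_SYM_CIPHER_CAP(AES_CBC,
		SKETCH_VAL(block_size, 16),
		SKETCH_RNG(key_size, 16, 32, 8),
		SKETCH_RNG(iv_size, 16, 16, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

Each macro argument is pasted verbatim into the initializer of the matching
xform, which is why range values are wrapped in a parenthesised helper call
rather than a bare brace-enclosed list: braces do not protect commas from
the preprocessor, parentheses do.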

* [dpdk-dev v6 07/10] crypto/qat: unify qat asym pmd apis
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                               ` (5 preceding siblings ...)
  2022-02-04 18:50             ` [dpdk-dev v6 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 08/10] crypto/qat: op burst data path rework Kai Ji
                               ` (2 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch removes qat_asym_pmd.c and integrates all of its
functions into qat_asym.c. Unifying the asym crypto PMD APIs in
a single file makes them easier to maintain.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 1bf6896a7e..f687f5c9d8 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 3d7aecd7c0..da8d7e965c 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -19,6 +19,32 @@ uint8_t qat_asym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -817,6 +843,160 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 	return 0;
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 9a7596b227..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index f988d646e5..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread
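
A caller-side sketch of how the wrappers unified in the patch above are
reached. Nothing here is QAT-specific: it is only the standard cryptodev
burst API, whose enqueue/dequeue hooks now resolve to
qat_asym_crypto_enqueue_op_burst() and qat_asym_crypto_dequeue_op_burst()
in qat_asym.c for a QAT asym device. The helper name drive_asym_qp() is
illustrative, and dev_id/qp_id/ops are assumed to be configured and
populated elsewhere:

#include <rte_crypto.h>
#include <rte_cryptodev.h>

static void
drive_asym_qp(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t n)
{
	uint16_t enq, deq = 0;

	/* For a QAT asym cryptodev this lands in
	 * qat_asym_crypto_enqueue_op_burst(). */
	enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, n);

	/* Poll until every enqueued op has been returned by the device. */
	while (deq < enq)
		deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				ops + deq, enq - deq);
}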

* [dpdk-dev v6 08/10] crypto/qat: op burst data path rework
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                               ` (6 preceding siblings ...)
  2022-02-04 18:50             ` [dpdk-dev v6 07/10] crypto/qat: unify qat asym " Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 09/10] crypto/qat: raw dp api integration Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 10/10] crypto/qat: support out of place SG list Kai Ji
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch enables the op_build_request function in qat_enqueue_op_burst,
and the qat_dequeue_process_response function in qat_dequeue_op_burst.
The op_build_request callback invoked to build each crypto request is
selected based on the crypto operations set up during session init.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 829 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 270 insertions(+), 634 deletions(-)
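
Before the hunks below, a simplified sketch of the dispatch shape this
rework introduces: the per-service switch inside the common enqueue loop is
replaced by a build-request callback supplied by each service. The typedef,
the flat ring arithmetic and the function names here are illustrative
stand-ins, not the driver's real definitions:

#include <stdint.h>

/* Stand-in for the driver's build-request callback type. */
typedef int (*build_request_cb_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, int dev_gen);

/* Callback-driven loop: each service (sym/asym/comp) passes its own
 * builder instead of being picked by a switch on service_type. */
static uint16_t
sketch_enqueue(uint8_t *ring_base, uint32_t msg_size, void **cookies,
		uint64_t *opaque, int dev_gen, build_request_cb_t build,
		void **ops, uint16_t nb_ops)
{
	uint16_t sent;

	for (sent = 0; sent < nb_ops; sent++) {
		if (build(ops[sent], ring_base + sent * msg_size,
				cookies[sent], opaque, dev_gen) != 0)
			break;	/* request could not be built; stop early */
	}
	return sent;
}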

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index dd9056650d..9bbadc8f8e 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -550,8 +550,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -602,29 +601,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -820,8 +808,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -839,21 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 501132a448..c58a628915 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index da8d7e965c..07e3baa172 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -773,7 +773,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index aba49d57cb..72e62120c5 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -104,28 +104,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..3c009c85e3 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,67 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	uintptr_t sess = (uintptr_t)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != (uintptr_t)ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if ((void *)sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +127,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (uintptr_t)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value, typically they have, but could differ if binaries built
+	 * separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 09/10] crypto/qat: raw dp api integration
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                               ` (7 preceding siblings ...)
  2022-02-04 18:50             ` [dpdk-dev v6 08/10] crypto/qat: op burst data path rework Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-04 18:50             ` [dpdk-dev v6 10/10] crypto/qat: support out of place SG list Kai Ji
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch unifies the QAT raw data-path API implementation so that it
shares the same enqueue/dequeue methods with the crypto operation
enqueue/dequeue path. In addition, generation-specific implementations
are provided where a QAT generation requires its own handling.
qat_sym_hw_dp.c is removed as it is no longer required.
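
For reviewers unfamiliar with the consumer side, below is a minimal,
illustrative sketch (not part of the patch) of how an application is
expected to drive this unified path through the public raw data-path
API. The helper raw_dp_run_burst() and the empty post_deq() callback
are invented for the example; 'vec', 'ofs', 'user_data' and the session
are assumed to be prepared by the caller, and the exact rte_cryptodev_*
prototypes should be checked against rte_cryptodev.h of the target DPDK
release.

#include <errno.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* Per-op completion hook; a real application would record status here. */
static void
post_deq(void *user_data __rte_unused, uint32_t index __rte_unused,
		uint8_t is_op_success __rte_unused)
{
}

static int
raw_dp_run_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
		void **user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *dp_ctx;
	uint32_t n_enq, n_deq = 0, n_ok = 0;
	int enq_status = 0, deq_status = 0;
	int priv_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);

	if (priv_size < 0)
		return -ENOTSUP;

	/* The driver private area (struct qat_sym_dp_ctx for QAT) lives
	 * right after the public context, hence the tail allocation. */
	dp_ctx = rte_zmalloc(NULL, sizeof(*dp_ctx) + priv_size, 8);
	if (dp_ctx == NULL)
		return -ENOMEM;

	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp_ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		rte_free(dp_ctx);
		return -EINVAL;
	}

	/* Requests are only cached until *_enqueue_done() rings the
	 * doorbell (writes the TX tail CSR in the QAT case). */
	n_enq = rte_cryptodev_raw_enqueue_burst(dp_ctx, vec, ofs,
			user_data, &enq_status);
	if (n_enq == 0) {
		rte_free(dp_ctx);
		return -EAGAIN;
	}
	rte_cryptodev_raw_enqueue_done(dp_ctx, n_enq);

	/* Busy-poll until every cached request has a response. */
	while (n_deq < n_enq) {
		uint32_t ok = 0;
		uint32_t n = rte_cryptodev_raw_dequeue_burst(dp_ctx, NULL,
				n_enq - n_deq, post_deq, user_data + n_deq,
				1, &ok, &deq_status);

		if (n == 0)
			continue;
		rte_cryptodev_raw_dequeue_done(dp_ctx, n);
		n_deq += n;
		n_ok += ok;
	}

	rte_free(dp_ctx);
	return n_ok == n_enq ? 0 : -EIO;
}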

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |   2 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 +++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  56 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 974 -------------------
 10 files changed, 1137 insertions(+), 982 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index f687f5c9d8..b7027f3164 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 8e497e7a09..dc8db84a68 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops, uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
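
Side note on the hunk above: the old code cast qat_dequeue_op_burst to
compressdev_dequeue_pkt_burst_t, i.e. called a function through a
pointer of an incompatible type, which is undefined behaviour in C. The
new qat_comp_dequeue_burst() wrapper has the exact signature the
compressdev layer expects and forwards to the generic routine, so only
the arguments are converted. A generic, self-contained sketch of the
same pattern (backend_dequeue, subsys_dequeue and struct subsys_op are
illustrative names, not taken from the patch):

#include <stdint.h>

/* Generic backend operating on opaque handles, standing in for the
 * shared dequeue routine (illustrative only). */
static uint16_t
backend_dequeue(void *qp, void **ops, uint16_t nb_ops)
{
	(void)qp;
	(void)ops;
	/* ... real dequeue work would go here ... */
	return nb_ops;
}

struct subsys_op;	/* subsystem-specific op type (illustrative) */

/* Thin adapter with the exact signature the subsystem expects; the
 * conversion happens on the argument, not on the function pointer. */
static uint16_t
subsys_dequeue(void *qp, struct subsys_op **ops, uint16_t nb_ops)
{
	return backend_dequeue(qp, (void **)ops, nb_ops);
}
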
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 64e6ae66ec..0c64c1e43f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -291,6 +291,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index db864d973a..ffa093a7a3 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -394,6 +394,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -403,6 +615,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 7642a87d55..f803bc1459 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -223,6 +223,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -230,6 +350,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 96cdb97a26..50a9c5ad5b 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c58a628915..fee6507512 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,6 +146,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -448,6 +452,656 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c01266f81c..cf386d0ed0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
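
The new set_raw_dp_ctx member gives each QAT generation a single hook to
plug in; the generic configure path (added to qat_sym.c below) simply
dispatches through qat_sym_gen_dev_ops[] and returns -ENOTSUP when the
hook is left NULL. As a hedged sketch of how a future generation could
reuse the GEN1 helper unchanged (QAT_GENX is a hypothetical enum value,
not part of this series):

RTE_INIT(qat_sym_crypto_genx_init)
{
	/* Hypothetical future generation: no special single-pass handling
	 * needed, so reuse the GEN1 raw data-path configure helper. */
	qat_sym_gen_dev_ops[QAT_GENX].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen1;
}
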
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 3c009c85e3..e116bb81a5 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -88,7 +88,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -143,7 +143,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -291,8 +291,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (internals->capa_mz == NULL) {
 			QAT_LOG(DEBUG,
 				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
+				"destroying PMD for %s", name);
 			ret = -EFAULT;
 			goto error;
 		}
@@ -354,6 +353,55 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 2576cb1be7..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,974 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-		cipher_param->spc_aad_addr = aad->iova;
-		cipher_param->spc_auth_res_addr = digest->iova;
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v6 10/10] crypto/qat: support out of place SG list
  2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                               ` (8 preceding siblings ...)
  2022-02-04 18:50             ` [dpdk-dev v6 09/10] crypto/qat: raw dp api integration Kai Ji
@ 2022-02-04 18:50             ` Kai Ji
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  9 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-04 18:50 UTC (permalink / raw)
  To: dev; +Cc: Kai Ji

This patch adds out-of-place SGL support to the QAT PMD.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)
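
For context, a minimal caller-side sketch (not part of the patch) of how an
application could exercise the new out-of-place path through the raw datapath
API. It assumes the rte_cryptodev raw datapath API of this release
(rte_cryptodev_raw_enqueue_burst() and the dest_sgl member of struct
rte_crypto_sym_vec); the buffer vectors, IV/digest/AAD pointers, offsets and
the raw_dp_ctx are assumed to be prepared elsewhere by the application.

#include <rte_cryptodev.h>

/* Caller-side sketch only: setting dest_sgl makes the enqueue functions in
 * this patch take the out-of-place branch; leaving it NULL keeps the
 * in-place path. Total source and destination lengths must match.
 */
static uint32_t
enqueue_one_oop_sgl_job(struct rte_crypto_raw_dp_ctx *dp_ctx,
	struct rte_crypto_vec *src_vec, uint16_t n_src,
	struct rte_crypto_vec *dst_vec, uint16_t n_dst,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, void *user_data)
{
	int32_t status = 0;
	int enq_status = 0;
	uint32_t enq;
	struct rte_crypto_sgl src_sgl = { .vec = src_vec, .num = n_src };
	struct rte_crypto_sgl dst_sgl = { .vec = dst_vec, .num = n_dst };
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = &src_sgl,
		.dest_sgl = &dst_sgl,
		.iv = iv,
		.digest = digest,
		.aad = aad,
		.status = &status,
	};

	enq = rte_cryptodev_raw_enqueue_burst(dp_ctx, &vec, ofs,
			&user_data, &enq_status);
	/* followed by rte_cryptodev_raw_enqueue_done(dp_ctx, enq) */
	return enq;
}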

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index ffa093a7a3..5084a5fcd1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -468,8 +468,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -565,8 +575,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index f803bc1459..bd7f3785df 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -297,8 +297,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index fee6507512..83d9b66f34 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -526,9 +526,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -625,8 +634,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -725,8 +744,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -830,8 +859,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-04 18:50             ` [dpdk-dev v6 10/10] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-08 18:14               ` Kai Ji
  2022-02-08 18:14                 ` [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op Kai Ji
                                   ` (11 more replies)
  0 siblings, 12 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patchset reworks the QAT symmetric crypto datapath implementation so
that request building is separated per generation and the crypto operations
under the raw datapath API implementation are unified.

In addition, this patchset enables QAT out-of-place (OOP) support in the raw
datapath API implementation.

v7:
- fix of pointer cast compile error in x86

v6:
- fix of pointer cast error in x86
- rebase to the lastest for-main

v5:
- rebase to the latest for-main
- patchset reconstruct

v4:
- patchset break down and reconstruct

v3:
- separate the single patch 6 into two different patches

v2:
- review comments addressed

Kai Ji (10):
  common/qat: define build op request and dequeue op
  crypto/qat: sym build op request specific implementation
  crypto/qat: qat generation specific enqueue
  crypto/qat: rework session APIs
  crypto/qat: rework asymmetric crypto build operation
  crypto/qat: unify qat sym pmd apis
  crypto/qat: unify qat asym pmd apis
  crypto/qat: op burst data path rework
  crypto/qat: raw dp api integration
  crypto/qat: support out of place SG list

 drivers/common/qat/meson.build               |   6 +-
 drivers/common/qat/qat_device.c              |   4 +-
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  54 +-
 drivers/compress/qat/qat_comp_pmd.c          |  14 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 762 ++++++++------
 drivers/crypto/qat/qat_asym.h                |  79 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.h              |  16 +-
 drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 148 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 115 +--
 drivers/crypto/qat/qat_sym_session.h         |  15 +-
 23 files changed, 3813 insertions(+), 2739 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

--
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:22                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 02/10] crypto/qat: sym build op request specific implementation Kai Ji
                                   ` (10 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch introduces build-request and dequeue-op function
pointers to the QAT queue pair implementation. These two functions
are assigned during QAT session generation based on the
crypto operation.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c          | 10 ++++--
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  4 +--
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h | 13 ++++++-
 6 files changed, 76 insertions(+), 13 deletions(-)
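
To illustrate how these hooks are meant to be used once the rest of the
series fills them in (in this patch the new parameters are still
__rte_unused), a hypothetical caller might look like the sketch below.
my_sym_build_request and my_sym_process_response are placeholder names that
merely match the typedefs introduced here; they are not functions added by
this patch.

#include <rte_crypto.h>
#include "qat_qp.h"

/* Placeholder callbacks matching qat_op_build_request_t and qat_op_dequeue_t;
 * real implementations are introduced by later patches in this series.
 */
static int my_sym_build_request(void *in_op, uint8_t *out_msg,
	void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
static int my_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
	uint64_t *dequeue_err_count);

static uint16_t
my_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* the build-request callback is selected per crypto operation */
	return qat_enqueue_op_burst(qp, my_sym_build_request,
			(void **)ops, nb_ops);
}

static uint16_t
my_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops,
			my_sym_process_response, nb_ops);
}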

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index cde421eb77..ed632b5ebe 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -550,7 +550,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -817,7 +819,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..66f00943a5 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Type definition of the qat_op_build_request_t function pointer, passed as
+ * an argument to the enqueue op burst, where a build request is assigned
+ * based on the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context between
+ *    two enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - EINVAL if error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Type definition of the qat_op_dequeue_t function pointer, passed as an
+ * argument to the dequeue op burst, where a dequeue op is assigned based
+ * on the type of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - EINVAL if error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index da6404c017..8e497e7a09 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..fe875a7fd0 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * Typedef of the qat_op_build_request_t function pointer, passed as an
+ * argument to the enqueue op burst, where a build request is assigned
+ * based on the type of crypto op.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 02/10] crypto/qat: sym build op request specific implementation
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-02-08 18:14                 ` [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:22                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue Kai Ji
                                   ` (9 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds common inline functions for the QAT symmetric
crypto driver to process crypto ops, and the build op request
function pointer implementations for QAT generation 1.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)
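
To show how the new helpers are intended to fit together, below is a hedged
sketch (not part of the patch) of a generation 1 style cipher build-request
function: it converts the crypto op into vectors with
qat_sym_convert_op_to_vec_cipher() and then fills the firmware request with
qat_sym_build_req_set_data(). The function name and the omitted
generation-specific cipher parameter setup are placeholders; the
driver-internal header is assumed to provide the types used.

#include <stdint.h>
#include <rte_memcpy.h>
#include "qat_crypto_pmd_gens.h"

/* Sketch only: matches the qat_sym_build_request_t shape from the previous
 * patch; example_build_cipher_request is a placeholder name.
 */
static int
example_build_cipher_request(void *in_op, struct qat_sym_session *ctx,
	uint8_t *out_msg, void *op_cookie)
{
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct icp_qat_fw_la_bulk_req *req =
			(struct icp_qat_fw_la_bulk_req *)out_msg;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_vec out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_sgl in_sgl = { .vec = in_vec, .num = 0 };
	struct rte_crypto_sgl out_sgl = { .vec = out_vec, .num = 0 };
	struct rte_crypto_va_iova_ptr cipher_iv, aad, digest;
	int32_t data_len;

	/* split the op into src/dst vectors and fetch the cipher IV */
	if (qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest) == UINT64_MAX)
		return -1;

	/* start from the session's request template, then set the data */
	rte_mov128((uint8_t *)req, (const uint8_t *)&ctx->fw_req);
	data_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (data_len < 0)
		return -1;

	/* generation-specific cipher parameters (IV, offsets, lengths)
	 * would be filled in here, e.g. by an enqueue_one_cipher_job()
	 * style helper.
	 */
	return 0;
}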

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
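+		/* AES-CCM: build the CCM B0 block (flags byte, nonce and
+		 * encoded message length) and, when AAD is present, the
+		 * encoded AAD length plus zero padding, in the layout the
+		 * firmware expects; the nonce is also copied into the
+		 * request IV field.
+		 */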
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..c429825a67 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-02-08 18:14                 ` [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op Kai Ji
  2022-02-08 18:14                 ` [dpdk-dev v7 02/10] crypto/qat: sym build op request specific implementation Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:23                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 04/10] crypto/qat: rework session APIs Kai Ji
                                   ` (8 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds the generation-specific AEAD and auth build op enqueue
functions for QAT GEN3 and GEN4.
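
For context, the new GEN3/GEN4 helpers keep the GEN1 code as the common
baseline and only override the request layout when the session enables a
generation-specific feature such as single-pass AEAD or single-pass GMAC.
A minimal stand-alone C sketch of that fallback pattern, illustrative only
and using simplified stand-in types rather than the driver's real
structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in types; the real ones live in the QAT PMD headers. */
struct session { bool is_single_pass; };
struct request { const char *layout; };

static void enqueue_aead_gen1(struct session *s, struct request *r)
{
	(void)s;
	r->layout = "legacy AEAD request layout";
}

/* Same shape as enqueue_one_aead_job_gen3() below: use the single-pass
 * layout when the session allows it, otherwise fall back to the GEN1
 * helper unchanged.
 */
static void enqueue_aead_gen3(struct session *s, struct request *r)
{
	if (s->is_single_pass) {
		r->layout = "single-pass AEAD request layout";
		return;
	}
	enqueue_aead_gen1(s, r);
}

int main(void)
{
	struct session spc = { .is_single_pass = true };
	struct session legacy = { .is_single_pass = false };
	struct request r1, r2;

	enqueue_aead_gen3(&spc, &r1);
	enqueue_aead_gen3(&legacy, &r2);
	printf("%s / %s\n", r1.layout, r2.layout);
	return 0;
}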

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 117 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  34 +++++-
 2 files changed, 149 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..fca7af2b7e 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,121 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..8462c0b9b1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,6 +103,38 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 04/10] crypto/qat: rework session APIs
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (2 preceding siblings ...)
  2022-02-08 18:14                 ` [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:23                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
                                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch introduces the set_session methods for the different
generations of QAT. In addition, the patch replaces
'min_qat_dev_gen' with 'qat_dev_gen'. As a result, a session
created for one generation of QAT can no longer be used by a
device of another generation.
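
For context, the per-generation set_session hooks added below follow a
plain dispatch-table pattern: each generation registers its hook in
qat_sym_gen_dev_ops[] and session setup calls the hook for the probed
device's generation. A minimal stand-alone sketch of that pattern,
illustrative only and using simplified stand-in names rather than the
driver's real definitions:

#include <stdio.h>

/* Simplified stand-ins for the driver's per-generation dispatch table. */
enum gen { GEN1, GEN2, GEN3, GEN4, N_GENS };

typedef int (*set_session_t)(void *cryptodev, void *session);

struct gen_dev_ops { set_session_t set_session; };

static struct gen_dev_ops gen_dev_ops[N_GENS];

static int set_session_gen1(void *dev, void *sess)
{
	(void)dev; (void)sess;
	printf("baseline session setup\n");
	return 0;
}

static int set_session_gen3(void *dev, void *sess)
{
	/* Reuse the baseline, then apply generation-specific overrides
	 * (e.g. single-pass build requests in the real driver).
	 */
	int ret = set_session_gen1(dev, sess);

	printf("GEN3 overrides applied\n");
	return ret;
}

int main(void)
{
	enum gen dev_gen = GEN3;	/* generation of the probed device */

	gen_dev_ops[GEN1].set_session = set_session_gen1;
	gen_dev_ops[GEN3].set_session = set_session_gen3;

	/* Session setup dispatches on the device generation, so a session
	 * is always finalized with that generation's settings.
	 */
	return gen_dev_ops[dev_gen].set_session(NULL, NULL);
}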

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 +++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 139 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  91 ++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 +++------------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 10 files changed, 425 insertions(+), 107 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returns -ENOTSUP as it cannot handle some mixed
+		 * algorithms; GEN2 supports them only when the mixed
+		 * crypto capability is present, so check it here.
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index fca7af2b7e..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -258,6 +258,142 @@ enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
 			ICP_QAT_FW_LA_NO_PROTO);
 }
 
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returns -ENOTSUP as it cannot handle some mixed
+		 * algorithms; GEN3 handles them here.
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -265,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -276,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 8462c0b9b1..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -135,11 +135,101 @@ enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
 	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
 }
 
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returns -ENOTSUP as it cannot handle some mixed
+		 * algorithms; GEN4 handles them here.
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -153,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c429825a67..501132a448 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* Check none supported algs if mixed */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build operation
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (3 preceding siblings ...)
  2022-02-08 18:14                 ` [dpdk-dev v7 04/10] crypto/qat: rework session APIs Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:23                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
                                   ` (6 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch reworks the asymmetric crypto data path
implementation in the QAT driver. The change separates the
asymmetric crypto data path implementations of the different
QAT generations and shrinks the device capabilities
declaration code.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
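Not part of the diff: a minimal, self-contained C sketch of the
callback-driven enqueue idea this rework relies on, where the generic
queue-pair loop receives the service-specific request builder as a
function pointer instead of hard-coding it. All names below
(build_request_t, toy_enqueue_burst) are invented for the sketch and
are not the driver's real symbols.

#include <stdint.h>
#include <stddef.h>

/* shape of a per-service request builder, loosely modelled on the
 * opaque/dev_gen parameters added by this patch */
typedef int (*build_request_t)(void *op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, int dev_gen);

static uint16_t
toy_enqueue_burst(uint8_t *ring, size_t msg_size, void **ops,
		uint16_t nb_ops, build_request_t build_request,
		void **cookies, uint64_t *opaque, int dev_gen)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		/* each service supplies its own request builder */
		if (build_request(ops[i], ring + i * msg_size,
				cookies[i], opaque, dev_gen) != 0)
			break;
	}
	return i; /* number of descriptors actually built */
}
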
 drivers/common/qat/qat_qp.c   |   5 +-
 drivers/crypto/qat/qat_asym.c | 624 +++++++++++++++++-----------------
 drivers/crypto/qat/qat_asym.h |  63 +++-
 3 files changed, 380 insertions(+), 312 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index ed632b5ebe..c3265241a3 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -622,7 +622,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -850,7 +850,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 09d8761c5f..3d7aecd7c0 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,69 +1,119 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+#include "qat_device.h"
 
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool)
 {
-	size_t i;
+	int err = 0;
+	void *sess_private_data;
+	struct qat_asym_session *session;
 
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		QAT_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = sess_private_data;
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		if (xform->modex.exponent.length == 0 ||
+				xform->modex.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod exp input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		if (xform->modinv.modulus.length == 0) {
+			QAT_LOG(ERR, "Invalid mod inv input parameter");
+			err = -EINVAL;
+			goto error;
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		if (xform->rsa.n.length == 0) {
+			QAT_LOG(ERR, "Invalid rsa input parameter");
+			err = -EINVAL;
+			goto error;
 		}
+	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
+		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
+		err = -EINVAL;
+		goto error;
+	} else {
+		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
+		err = -EINVAL;
+		goto error;
 	}
-	return -1;
-}
 
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	session->xform = xform;
+	qat_asym_build_req_tmpl(sess_private_data);
+	set_asym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
 
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	return 0;
+error:
+	rte_mempool_put(mempool, sess_private_data);
+	return err;
 }
 
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
+unsigned int
+qat_asym_session_get_private_size(
+		struct rte_cryptodev *dev __rte_unused)
 {
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
-
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
 }
 
-static size_t max_of(int n, ...)
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess)
 {
-	va_list args;
-	size_t len = 0, num;
-	int i;
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(sess, index);
+	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+	if (sess_priv) {
+		memset(s, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
+		set_asym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
 	}
-	va_end(args);
-
-	return len;
 }
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
+
+
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
 {
@@ -106,7 +156,230 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	size_t alg_size, alg_size_in_bytes = 0;
+	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		rte_crypto_param n = xform->modex.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modexp_result = asym_op->modex.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modexp_result +
+				(asym_op->modex.result.length -
+					n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length
+				);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		rte_crypto_param n = xform->modinv.modulus;
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		uint8_t *modinv_result = asym_op->modinv.result.data;
+
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+			rte_memcpy(modinv_result +
+				(asym_op->modinv.result.length
+				- n.length),
+				cookie->output_array[0] + alg_size_in_bytes
+				- n.length, n.length);
+			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
+					cookie->output_array[0],
+					alg_size_in_bytes);
+#endif
+		}
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+
+		alg_size = cookie->alg_size;
+		alg_size_in_bytes = alg_size >> 3;
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_VERIFY) {
+				uint8_t *rsa_result = asym_op->rsa.cipher.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+							cookie->output_array[0],
+							alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+			}
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				uint8_t *rsa_result = asym_op->rsa.message.data;
+
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_SUCCESS;
+					break;
+				default:
+					QAT_LOG(ERR, "Padding not supported");
+					rx_op->status =
+						RTE_CRYPTO_OP_STATUS_ERROR;
+					break;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
+						rsa_result, alg_size_in_bytes);
+#endif
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				uint8_t *rsa_result = asym_op->rsa.sign.data;
+
+				rte_memcpy(rsa_result,
+						cookie->output_array[0],
+						alg_size_in_bytes);
+				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->output_array[0],
+						alg_size_in_bytes);
+#endif
+			}
+		}
+	}
+	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
+}
+
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
+{
+	struct qat_asym_session *ctx;
+	struct icp_qat_fw_pke_resp *resp_msg =
+			(struct icp_qat_fw_pke_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque);
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	if (cookie->error) {
+		cookie->error = 0;
+		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		QAT_DP_LOG(ERR, "Cookie status returned error");
+	} else {
+		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric response status"
+					" returned error");
+		}
+		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
+			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			QAT_DP_LOG(ERR, "Asymmetric common status"
+					" returned error");
+		}
+	}
+
+	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)get_asym_session_private_data(
+			rx_op->asym->session, qat_asym_driver_id);
+		qat_asym_collect_response(rx_op, cookie, ctx->xform);
+	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
+	}
+	*op = rx_op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
+			sizeof(struct icp_qat_fw_pke_resp));
+#endif
+
+	return 1;
+}
+
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -475,10 +748,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -545,263 +817,7 @@ qat_asym_build_request(void *in_op,
 	return 0;
 }
 
-static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
-		struct qat_asym_op_cookie *cookie,
-		struct rte_crypto_asym_xform *xform)
-{
-	size_t alg_size, alg_size_in_bytes = 0;
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
-
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		rte_crypto_param n = xform->modex.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modexp_result = asym_op->modex.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modexp_result +
-				(asym_op->modex.result.length -
-					n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length
-				);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		rte_crypto_param n = xform->modinv.modulus;
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		uint8_t *modinv_result = asym_op->modinv.result.data;
-
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-			rte_memcpy(modinv_result + (asym_op->modinv.result.length
-				- n.length),
-				cookie->output_array[0] + alg_size_in_bytes
-				- n.length, n.length);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-			QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result",
-					cookie->output_array[0],
-					alg_size_in_bytes);
-#endif
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-
-		alg_size = cookie->alg_size;
-		alg_size_in_bytes = alg_size >> 3;
-		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
-				asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_VERIFY) {
-				uint8_t *rsa_result = asym_op->rsa.cipher.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-							cookie->output_array[0],
-							alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-			}
-		} else {
-			if (asym_op->rsa.op_type ==
-					RTE_CRYPTO_ASYM_OP_DECRYPT) {
-				uint8_t *rsa_result = asym_op->rsa.message.data;
-
-				switch (asym_op->rsa.pad) {
-				case RTE_CRYPTO_RSA_PADDING_NONE:
-					rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_SUCCESS;
-					break;
-				default:
-					QAT_LOG(ERR, "Padding not supported");
-					rx_op->status =
-						RTE_CRYPTO_OP_STATUS_ERROR;
-					break;
-				}
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message",
-						rsa_result, alg_size_in_bytes);
-#endif
-			} else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
-				uint8_t *rsa_result = asym_op->rsa.sign.data;
-
-				rte_memcpy(rsa_result,
-						cookie->output_array[0],
-						alg_size_in_bytes);
-				rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
-						cookie->output_array[0],
-						alg_size_in_bytes);
-#endif
-			}
-		}
-	}
-	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
-}
-
-void
-qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
-{
-	struct qat_asym_session *ctx;
-	struct icp_qat_fw_pke_resp *resp_msg =
-			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
-			(resp_msg->opaque);
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	if (cookie->error) {
-		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		QAT_DP_LOG(ERR, "Cookie status returned error");
-	} else {
-		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric response status"
-					" returned error");
-		}
-		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-			QAT_DP_LOG(ERR, "Asymmetric common status"
-					" returned error");
-		}
-	}
-
-	if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_asym_session *)get_asym_session_private_data(
-			rx_op->asym->session, qat_asym_driver_id);
-		qat_asym_collect_response(rx_op, cookie, ctx->xform);
-	} else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform);
-	}
-	*op = rx_op;
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
-			sizeof(struct icp_qat_fw_pke_resp));
-#endif
-}
-
-int
-qat_asym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_asym_xform *xform,
-		struct rte_cryptodev_asym_session *sess,
-		struct rte_mempool *mempool)
-{
-	int err = 0;
-	void *sess_private_data;
-	struct qat_asym_session *session;
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		QAT_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	session = sess_private_data;
-	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
-		if (xform->modex.exponent.length == 0 ||
-				xform->modex.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod exp input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
-		if (xform->modinv.modulus.length == 0) {
-			QAT_LOG(ERR, "Invalid mod inv input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
-		if (xform->rsa.n.length == 0) {
-			QAT_LOG(ERR, "Invalid rsa input parameter");
-			err = -EINVAL;
-			goto error;
-		}
-	} else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
-			|| xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) {
-		QAT_LOG(ERR, "Invalid asymmetric crypto xform");
-		err = -EINVAL;
-		goto error;
-	} else {
-		QAT_LOG(ERR, "Asymmetric crypto xform not implemented");
-		err = -EINVAL;
-		goto error;
-	}
-
-	session->xform = xform;
-	qat_asym_build_req_tmpl(sess_private_data);
-	set_asym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-error:
-	rte_mempool_put(mempool, sess_private_data);
-	return err;
-}
-
-unsigned int qat_asym_session_get_private_size(
-		struct rte_cryptodev *dev __rte_unused)
-{
-	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
-}
-
-void
-qat_asym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_asym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_asym_session_private_data(sess, index);
-	struct qat_asym_session *s = (struct qat_asym_session *)sess_priv;
-
-	if (sess_priv) {
-		memset(s, 0, qat_asym_session_get_private_size(dev));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-
-		set_asym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 308b6b2e0b..aba49d57cb 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_ASYM_H_
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -76,7 +122,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -88,8 +136,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (4 preceding siblings ...)
  2022-02-08 18:14                 ` [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:24                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 07/10] crypto/qat: unify qat asym " Kai Ji
                                   ` (5 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch removes qat_sym_pmd.c and integrates all of its APIs into
qat_sym.c. Unifying the QAT symmetric crypto PMD APIs in one file
should make them easier to maintain.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
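Not part of the diff: a minimal sketch (plain C, no DPDK headers) of
the cookie-initialisation pattern that moves into qat_sym.c here. Each
per-op cookie pre-computes the IOVA of its embedded members once with
offsetof(), so the hot path never recalculates them. toy_virt2iova()
and the toy_* types are illustrative stand-ins, not the driver's real
symbols.

#include <stdint.h>
#include <stddef.h>

struct toy_sgl { uint64_t entries[4]; };

struct toy_op_cookie {
	uint64_t sgl_src_phys_addr;
	uint64_t sgl_dst_phys_addr;
	struct toy_sgl sgl_src;
	struct toy_sgl sgl_dst;
};

static uint64_t
toy_virt2iova(const void *va)
{
	/* stand-in for an address translation helper; identity map here */
	return (uint64_t)(uintptr_t)va;
}

static void
toy_init_op_cookie(struct toy_op_cookie *cookie)
{
	uint64_t base = toy_virt2iova(cookie);

	/* record member addresses once at setup time */
	cookie->sgl_src_phys_addr = base +
			offsetof(struct toy_op_cookie, sgl_src);
	cookie->sgl_dst_phys_addr = base +
			offsetof(struct toy_op_cookie, sgl_dst);
}
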
 drivers/common/qat/meson.build       |   4 +-
 drivers/common/qat/qat_device.c      |   4 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_crypto.h      |   5 +-
 drivers/crypto/qat/qat_sym.c         |  21 +++
 drivers/crypto/qat/qat_sym.h         | 147 ++++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |  11 +-
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 10 files changed, 168 insertions(+), 375 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index af92271a75..1bf6896a7e 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017-2018 Intel Corporation
+# Copyright(c) 2017-2022 Intel Corporation
 
 if is_windows
     build = false
@@ -73,7 +73,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 437996f2e8..e5459fdfd1 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2020 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 
 #include <rte_string_fns.h>
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index c3265241a3..dd9056650d 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -841,7 +841,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 5ca76fcaa6..c01266f81c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 83bf55c933..aad4b243b7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -17,6 +17,27 @@ uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..f4ff2ce4cd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
@@ -24,6 +24,67 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,6 +115,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -213,17 +290,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +353,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
 }
 
 int
@@ -293,6 +370,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +430,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 12825e448b..2576cb1be7 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #include <cryptodev_pmd.h>
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 28a26260fb..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 59fbdefa12..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 3a880096c4..9d6a19c0be 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 07/10] crypto/qat: unify qat asym pmd apis
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (5 preceding siblings ...)
  2022-02-08 18:14                 ` [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:24                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 08/10] crypto/qat: op burst data path rework Kai Ji
                                   ` (4 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch removes qat_asym_pmd.c and integrates all of its
functions into qat_asym.c. Unifying the QAT asymmetric crypto
PMD APIs in one file should make them easier to maintain.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
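Not part of the diff: an illustrative sketch of the per-generation
dispatch pattern the unified file relies on, where an array indexed by
device generation holds generation-specific ops and device creation
simply picks the slot (a NULL slot means the generation lacks the
service). All names (toy_*, TOY_N_GENS) are invented for the sketch
and are not the driver's real symbols.

#include <stdio.h>

enum toy_dev_gen { TOY_GEN1, TOY_GEN2, TOY_GEN3, TOY_N_GENS };

struct toy_gen_ops {
	const char *(*get_name)(void);
};

static const char *gen1_name(void) { return "gen1-asym"; }
static const char *gen3_name(void) { return "gen3-asym"; }

/* slots left NULL mean "generation does not support this service" */
static struct toy_gen_ops toy_asym_gen_ops[TOY_N_GENS] = {
	[TOY_GEN1] = { .get_name = gen1_name },
	[TOY_GEN3] = { .get_name = gen3_name },
};

static int
toy_asym_dev_create(enum toy_dev_gen gen)
{
	const struct toy_gen_ops *ops = &toy_asym_gen_ops[gen];

	if (ops->get_name == NULL) {
		fprintf(stderr, "generation %d lacks asym support\n", gen);
		return -1;
	}
	printf("creating %s\n", ops->get_name());
	return 0;
}

int main(void)
{
	return toy_asym_dev_create(TOY_GEN3);
}
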
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 1bf6896a7e..f687f5c9d8 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 3d7aecd7c0..da8d7e965c 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -19,6 +19,32 @@ uint8_t qat_asym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev,
 		struct rte_crypto_asym_xform *xform,
@@ -817,6 +843,160 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 	return 0;
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 9a7596b227..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index f988d646e5..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 08/10] crypto/qat: op burst data path rework
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (6 preceding siblings ...)
  2022-02-08 18:14                 ` [dpdk-dev v7 07/10] crypto/qat: unify qat asym " Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:24                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 09/10] crypto/qat: raw dp api integration Kai Ji
                                   ` (3 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch enables the op_build_request function in qat_enqueue_op_burst,
and the qat_dequeue_process_response function in qat_dequeue_op_burst.
The op_build_request invoked when building a crypto request is selected
based on the crypto operations set up during session init.
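
A minimal, self-contained sketch of the callback-injection pattern this
change moves to is shown below; all demo_* names are hypothetical
stand-ins for illustration only, not the driver's real symbols:

#include <stdint.h>
#include <stdio.h>

/* Service-specific request builder, injected by the caller instead of
 * being selected through a service-type switch inside the burst function.
 */
typedef int (*demo_build_request_t)(void *op, uint8_t *out_msg);

static uint16_t
demo_enqueue_burst(uint8_t *ring, demo_build_request_t build,
		void **ops, uint16_t nb_ops)
{
	uint16_t sent = 0;

	while (sent < nb_ops) {
		/* one 64-byte descriptor slot per op in this toy ring */
		if (build(ops[sent], ring + sent * 64) != 0)
			break;
		sent++;
	}
	return sent;
}

/* A symmetric-crypto style builder registered through the callback. */
static int
demo_sym_build_request(void *op, uint8_t *out_msg)
{
	(void)op;
	out_msg[0] = 0x1;	/* pretend firmware opcode */
	return 0;
}

int
main(void)
{
	uint8_t ring[4 * 64] = {0};
	void *ops[4] = {ring, ring, ring, ring};
	uint16_t n = demo_enqueue_burst(ring, demo_sym_build_request, ops, 4);

	printf("enqueued %u ops\n", (unsigned int)n);
	return 0;
}

The same idea applies on the dequeue side: the response-processing
callback is passed into the common dequeue burst function rather than
chosen per service type inside it.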

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 830 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 271 insertions(+), 634 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index dd9056650d..9bbadc8f8e 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -550,8 +550,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -602,29 +601,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -820,8 +808,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -839,21 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 501132a448..c58a628915 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index da8d7e965c..07e3baa172 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -773,7 +773,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index aba49d57cb..72e62120c5 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -104,28 +104,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..692e1f8f0a 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,68 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	uintptr_t sess = (uintptr_t)opaque[0];
+	uintptr_t build_request_p = (uintptr_t)opaque[1];
+	qat_sym_build_request_t build_request = (void *)build_request_p;
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != (uintptr_t)ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if ((void *)sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +128,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (uintptr_t)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value; typically they do, but they could differ if the binaries are
+	 * built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 09/10] crypto/qat: raw dp api integration
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (7 preceding siblings ...)
  2022-02-08 18:14                 ` [dpdk-dev v7 08/10] crypto/qat: op burst data path rework Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:25                   ` Zhang, Roy Fan
  2022-02-08 18:14                 ` [dpdk-dev v7 10/10] crypto/qat: support out of place SG list Kai Ji
                                   ` (2 subsequent siblings)
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch unifies QAT's raw data-path API implementations so that they
share the same enqueue/dequeue methods as the crypto operation
enqueue/dequeue path. In addition, generation-specific implementations
are provided for each QAT generation that needs them.
The file qat_sym_hw_dp.c is removed as it is no longer required.
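
The per-generation setup follows a "configure the GEN1 baseline, then
override what the newer generation does differently" pattern. A minimal
sketch of that idea follows; the demo_* names are hypothetical and only
the structure mirrors the patch:

#include <stdio.h>

/* Hypothetical raw data-path context carrying an enqueue callback. */
struct demo_raw_dp_ctx {
	int (*enqueue)(void *qp, void *job);
};

static int
demo_enqueue_gen1(void *qp, void *job)
{
	(void)qp; (void)job;
	puts("gen1 enqueue");
	return 0;
}

static int
demo_enqueue_single_pass_gen3(void *qp, void *job)
{
	(void)qp; (void)job;
	puts("gen3 single-pass enqueue");
	return 0;
}

static int
demo_configure_gen1(struct demo_raw_dp_ctx *ctx)
{
	ctx->enqueue = demo_enqueue_gen1;
	return 0;
}

static int
demo_configure_gen3(struct demo_raw_dp_ctx *ctx, int is_single_pass)
{
	int ret = demo_configure_gen1(ctx);	/* baseline first */

	if (ret < 0)
		return ret;
	if (is_single_pass)	/* then override only what GEN3 changes */
		ctx->enqueue = demo_enqueue_single_pass_gen3;
	return 0;
}

int
main(void)
{
	struct demo_raw_dp_ctx ctx;

	demo_configure_gen3(&ctx, 1);
	return ctx.enqueue(NULL, NULL);
}

This keeps the GEN1 code as the single shared implementation and limits
each newer generation to the handful of callbacks it actually replaces.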

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |   2 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 +++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  56 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 974 -------------------
 10 files changed, 1137 insertions(+), 982 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index f687f5c9d8..b7027f3164 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 8e497e7a09..dc8db84a68 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 64e6ae66ec..0c64c1e43f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -291,6 +291,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index db864d973a..ffa093a7a3 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -394,6 +394,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -403,6 +615,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
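
All the enqueue helpers in this series share the same byte-offset ring arithmetic: the descriptor slot (and therefore the op_cookies[] index) is tail >> trailz, and the tail advances by msg_size and wraps with modulo_mask. A minimal sketch of that arithmetic, using made-up ring dimensions (8 descriptors of 64 bytes, not taken from the driver):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	const uint32_t msg_size = 64, trailz = 6;	/* 64 == 1 << 6 */
	const uint32_t modulo_mask = 8 * msg_size - 1;	/* ring of 8 slots */
	uint32_t tail = 7 * msg_size;			/* last slot */

	assert((tail >> trailz) == 7);			/* op_cookies[] index */
	tail = (tail + msg_size) & modulo_mask;		/* advance and wrap */
	assert(tail == 0);				/* back to slot 0 */
	return 0;
}
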
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 7642a87d55..f803bc1459 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -223,6 +223,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -230,6 +350,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 96cdb97a26..50a9c5ad5b 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
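
For context, the prototypes above back the cryptodev raw data-path API, in which enqueue/dequeue calls only stage work and the tail/head CSRs are written by the *_done() callbacks. A minimal caller-side sketch, assuming the raw_dp_ctx is already configured and that vec, ofs, user_data, out_user_data and post_cb are prepared elsewhere by the application:

#include <rte_cryptodev.h>

/* Illustrative only, not part of the patch. */
static uint32_t
raw_dp_roundtrip(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, void **out_user_data,
	rte_cryptodev_raw_post_dequeue_t post_cb)
{
	uint32_t n, n_ok = 0;
	int enq_status, deq_status;

	/* stage up to vec->num jobs; the tail CSR is not written yet */
	n = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, user_data,
			&enq_status);
	/* ring the doorbell once for everything staged above */
	rte_cryptodev_raw_enqueue_done(ctx, n);

	/* harvest up to n responses, then advance the head CSR once */
	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, n, post_cb,
			out_user_data, 1, &n_ok, &deq_status);
	rte_cryptodev_raw_dequeue_done(ctx, n);

	return n_ok;
}
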
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c58a628915..fee6507512 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,6 +146,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -448,6 +452,656 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
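
Note that the burst enqueue functions above may accept fewer jobs than vec->num: QAT_SYM_DP_GET_MAX_ENQ() subtracts both the hardware in-flight count and any jobs already staged but not yet flushed by enqueue_done(), and the rejected tail of the vector is marked failed via qat_sym_dp_fill_vec_status(). A small worked example of that arithmetic with made-up queue numbers (not taken from the driver):

#include <stdint.h>
#include <assert.h>
#include <rte_common.h>		/* RTE_MIN */

int main(void)
{
	/* hypothetical queue state; the unsigned wrap-around is intentional
	 * and mirrors the macro's arithmetic
	 */
	uint32_t max_inflights = 128, enqueued = 200, dequeued = 100;
	uint32_t cached = 8, requested = 64;
	uint32_t room = max_inflights - enqueued + dequeued - cached;

	assert(room == 20);
	assert(RTE_MIN(room, requested) == 20);	/* 44 of 64 jobs rejected */
	return 0;
}
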
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c01266f81c..cf386d0ed0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 692e1f8f0a..1ccffad5ab 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -89,7 +89,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -144,7 +144,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -292,8 +292,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (internals->capa_mz == NULL) {
 			QAT_LOG(DEBUG,
 				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
+				"destroying PMD for %s", name);
 			ret = -EFAULT;
 			goto error;
 		}
@@ -355,6 +354,55 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
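
qat_sym_get_dp_ctx_size() reports only the driver-private size because the generic cryptodev layer is expected to add sizeof(struct rte_crypto_raw_dp_ctx) on top and hand the driver one contiguous allocation, with struct qat_sym_dp_ctx living in the flexible drv_ctx_data[] tail; this is also why the !is_update branch above can memset both parts with a single call. A minimal allocation/configure sketch under that assumption (the helper name is hypothetical):

#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* Illustrative only, not part of the patch. */
static struct rte_crypto_raw_dp_ctx *
qat_raw_ctx_alloc(uint8_t dev_id, uint16_t qp_id,
	union rte_cryptodev_session_ctx sess_ctx)
{
	struct rte_crypto_raw_dp_ctx *ctx;
	/* generic context size plus driver-private size, as one block */
	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);

	if (sz < 0)
		return NULL;
	ctx = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (ctx == NULL)
		return NULL;
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		rte_free(ctx);
		return NULL;
	}
	return ctx;
}
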
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 2576cb1be7..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,974 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-		cipher_param->spc_aad_addr = aad->iova;
-		cipher_param->spc_auth_res_addr = digest->iova;
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v7 10/10] crypto/qat: support out of place SG list
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (8 preceding siblings ...)
  2022-02-08 18:14                 ` [dpdk-dev v7 09/10] crypto/qat: raw dp api integration Kai Ji
@ 2022-02-08 18:14                 ` Kai Ji
  2022-02-09 10:25                   ` Zhang, Roy Fan
  2022-02-09 10:20                 ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
  11 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-08 18:14 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds out-of-place (OOP) scatter-gather list (SGL) support to
the QAT PMD.
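
For context, a minimal application-side sketch of how this path gets
exercised (not part of the patch): the raw data-path API takes the OOP
branch whenever the caller populates dest_sgl in struct
rte_crypto_sym_vec. The helper name and buffer setup below are
hypothetical; only the rte_cryptodev raw data-path calls are existing API.

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Sketch: enqueue one out-of-place job through the raw data-path API.
 * 'raw_ctx' is assumed to be already configured with
 * rte_cryptodev_configure_raw_dp_ctx(); src/dst describe separate buffers.
 */
static int
enqueue_oop_job(struct rte_crypto_raw_dp_ctx *raw_ctx,
		struct rte_crypto_vec *src, uint32_t n_src,
		struct rte_crypto_vec *dst, uint32_t n_dst,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *digest,
		struct rte_crypto_va_iova_ptr *aad,
		union rte_crypto_sym_ofs ofs, void *user_data)
{
	struct rte_crypto_sgl src_sgl = { .vec = src, .num = n_src };
	struct rte_crypto_sgl dst_sgl = { .vec = dst, .num = n_dst };
	int32_t st = 0;
	int enq_status;
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = &src_sgl,
		.dest_sgl = &dst_sgl,	/* non-NULL dest_sgl selects the OOP path */
		.iv = iv,
		.digest = digest,
		.aad = aad,
		.status = &st,
	};

	if (rte_cryptodev_raw_enqueue_burst(raw_ctx, &vec, ofs,
			&user_data, &enq_status) != 1)
		return -1;
	/* ring the doorbell for the cached descriptor */
	return rte_cryptodev_raw_enqueue_done(raw_ctx, 1);
}

Leaving dest_sgl as NULL makes the same call fall back to the in-place
branch kept in the hunks below.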

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index ffa093a7a3..5084a5fcd1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -468,8 +468,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -565,8 +575,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index f803bc1459..bd7f3785df 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -297,8 +297,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index fee6507512..83d9b66f34 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -526,9 +526,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -625,8 +634,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -725,8 +744,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -830,8 +859,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (9 preceding siblings ...)
  2022-02-08 18:14                 ` [dpdk-dev v7 10/10] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-09 10:20                 ` Zhang, Roy Fan
  2022-02-12 11:32                   ` Akhil Goyal
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
  11 siblings, 1 reply; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:20 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch
> rework
> 
> This patch reworks QAT symmetric crypto datapatch implementation where
> each
> generation request building separated and the crypto operation under the
> raw datapath api implementation are unified.
> 
> In addtion this patchset also enables QAT OOP support in raw datapath api
> implementation.
> 
> v7:
> - fix of pointer cast compile error in x86
> 
> v6:
> - fix of pointer cast error in x86
> - rebase to the lastest for-main
> 
> v5:
> - rebase to the latest for-main
> - patchset reconstruct
> 
> v4:
> - patchset break down and reconstruct
> 
> v3:
> - sperate a single patch 6 to two different patches
> 
> v2:
> - review comments addressed
> 
> Kai Ji (10):
>   common/qat: define build op request and dequeue op
>   crypto/qat: sym build op request specific implementation
>   crypto/qat: qat generation specific enqueue
>   crypto/qat: rework session APIs
>   crypto/qat: rework asymmetric crypto build operation
>   crypto/qat: unify qat sym pmd apis
>   crypto/qat: unify qat asym pmd apis
>   crypto/qat: op burst data path rework
>   crypto/qat: raw dp api integration
>   crypto/qat: support out of place SG list
> 
>  drivers/common/qat/meson.build               |   6 +-
>  drivers/common/qat/qat_device.c              |   4 +-
>  drivers/common/qat/qat_qp.c                  |  42 +-
>  drivers/common/qat/qat_qp.h                  |  54 +-
>  drivers/compress/qat/qat_comp_pmd.c          |  14 +-
>  drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913
> ++++++++++++++++-
>  drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942
> +++++++++++++++++-
>  drivers/crypto/qat/qat_asym.c                | 762 ++++++++------
>  drivers/crypto/qat/qat_asym.h                |  79 +-
>  drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
>  drivers/crypto/qat/qat_asym_pmd.h            |  54 -
>  drivers/crypto/qat/qat_crypto.h              |  16 +-
>  drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
>  drivers/crypto/qat/qat_sym.h                 | 148 ++-
>  drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
>  drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
>  drivers/crypto/qat/qat_sym_pmd.h             |  95 --
>  drivers/crypto/qat/qat_sym_session.c         | 115 +--
>  drivers/crypto/qat/qat_sym_session.h         |  15 +-
>  23 files changed, 3813 insertions(+), 2739 deletions(-)
>  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
>  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
>  delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
>  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
>  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
> 
> --
> 2.17.1
Series-acked-by: Fan Zhang <roy.fan.zhang@intel.com>


^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op
  2022-02-08 18:14                 ` [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op Kai Ji
@ 2022-02-09 10:22                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:22 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 01/10] common/qat: define build op request and
> dequeue op
> 
> This patch introduce build request op and dequeue op function
> pointers to qat queue pair implementation. Those two functions
> are used to be assigned during qat session generation based on
> crypto operation
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 02/10] crypto/qat: sym build op request specific implementation
  2022-02-08 18:14                 ` [dpdk-dev v7 02/10] crypto/qat: sym build op request specific implementation Kai Ji
@ 2022-02-09 10:22                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:22 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 02/10] crypto/qat: sym build op request specific
> implementation
> 
> This patch adds common inline functions for QAT symmetric
> crypto driver to process crypto op and the build op
> request function pointer implementation for QAT generation 1.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue
  2022-02-08 18:14                 ` [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue Kai Ji
@ 2022-02-09 10:23                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:23 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue
> 
> This patch add in specific aead & auth build op enqueue functions
> for QAT Gen3 & Gen4
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 04/10] crypto/qat: rework session APIs
  2022-02-08 18:14                 ` [dpdk-dev v7 04/10] crypto/qat: rework session APIs Kai Ji
@ 2022-02-09 10:23                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:23 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 04/10] crypto/qat: rework session APIs
> 
> This patch introduces the set_session methods for different
> generations of QAT. In addition, the patch replaces
> 'min_qat_dev_gen_id' with 'qat_dev_gen'. Thus, the session
> no longer allow to be created by one generation of QAT used by another
> generation.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build operation
  2022-02-08 18:14                 ` [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
@ 2022-02-09 10:23                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:23 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build
> operation
> 
> This patch reworks the asymmetric crypto data path
> implementation to QAT driver. The change includes separation
> of different QAT generations' asymmetric crypto data path
> implementations and shrink the device capabilities declaration
> code size.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis
  2022-02-08 18:14                 ` [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
@ 2022-02-09 10:24                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:24 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis
> 
> This patch removes qat_sym_pmd.c and integrates all the apis into
> qat_sym.c. The unified/integrated qat sym crypto pmd apis should
> make them easier to maintain.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 07/10] crypto/qat: unify qat asym pmd apis
  2022-02-08 18:14                 ` [dpdk-dev v7 07/10] crypto/qat: unify qat asym " Kai Ji
@ 2022-02-09 10:24                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:24 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 07/10] crypto/qat: unify qat asym pmd apis
> 
> This patch removes qat_asym_pmd.c and integrates all the
> functions into qat_asym.c. The unified/integrated asym crypto
> pmd apis should make them easier to maintain.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 08/10] crypto/qat: op burst data path rework
  2022-02-08 18:14                 ` [dpdk-dev v7 08/10] crypto/qat: op burst data path rework Kai Ji
@ 2022-02-09 10:24                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:24 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 08/10] crypto/qat: op burst data path rework
> 
> This patch enable op_build_request function in qat_enqueue_op_burst,
> and qat_dequeue_process_response function in qat_dequeue_op_burst.
> The op_build_request invoked in crypto build request op is based
> on crypto operations setup during session init.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 09/10] crypto/qat: raw dp api integration
  2022-02-08 18:14                 ` [dpdk-dev v7 09/10] crypto/qat: raw dp api integration Kai Ji
@ 2022-02-09 10:25                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:25 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 09/10] crypto/qat: raw dp api integration
> 
> This patch "unifies" QAT's raw dp api implementations
> to share the same enqueue/dequeue methods as the crypto
> operation enqueue/dequeue methods. In addition, different QAT
> generation specific implementations are done respectively.
> The qat_sym_hw_dp.c is removed as no longer required.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 10/10] crypto/qat: support out of place SG list
  2022-02-08 18:14                 ` [dpdk-dev v7 10/10] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-09 10:25                   ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-09 10:25 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil, Ji, Kai

> -----Original Message-----
> From: Kai Ji <kai.ji@intel.com>
> Sent: Tuesday, February 8, 2022 6:15 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> Subject: [dpdk-dev v7 10/10] crypto/qat: support out of place SG list
> 
> This patch adds the SGL out of place support to QAT PMD
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-09 10:20                 ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
@ 2022-02-12 11:32                   ` Akhil Goyal
  0 siblings, 0 replies; 156+ messages in thread
From: Akhil Goyal @ 2022-02-12 11:32 UTC (permalink / raw)
  To: Zhang, Roy Fan, Ji, Kai, dev; +Cc: Ji, Kai

> > -----Original Message-----
> > From: Kai Ji <kai.ji@intel.com>
> > Sent: Tuesday, February 8, 2022 6:15 PM
> > To: dev@dpdk.org
> > Cc: gakhil@marvell.com; Ji, Kai <kai.ji@intel.com>
> > Subject: [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch
> > rework
> >
> > This patch reworks QAT symmetric crypto datapatch implementation where
> > each
> > generation request building separated and the crypto operation under the
> > raw datapath api implementation are unified.
> >
> > In addtion this patchset also enables QAT OOP support in raw datapath api
> > implementation.
> >
> > v7:
> > - fix of pointer cast compile error in x86
> >
> > v6:
> > - fix of pointer cast error in x86
> > - rebase to the lastest for-main
> >
> > v5:
> > - rebase to the latest for-main
> > - patchset reconstruct
> >
> > v4:
> > - patchset break down and reconstruct
> >
> > v3:
> > - sperate a single patch 6 to two different patches
> >
> > v2:
> > - review comments addressed
> >
> > Kai Ji (10):
> >   common/qat: define build op request and dequeue op
> >   crypto/qat: sym build op request specific implementation
> >   crypto/qat: qat generation specific enqueue
> >   crypto/qat: rework session APIs
> >   crypto/qat: rework asymmetric crypto build operation
> >   crypto/qat: unify qat sym pmd apis
> >   crypto/qat: unify qat asym pmd apis
> >   crypto/qat: op burst data path rework
> >   crypto/qat: raw dp api integration
> >   crypto/qat: support out of place SG list
> >
> >  drivers/common/qat/meson.build               |   6 +-
> >  drivers/common/qat/qat_device.c              |   4 +-
> >  drivers/common/qat/qat_qp.c                  |  42 +-
> >  drivers/common/qat/qat_qp.h                  |  54 +-
> >  drivers/compress/qat/qat_comp_pmd.c          |  14 +-
> >  drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
> >  drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
> >  drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
> >  drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
> >  drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913
> > ++++++++++++++++-
> >  drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942
> > +++++++++++++++++-
> >  drivers/crypto/qat/qat_asym.c                | 762 ++++++++------
> >  drivers/crypto/qat/qat_asym.h                |  79 +-
> >  drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
> >  drivers/crypto/qat/qat_asym_pmd.h            |  54 -
> >  drivers/crypto/qat/qat_crypto.h              |  16 +-
> >  drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
> >  drivers/crypto/qat/qat_sym.h                 | 148 ++-
> >  drivers/crypto/qat/qat_sym_hw_dp.c           | 983 -------------------
> >  drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
> >  drivers/crypto/qat/qat_sym_pmd.h             |  95 --
> >  drivers/crypto/qat/qat_sym_session.c         | 115 +--
> >  drivers/crypto/qat/qat_sym_session.h         |  15 +-
> >  23 files changed, 3813 insertions(+), 2739 deletions(-)
> >  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
> >  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
> >  delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
> >  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
> >  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
> >
> > --
> > 2.17.1
> Series-acked-by: Fan Zhang <roy.fan.zhang@intel.com>

The patchset does not apply on next-crypto TOT.
Please rebase.


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 00/10] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                   ` (10 preceding siblings ...)
  2022-02-09 10:20                 ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
@ 2022-02-17 16:28                 ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 01/10] common/qat: define build op request and dequeue op Kai Ji
                                     ` (11 more replies)
  11 siblings, 12 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:28 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patchset reworks the QAT symmetric crypto datapath implementation so that
request building is separated per QAT generation and the crypto operation and
raw datapath API implementations are unified.

In addition, this patchset also enables QAT out-of-place (OOP) support in the
raw datapath API implementation.

v8:
- rebase to 22.03-rc1

v7:
- fix pointer cast compile error on x86

v6:
- fix pointer cast error on x86
- rebase to the latest for-main

v5:
- rebase to the latest for-main
- patchset reconstruct

v4:
- patchset break down and reconstruct

v3:
- separate a single patch 6 into two different patches

v2:
- review comments addressed

Kai Ji (10):
  common/qat: define build op request and dequeue op
  crypto/qat: sym build op request specific implementation
  crypto/qat: qat generation specific enqueue
  crypto/qat: rework session APIs
  crypto/qat: rework asymmetric crypto build operation
  crypto/qat: unify qat sym pmd apis
  crypto/qat: unify qat asym pmd apis
  crypto/qat: op burst data path rework
  crypto/qat: raw dp api integration
  crypto/qat: support out of place SG list

 drivers/common/qat/meson.build               |   6 +-
 drivers/common/qat/qat_device.c              |   4 +-
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  54 +-
 drivers/compress/qat/qat_comp_pmd.c          |  14 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 303 +++++-
 drivers/crypto/qat/qat_asym.h                |  79 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.h              |  16 +-
 drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 148 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 995 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 115 +--
 drivers/crypto/qat/qat_sym_session.h         |  15 +-
 23 files changed, 3582 insertions(+), 2523 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

--
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 01/10] common/qat: define build op request and dequeue op
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 02/10] crypto/qat: sym build op request specific implementation Kai Ji
                                     ` (10 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch introduces build-request op and dequeue op function
pointers to the QAT queue pair implementation. These two functions
are assigned during QAT session generation, based on the crypto
operation.
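
As an illustration only, a simplified, self-contained sketch of the
dispatch pattern being introduced; every type and name below is a
hypothetical stand-in (the real definitions are in the diff that
follows). It only shows how a per-session build-request callback plugs
into a generic enqueue loop.

#include <stdint.h>

/* Hypothetical stand-ins for the queue pair and firmware descriptor. */
struct fw_desc { uint8_t msg[128]; };
struct dummy_qp {
	struct fw_desc ring[1024];
	uint32_t tail;
	uint64_t opaque[2];	/* per-qp scratch, mirroring qp->opaque below */
};

/* Same shape as the qat_op_build_request_t idea: turn one op into one
 * firmware message, using a per-op cookie and per-qp opaque scratch. */
typedef int (*build_request_t)(void *op, uint8_t *out_msg, void *cookie,
		uint64_t *opaque);

/* Generic enqueue loop: the service-specific knowledge lives entirely
 * in the callback chosen when the session (or qp) was set up. */
uint16_t
generic_enqueue_burst(struct dummy_qp *qp, build_request_t build_request,
		void **ops, void **cookies, uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		uint8_t *msg = qp->ring[qp->tail % 1024].msg;

		if (build_request(ops[i], msg, cookies[i], qp->opaque) != 0)
			break;	/* malformed op: report partial count */
		qp->tail++;
	}
	return i;	/* caller rings the doorbell for 'i' descriptors */
}

With this shape, qat_enqueue_op_burst can stay service-agnostic:
symmetric and asymmetric ops differ only in the callback passed in.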

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c          | 10 ++++--
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  4 +--
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h | 13 ++++++-
 6 files changed, 76 insertions(+), 13 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 57ac8fefca..56234ca1a4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -547,7 +547,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -814,7 +816,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..66f00943a5 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Typedef for the qat_op_build_request_t function pointer, passed as an
+ * argument to the enqueue op burst, where a build request is assigned based
+ * on the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context between
+ *    two enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - EINVAL if error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Typedef for the qat_op_dequeue_t function pointer, passed as an argument
+ * to the dequeue op burst, where a dequeue op is assigned based on the type
+ * of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - EINVAL if error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index da6404c017..8e497e7a09 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..fe875a7fd0 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * Typedef for the qat_sym_build_request_t function pointer, passed as an
+ * argument to the enqueue op burst, where a build request is assigned based
+ * on the type of crypto op.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 02/10] crypto/qat: sym build op request specific implementation
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 01/10] common/qat: define build op request and dequeue op Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 03/10] crypto/qat: qat generation specific enqueue Kai Ji
                                     ` (9 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds common inline functions for the QAT symmetric
crypto driver to process crypto ops, along with the build op
request function pointer implementation for QAT generation 1.
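
A condensed sketch of the shape these helpers give a generation-1
build-request function (all types and names below are hypothetical;
only the control flow mirrors the helpers added in this patch): the op
is first flattened into data vectors, the common request fields are
filled from those vectors, and the algorithm-specific wrapper finishes
the firmware message.

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for the firmware request, session template and
 * data vector; not the real QAT structures. */
struct fw_req { uint64_t src; uint64_t dst; uint32_t len; uint64_t opaque; };
struct sym_sess { struct fw_req tmpl; };
struct data_vec { uint64_t iova; uint32_t len; };

/* Step 1: common helper - fill source/destination/length from vectors. */
static inline int32_t
req_set_data(struct fw_req *r, void *opaque, const struct data_vec *src,
		uint16_t n_src)
{
	uint32_t total = 0;
	uint16_t i;

	if (n_src < 1)
		return -1;
	for (i = 0; i < n_src; i++)
		total += src[i].len;
	r->src = src[0].iova;	/* flat-buffer case; SGL case omitted */
	r->dst = r->src;	/* in-place */
	r->len = total;
	r->opaque = (uintptr_t)opaque;
	return (int32_t)total;
}

/* Step 2: generation-1, algorithm-specific wrapper - the per-session
 * build-request pointer is set to a function of this shape. */
static inline int
build_cipher_request_gen1(void *op, struct sym_sess *sess, uint8_t *out_msg,
		const struct data_vec *src, uint16_t n_src)
{
	struct fw_req *r = (struct fw_req *)out_msg;

	memcpy(r, &sess->tmpl, sizeof(*r));	/* start from session template */
	if (req_set_data(r, op, src, n_src) < 0)
		return -1;
	/* cipher-specific fields (IV, offsets) would be filled here */
	return 0;
}

Each algorithm variant (cipher, auth, chain, AEAD) follows this same
two-step pattern, differing only in the final step.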

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..c429825a67 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread
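
Each of the gen1 build-op helpers added above (cipher, auth, AEAD and chain) follows the same three-step flow: convert the crypto op into flattened vectors, fill the request's buffer descriptors, then write the algorithm-specific job parameters. The sketch below is only a simplified illustration of that flow; every struct and helper name in it is a stand-in, not part of the driver's real API.

#include <errno.h>
#include <stdint.h>

/* Stand-in types: the real code uses rte_crypto_op, qat_sym_session and
 * icp_qat_fw_la_bulk_req here.
 */
struct sketch_op      { int status; };
struct sketch_vectors { int n_segs; };
struct sketch_req     { uint64_t words[16]; };

/* Stand-ins for qat_sym_convert_op_to_vec_*() and
 * qat_sym_build_req_set_data(); trivial bodies so the sketch compiles.
 */
static int64_t convert_op_to_vectors(struct sketch_op *op,
		struct sketch_vectors *v) { (void)op; v->n_segs = 1; return 0; }
static int32_t fill_req_buffers(struct sketch_req *req,
		struct sketch_vectors *v) { (void)req; return v->n_segs; }
static void fill_job_params(struct sketch_req *req) { (void)req; }

static int
sketch_build_one_request(struct sketch_op *op, struct sketch_req *req)
{
	struct sketch_vectors v;

	/* 1. flatten the op into SGL vectors plus IV/AAD/digest pointers */
	if (convert_op_to_vectors(op, &v) < 0)
		return -EINVAL;
	/* 2. fill the request's source/destination buffer descriptors */
	if (fill_req_buffers(req, &v) < 0)
		return -EINVAL;
	/* 3. write the cipher/auth/AEAD specific job parameters */
	fill_job_params(req);
	return 0;
}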

* [dpdk-dev v8 03/10] crypto/qat: qat generation specific enqueue
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 01/10] common/qat: define build op request and dequeue op Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 02/10] crypto/qat: sym build op request specific implementation Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 04/10] crypto/qat: rework session APIs Kai Ji
                                     ` (8 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds generation-specific AEAD & auth build op enqueue
functions for QAT Gen3 & Gen4

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 117 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  34 +++++-
 2 files changed, 149 insertions(+), 2 deletions(-)
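
The core of the GEN3 AEAD change in the diff below is a single branch: if the session was configured for single pass, the AEAD job is written as a cipher job with side pointers for the AAD and digest; otherwise the GEN1 layout is reused. A minimal hedged sketch of that decision follows; the types are simplified stand-ins, not the real qat_sym_session or icp_qat_fw_la_* structures.

#include <stdbool.h>
#include <stdint.h>

/* simplified stand-ins */
struct sp_session { bool is_single_pass; };
struct sp_request {
	uint32_t cipher_offset;
	uint32_t cipher_length;
	uint64_t spc_aad_addr;      /* AAD pointer used by single pass */
	uint64_t spc_auth_res_addr; /* digest pointer used by single pass */
};

static void enqueue_aead_gen1_sketch(struct sp_request *req) { (void)req; }

static void
enqueue_aead_gen3_sketch(struct sp_session *ctx, struct sp_request *req,
		uint64_t aad_iova, uint64_t digest_iova,
		uint32_t head, uint32_t tail, uint32_t data_len)
{
	if (!ctx->is_single_pass) {
		/* legacy path: reuse the GEN1 request layout */
		enqueue_aead_gen1_sketch(req);
		return;
	}
	/* single pass: AEAD is expressed as a cipher operation */
	req->cipher_offset     = head;
	req->cipher_length     = data_len - head - tail;
	req->spc_aad_addr      = aad_iova;
	req->spc_auth_res_addr = digest_iova;
}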

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..fca7af2b7e 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,121 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..8462c0b9b1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,6 +103,38 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread
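
GEN4, unlike GEN3, only takes the single-pass path when the session is also marked UCS, and it writes the AAD and digest pointers through the newer cipher-2.0 request layout. The short sketch below illustrates only that extra gate; the field and type names are simplified stand-ins for icp_qat_fw_la_cipher_20_req_params and the session struct, not the real definitions.

#include <stdbool.h>
#include <stdint.h>

/* simplified stand-ins */
struct ucs_session   { bool is_single_pass; bool is_ucs; };
struct ucs_request20 { uint64_t spc_aad_addr; uint64_t spc_auth_res_addr; };

static void enqueue_aead_fallback(void) { /* GEN1 layout reused */ }

static void
enqueue_aead_gen4_sketch(struct ucs_session *ctx, struct ucs_request20 *req20,
		uint64_t aad_iova, uint64_t digest_iova)
{
	if (!(ctx->is_single_pass && ctx->is_ucs)) {
		enqueue_aead_fallback();
		return;
	}
	/* single pass on GEN4: pointers go into the cipher-2.0 layout */
	req20->spc_aad_addr      = aad_iova;
	req20->spc_auth_res_addr = digest_iova;
}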

* [dpdk-dev v8 04/10] crypto/qat: rework session APIs
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (2 preceding siblings ...)
  2022-02-17 16:29                   ` [dpdk-dev v8 03/10] crypto/qat: qat generation specific enqueue Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
                                     ` (7 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch introduces the set_session methods for different
generations of QAT. In addition, the patch replaces
'min_qat_dev_gen_id' with 'qat_dev_gen'. As a result, a session
created for one generation of QAT can no longer be used by another
generation.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 +++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 139 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  91 ++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 +++------------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 10 files changed, 425 insertions(+), 107 deletions(-)
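
The mechanism introduced by the hunks below can be summarised as a per-generation function table that is consulted once, at session-setup time. The following is a rough, self-contained sketch of that dispatch; the names (sketch_set_session_fn, sketch_session_finalize) are stand-ins, and only the shape of the qat_sym_gen_dev_ops[...].set_session call mirrors the real code in the diff.

#include <errno.h>
#include <stddef.h>

enum sketch_gen { SKETCH_GEN1, SKETCH_GEN2, SKETCH_GEN3, SKETCH_GEN4,
		SKETCH_N_GENS };

typedef int (*sketch_set_session_fn)(void *cryptodev, void *session);

/* one slot per generation, filled by each generation's init constructor */
static sketch_set_session_fn sketch_set_session_tbl[SKETCH_N_GENS];

static int
sketch_session_finalize(enum sketch_gen gen, void *cryptodev, void *session)
{
	if (sketch_set_session_tbl[gen] == NULL)
		return -ENOTSUP;	/* generation did not register a handler */

	/* The handler installs the per-process build_request callback and may
	 * still return -ENOTSUP for algorithm mixes the device cannot do.
	 */
	return sketch_set_session_tbl[gen](cryptodev, session);
}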

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returns -ENOTSUP for mixed algos it cannot handle;
+		 * some of those are not supported by GEN2 either, so check here
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index fca7af2b7e..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -258,6 +258,142 @@ enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
 			ICP_QAT_FW_LA_NO_PROTO);
 }
 
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN3
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -265,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -276,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 8462c0b9b1..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -135,11 +135,101 @@ enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
 	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
 }
 
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN4
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -153,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c429825a67..501132a448 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* If mixed, check for unsupported algorithm combinations */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 05/10] crypto/qat: rework asymmetric crypto build operation
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (3 preceding siblings ...)
  2022-02-17 16:29                   ` [dpdk-dev v8 04/10] crypto/qat: rework session APIs Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
                                     ` (6 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch reworks the asymmetric crypto data path
implementation in the QAT driver. The change separates the
asymmetric crypto data path implementations of the different
QAT generations and shrinks the device capabilities
declaration code size.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
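Note (illustration only): a standalone sketch of the per-generation
dispatch described above. Every name below is a simplified stand-in,
not one of the driver's real types; each generation registers its ops
in a table indexed by device generation and the common code looks the
entry up when the device is created.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum dev_gen { GEN1, GEN2, GEN3, GEN4, N_GENS };

struct gen_dev_ops {
	const char *name;
	int (*build_request)(void *op, uint8_t *out_msg, void *cookie);
};

static int
build_request_gen1(void *op, uint8_t *out_msg, void *cookie)
{
	(void)op; (void)cookie;
	out_msg[0] = 0x01;	/* generation-specific request layout goes here */
	return 0;
}

/* in the real driver each dev/qat_*_gen*.c file fills in its own slot */
static struct gen_dev_ops asym_gen_dev_ops[N_GENS] = {
	[GEN1] = { "gen1", build_request_gen1 },
};

int main(void)
{
	enum dev_gen gen = GEN1;	/* discovered from the PCI device */
	const struct gen_dev_ops *ops = &asym_gen_dev_ops[gen];
	uint8_t msg[64];

	if (ops->build_request == NULL) {
		printf("asym crypto not supported on this generation\n");
		return 1;
	}
	printf("using %s ops\n", ops->name);
	return ops->build_request(NULL, msg, NULL);
}
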
 drivers/common/qat/qat_qp.c   |   5 +-
 drivers/crypto/qat/qat_asym.c | 129 +++++++++++++++++++---------------
 drivers/crypto/qat/qat_asym.h |  63 +++++++++++++++--
 3 files changed, 131 insertions(+), 66 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 56234ca1a4..7f2fdc53ce 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -619,7 +619,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -847,7 +847,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index f46eefd4b3..845e905a89 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,68 +1,36 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
-{
-	size_t i;
-
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
-	}
-	return -1;
-}
-
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
-{
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
+#include "qat_device.h"
 
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
-}
+#include "qat_logs.h"
+#include "qat_asym.h"
 
-static size_t max_of(int n, ...)
-{
-	va_list args;
-	size_t len = 0, num;
-	int i;
+uint8_t qat_asym_driver_id;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
-	}
-	va_end(args);
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
 
-	return len;
-}
 
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
@@ -106,7 +74,46 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -475,10 +482,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -677,9 +683,9 @@ static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
 	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
 }
 
-void
+int
 qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
 {
 	struct qat_asym_session *ctx;
 	struct icp_qat_fw_pke_resp *resp_msg =
@@ -722,6 +728,8 @@ qat_asym_process_response(void **op, uint8_t *resp,
 	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
 			sizeof(struct icp_qat_fw_pke_resp));
 #endif
+
+	return 1;
 }
 
 int
@@ -779,3 +787,8 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 	if (sess_priv)
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index c9242a12ca..3ae95f2e7b 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_ASYM_H_
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
 		struct rte_crypto_asym_xform *xform,
@@ -75,7 +121,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -87,8 +135,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void **op, uint8_t *resp,
+		void *op_cookie,  __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 06/10] crypto/qat: unify qat sym pmd apis
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (4 preceding siblings ...)
  2022-02-17 16:29                   ` [dpdk-dev v8 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 07/10] crypto/qat: unify qat asym " Kai Ji
                                     ` (5 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch removes qat_sym_pmd.c and integrates all of its APIs
into qat_sym.c. Keeping the QAT symmetric crypto PMD APIs in a
single unified file makes them easier to maintain.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build       |   4 +-
 drivers/common/qat/qat_device.c      |   4 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_crypto.h      |   5 +-
 drivers/crypto/qat/qat_sym.c         |  21 +++
 drivers/crypto/qat/qat_sym.h         | 147 ++++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |  11 +-
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 10 files changed, 168 insertions(+), 375 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index af92271a75..1bf6896a7e 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017-2018 Intel Corporation
+# Copyright(c) 2017-2022 Intel Corporation
 
 if is_windows
     build = false
@@ -73,7 +73,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 1f870d689a..6824d97050 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2020 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 
 #include <rte_string_fns.h>
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 7f2fdc53ce..b36ffa6f6d 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -838,7 +838,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 5ca76fcaa6..c01266f81c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 83bf55c933..aad4b243b7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -17,6 +17,27 @@ uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..f4ff2ce4cd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
@@ -24,6 +24,67 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,6 +115,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -213,17 +290,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +353,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
 }
 
 int
@@ -293,6 +370,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +430,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 792ad2b213..5322faff44 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #include <cryptodev_pmd.h>
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 28a26260fb..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 59fbdefa12..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 3a880096c4..9d6a19c0be 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 07/10] crypto/qat: unify qat asym pmd apis
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (5 preceding siblings ...)
  2022-02-17 16:29                   ` [dpdk-dev v8 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 08/10] crypto/qat: op burst data path rework Kai Ji
                                     ` (4 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch removes qat_asym_pmd.c and integrates all of its
functions into qat_asym.c. Keeping the asymmetric crypto PMD
APIs in a single unified file makes them easier to maintain.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
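Note (illustration only): a standalone sketch of the wrapper pattern
the unified qat_asym.c keeps, where thin enqueue/dequeue wrappers bind
the service-specific build callback to a shared queue-pair engine. All
types and names below are simplified stand-ins, not the driver's real
API.

#include <stdio.h>
#include <stdint.h>

typedef int (*build_request_t)(void *op, uint8_t *out_msg, void *cookie);

/* shared engine, a stand-in for the common enqueue routine */
static uint16_t
generic_enqueue(void *qp, build_request_t build, void **ops, uint16_t n)
{
	uint8_t ring_slot[64];
	uint16_t i;

	(void)qp;
	for (i = 0; i < n; i++)
		if (build(ops[i], ring_slot, NULL) != 0)
			break;	/* stop on the first op that cannot be built */
	return i;
}

/* service-specific callback, a stand-in for the asym build-request */
static int
asym_build_request(void *op, uint8_t *out_msg, void *cookie)
{
	(void)op; (void)cookie;
	out_msg[0] = 0x42;	/* fill the PKE request descriptor here */
	return 0;
}

/* thin wrapper that would be installed as the cryptodev enqueue_burst */
static uint16_t
asym_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	return generic_enqueue(qp, asym_build_request, ops, nb_ops);
}

int main(void)
{
	void *ops[4] = { 0 };

	printf("enqueued %u ops\n", (unsigned)asym_enqueue_op_burst(NULL, ops, 4));
	return 0;
}
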
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 1bf6896a7e..f687f5c9d8 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 845e905a89..76d50dd6f9 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -19,6 +19,32 @@ uint8_t qat_asym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 /* An rte_driver is needed in the registration of both the device and the driver
  * with cryptodev.
  * The actual qat pci's rte_driver can't be used as its name represents
@@ -788,6 +814,160 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 9a7596b227..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index f988d646e5..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 08/10] crypto/qat: op burst data path rework
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (6 preceding siblings ...)
  2022-02-17 16:29                   ` [dpdk-dev v8 07/10] crypto/qat: unify qat asym " Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 09/10] crypto/qat: raw dp api integration Kai Ji
                                     ` (3 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch enables the op_build_request function in
qat_enqueue_op_burst and the qat_dequeue_process_response
function in qat_dequeue_op_burst. The op_build_request callback
invoked when building a crypto request is selected according to
the crypto operation set up during session init.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
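Note (illustration only): a standalone sketch of the reworked burst
path, assuming simplified stand-in types throughout. The session
records the build-request callback once at init time, and the generic
enqueue loop simply invokes it for every op instead of branching on
the service type per op.

#include <stdio.h>
#include <stdint.h>

struct session {
	int algo;
	int (*build_request)(void *op, uint8_t *out_msg, struct session *s);
};

static int
build_cipher_request(void *op, uint8_t *out_msg, struct session *s)
{
	(void)op;
	out_msg[0] = (uint8_t)s->algo;	/* fill the firmware descriptor here */
	return 0;
}

/* the callback is chosen once, at session init, not per enqueued op */
static void
session_init(struct session *s, int algo)
{
	s->algo = algo;
	s->build_request = build_cipher_request;
}

struct op { struct session *sess; };

static uint16_t
enqueue_op_burst(struct op **ops, uint16_t nb_ops)
{
	uint8_t ring[16][64];	/* stand-in for the hardware TX ring */
	uint16_t i;

	for (i = 0; i < nb_ops && i < 16; i++) {
		struct session *s = ops[i]->sess;

		if (s->build_request(ops[i], ring[i], s) != 0)
			break;	/* this op cannot be enqueued */
	}
	return i;	/* number of ops actually placed on the ring */
}

int main(void)
{
	struct session s;
	struct op o = { &s }, *ops[2] = { &o, &o };

	session_init(&s, 7);
	printf("enqueued %u\n", (unsigned)enqueue_op_burst(ops, 2));
	return 0;
}
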
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 830 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 271 insertions(+), 634 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index b36ffa6f6d..08ac91eac4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -599,29 +598,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -817,8 +805,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -836,21 +823,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 501132a448..c58a628915 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 76d50dd6f9..3c7cba6140 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -507,7 +507,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 3ae95f2e7b..78caa5649c 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -103,28 +103,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..692e1f8f0a 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,68 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	uintptr_t sess = (uintptr_t)opaque[0];
+	uintptr_t build_request_p = (uintptr_t)opaque[1];
+	qat_sym_build_request_t build_request = (void *)build_request_p;
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != (uintptr_t)ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if ((void *)sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +128,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (uintptr_t)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value, typically they have, but could differ if binaries built
+	 * separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support ensabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 09/10] crypto/qat: raw dp api integration
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (7 preceding siblings ...)
  2022-02-17 16:29                   ` [dpdk-dev v8 08/10] crypto/qat: op burst data path rework Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 16:29                   ` [dpdk-dev v8 10/10] crypto/qat: support out of place SG list Kai Ji
                                     ` (2 subsequent siblings)
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch "unifies" QAT's raw dp api implementations
to share the same enqueue/dequeue methods as the crypto
operation enqueue/dequeue methods. In addition, different QAT
generation specific implementations are done respectively.
The qat_sym_hw_dp.c is removed as no longer required.
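
Below is a minimal usage sketch (not part of this patch) of how an
application is expected to drive the unified raw data-path through the
public cryptodev raw API, assuming a configured queue pair and an
initialised symmetric session. The helper name and parameter layout are
illustrative only, and the caller is assumed to have allocated raw_ctx
with rte_cryptodev_get_raw_dp_ctx_size() bytes.

#include <errno.h>
#include <rte_cryptodev.h>

/* Illustrative sketch only -- not part of the patch. */
static int
qat_raw_dp_enqueue_sketch(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_raw_dp_ctx *raw_ctx,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_vec *vec,
		union rte_crypto_sym_ofs ofs, void **user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	int enq_status;
	uint32_t enq;

	/* The PMD plugs in its generation specific handlers here, e.g. the
	 * GEN3 single-pass AEAD enqueue added by this patch.
	 */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, raw_ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
		return -EINVAL;

	/* Enqueue a burst of jobs, then advance the ring tail in one go. */
	enq = rte_cryptodev_raw_enqueue_burst(raw_ctx, vec, ofs, user_data,
			&enq_status);
	rte_cryptodev_raw_enqueue_done(raw_ctx, enq);

	/* Completions are later collected on the same raw_ctx with
	 * rte_cryptodev_raw_dequeue_burst() and
	 * rte_cryptodev_raw_dequeue_done().
	 */
	return (int)enq;
}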

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build               |   2 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 ++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  56 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 986 -------------------
 10 files changed, 1137 insertions(+), 994 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index f687f5c9d8..b7027f3164 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 8e497e7a09..dc8db84a68 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 64e6ae66ec..0c64c1e43f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -291,6 +291,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index db864d973a..ffa093a7a3 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -394,6 +394,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -403,6 +615,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 7642a87d55..f803bc1459 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -223,6 +223,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -230,6 +350,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 96cdb97a26..50a9c5ad5b 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c58a628915..fee6507512 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,6 +146,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -448,6 +452,656 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, &iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c01266f81c..cf386d0ed0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 692e1f8f0a..1ccffad5ab 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -89,7 +89,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -144,7 +144,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -292,8 +292,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (internals->capa_mz == NULL) {
 			QAT_LOG(DEBUG,
 				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
+				"destroying PMD for %s", name);
 			ret = -EFAULT;
 			goto error;
 		}
@@ -355,6 +354,55 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 5322faff44..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,986 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-
-		if (ctx->is_ucs) {
-			/* QAT GEN4 uses single pass to treat AEAD as cipher
-			 * operation
-			 */
-			struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
-				(void *)&req->serv_specif_rqpars;
-			cipher_param_20->spc_aad_addr = aad->iova;
-			cipher_param_20->spc_auth_res_addr = digest->iova;
-		} else {
-			cipher_param->spc_aad_addr = aad->iova;
-			cipher_param->spc_auth_res_addr = digest->iova;
-		}
-
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v8 10/10] crypto/qat: support out of place SG list
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (8 preceding siblings ...)
  2022-02-17 16:29                   ` [dpdk-dev v8 09/10] crypto/qat: raw dp api integration Kai Ji
@ 2022-02-17 16:29                   ` Kai Ji
  2022-02-17 17:59                   ` [EXT] [dpdk-dev v8 00/10] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
  11 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-17 16:29 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji

This patch adds out-of-place SGL support to the QAT PMD.
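
For reference, a minimal sketch of how a caller of the cryptodev raw
data-path API would request out-of-place processing by supplying dest_sgl
alongside src_sgl in rte_crypto_sym_vec, which is the field the hunks below
start honouring. The buffer setup, raw_dp_ctx and cookie values are
illustrative placeholders, not taken from this patch:

	/* Assumes <rte_cryptodev.h>; raw_dp_ctx is already configured via
	 * rte_cryptodev_configure_raw_dp_ctx() with a symmetric session.
	 */
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx;	/* set up beforehand */
	struct rte_crypto_vec src_vec[2], dst_vec[2];	/* pre-filled base/iova/len */
	struct rte_crypto_sgl src_sgl = { .vec = src_vec, .num = 2 };
	struct rte_crypto_sgl dst_sgl = { .vec = dst_vec, .num = 2 };
	struct rte_crypto_va_iova_ptr iv, digest, aad;	/* pre-filled va/iova */
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	void *user_data[1] = { NULL };	/* opaque returned at dequeue time */
	int32_t op_status;
	int enq_status;

	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = &src_sgl,
		.dest_sgl = &dst_sgl,	/* non-NULL selects the OOP path added here */
		.iv = &iv,
		.digest = &digest,
		.aad = &aad,
		.status = &op_status,
	};

	rte_cryptodev_raw_enqueue_burst(raw_dp_ctx, &vec, ofs, user_data,
			&enq_status);
	rte_cryptodev_raw_enqueue_done(raw_dp_ctx, 1);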

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index ffa093a7a3..5084a5fcd1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -468,8 +468,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -565,8 +575,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index f803bc1459..bd7f3785df 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -297,8 +297,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index fee6507512..83d9b66f34 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -526,9 +526,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -625,8 +634,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -725,8 +744,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -830,8 +859,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [EXT] [dpdk-dev v8 00/10] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (9 preceding siblings ...)
  2022-02-17 16:29                   ` [dpdk-dev v8 10/10] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-17 17:59                   ` Akhil Goyal
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
  11 siblings, 0 replies; 156+ messages in thread
From: Akhil Goyal @ 2022-02-17 17:59 UTC (permalink / raw)
  To: Kai Ji, dev, roy.fan.zhang

> ----------------------------------------------------------------------
> This patch reworks the QAT symmetric crypto datapath implementation so
> that request building is separated per generation and the crypto
> operations under the raw datapath API implementation are unified.
> 
> In addition, this patchset also enables QAT OOP support in the raw datapath
> API implementation.
> 
> v8:
> - rebase to 22.03-rc1
> 
> v7:
> - fix of pointer cast compile error in x86
> 
> v6:
> - fix of pointer cast error in x86
> - rebase to the lastest for-main
> 
> v5:
> - rebase to the latest for-main
> - patchset reconstruct
> 
> v4:
> - patchset break down and reconstruct
> 
> v3:
> - separate a single patch 6 into two different patches
> 
> v2:
> - review comments addressed
> 
> Kai Ji (10):
>   common/qat: define build op request and dequeue op
>   crypto/qat: sym build op request specific implementation
>   crypto/qat: qat generation specific enqueue
>   crypto/qat: rework session APIs
>   crypto/qat: rework asymmetric crypto build operation
>   crypto/qat: unify qat sym pmd apis
>   crypto/qat: unify qat asym pmd apis
>   crypto/qat: op burst data path rework
>   crypto/qat: raw dp api integration
>   crypto/qat: support out of place SG list

Reword the patch titles as follows.
crypto/qat: support out of place SG list
crypto/qat: unify raw data path functions
crypto/qat: rework burst data path
crypto/qat: unify asymmetric functions
crypto/qat: unify symmetric functions
crypto/qat: rework asymmetric op build operation
crypto/qat: rework session functions
crypto/qat: add generation specific enqueue
crypto/qat: support symmetric build op request
common/qat: define build request and dequeue ops

In fact, I changed some of the descriptions also.
Please reword the descriptions also.

> 
>  drivers/common/qat/meson.build               |   6 +-
>  drivers/common/qat/qat_device.c              |   4 +-
>  drivers/common/qat/qat_qp.c                  |  42 +-
>  drivers/common/qat/qat_qp.h                  |  54 +-
>  drivers/compress/qat/qat_comp_pmd.c          |  14 +-
>  drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
>  drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
>  drivers/crypto/qat/qat_asym.c                | 303 +++++-
>  drivers/crypto/qat/qat_asym.h                |  79 +-
>  drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
>  drivers/crypto/qat/qat_asym_pmd.h            |  54 -
>  drivers/crypto/qat/qat_crypto.h              |  16 +-
>  drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
>  drivers/crypto/qat/qat_sym.h                 | 148 ++-
>  drivers/crypto/qat/qat_sym_hw_dp.c           | 995 -------------------
>  drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
>  drivers/crypto/qat/qat_sym_pmd.h             |  95 --
>  drivers/crypto/qat/qat_sym_session.c         | 115 +--
>  drivers/crypto/qat/qat_sym_session.h         |  15 +-
>  23 files changed, 3582 insertions(+), 2523 deletions(-)
>  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
>  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
>  delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
>  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
>  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
> 
Individual patches do not compile

ninja: Entering directory `./build-gcc-shared'
[9/9] Linking target drivers/librte_common_qat.so.22.1.
ninja: Entering directory `./build-clang-static'
[1/27] Compiling C object 'drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen4.c.o'.
FAILED: drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen4.c.o
clang -Idrivers/a715181@@tmp_rte_common_qat@sta -Idrivers -I../drivers -Idrivers/common/qat -I../drivers/common/qat -I../drivers/common/qat/qat_adf -I../drivers/common/qat/../../crypto/qat -I../drivers/common/qat/../../compress/qat -Idrivers/bus/pci -I../drivers/bus/pci -I../drivers/bus/pci/linux -I. -I../ -Iconfig -I../config -Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include -I../lib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include -Ilib/eal/common -I../lib/eal/common -Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs -Ilib/telemetry/../metrics -I../lib/telemetry/../metrics -Ilib/telemetry -I../lib/telemetry -Ilib/pci -I../lib/pci -Ilib/cryptodev -I../lib/cryptodev -Ilib/mbuf -I../lib/mbuf -Ilib/mempool -I../lib/mempool -Ilib/ring -I../lib/ring -Ilib/rcu -I../lib/rcu -Ilib/net -I../lib/net -Ilib/compressdev -I../lib/compressdev -Ilib/security -I../lib/security -Xclang -fcolor-diagnostics -pipe -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Werror -O2 -g -include rte_config.h -Wcast-qual -Wdeprecated -Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packed-member -Wno-missing-field-initializers -D_GNU_SOURCE -fPIC -march=native -DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API -DBUILD_QAT_SYM -DBUILD_QAT_ASYM -DRTE_LOG_DEFAULT_LOGTYPE=pmd.common.qat -MD -MQ 'drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen4.c.o' -MF 'drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen4.c.o.d' -o 'drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen4.c.o' -c ../drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
../drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c:107:1: error: unused function 'enqueue_one_aead_job_gen4' [-Werror,-Wunused-function]
enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
^
1 error generated.
[2/27] Compiling C object 'drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen3.c.o'.
FAILED: drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen3.c.o
clang -Idrivers/a715181@@tmp_rte_common_qat@sta -Idrivers -I../drivers -Idrivers/common/qat -I../drivers/common/qat -I../drivers/common/qat/qat_adf -I../drivers/common/qat/../../crypto/qat -I../drivers/common/qat/../../compress/qat -Idrivers/bus/pci -I../drivers/bus/pci -I../drivers/bus/pci/linux -I. -I../ -Iconfig -I../config -Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include -I../lib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include -Ilib/eal/common -I../lib/eal/common -Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs -Ilib/telemetry/../metrics -I../lib/telemetry/../metrics -Ilib/telemetry -I../lib/telemetry -Ilib/pci -I../lib/pci -Ilib/cryptodev -I../lib/cryptodev -Ilib/mbuf -I../lib/mbuf -Ilib/mempool -I../lib/mempool -Ilib/ring -I../lib/ring -Ilib/rcu -I../lib/rcu -Ilib/net -I../lib/net -Ilib/compressdev -I../lib/compressdev -Ilib/security -I../lib/security -Xclang -fcolor-diagnostics -pipe -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Werror -O2 -g -include rte_config.h -Wcast-qual -Wdeprecated -Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations -Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packed-member -Wno-missing-field-initializers -D_GNU_SOURCE -fPIC -march=native -DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API -DBUILD_QAT_SYM -DBUILD_QAT_ASYM -DRTE_LOG_DEFAULT_LOGTYPE=pmd.common.qat -MD -MQ 'drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen3.c.o' -MF 'drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen3.c.o.d' -o 'drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_dev_qat_crypto_pmd_gen3.c.o' -c ../drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
../drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c:147:1: error: unused function 'enqueue_one_aead_job_gen3' [-Werror,-Wunused-function]
enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
^
../drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c:178:1: error: unused function 'enqueue_one_auth_job_gen3' [-Werror,-Wunused-function]
enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
^
2 errors generated.
ninja: build stopped: subcommand failed.
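
The unused-function errors above come from static helpers that are added in
one patch of the series but only referenced by a later patch, so clang's
-Wunused-function (promoted by -Werror) rejects the intermediate build. As a
hedged illustration only (the series itself may resolve this differently,
e.g. by introducing the helper together with its first caller), DPDK's
__rte_unused attribute is the conventional way to keep such an intermediate
patch building:

/* illustrative example only - not part of this series */
#include <rte_common.h>

static __rte_unused int
example_helper(int x)
{
	/* helper introduced in one patch, first called by a later one */
	return x + 1;
}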

^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 0/9] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
                                     ` (10 preceding siblings ...)
  2022-02-17 17:59                   ` [EXT] [dpdk-dev v8 00/10] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
@ 2022-02-18 17:15                   ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 1/9] common/qat: define build request and dequeue ops Kai Ji
                                       ` (9 more replies)
  11 siblings, 10 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patchset reworks the QAT symmetric crypto datapath implementation so
that request building is separated per generation and the crypto operations
under the raw datapath API implementation are unified.

In addition, this patchset also enables QAT OOP support in the raw datapath
API implementation.

v9:
- commit messages reword
- fix of unused function error

v8:
- rebase to 22.03-rc1

v7:
- fix of pointer cast compile error in x86

v6:
- fix of pointer cast error in x86
- rebase to the latest for-main

v5:
- rebase to the latest for-main
- patchset reconstruct

v4:
- patchset break down and reconstruct

v3:
- separate a single patch 6 into two different patches

v2:
- review comments addressed

Kai Ji (9):
  common/qat: define build request and dequeue ops
  crypto/qat: support symmetric build op request
  crypto/qat: rework session functions
  crypto/qat: rework asymmetric op build operation
  crypto/qat: unify symmetric functions
  crypto/qat: unify asymmetric functions
  crypto/qat: rework burst data path
  crypto/qat: unify raw data path functions
  crypto/qat: support out of place SG list

 drivers/common/qat/meson.build               |   6 +-
 drivers/common/qat/qat_device.c              |   4 +-
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  54 +-
 drivers/compress/qat/qat_comp_pmd.c          |  14 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 303 +++++-
 drivers/crypto/qat/qat_asym.h                |  79 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.h              |  16 +-
 drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 148 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 995 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 115 +--
 drivers/crypto/qat/qat_sym_session.h         |  15 +-
 23 files changed, 3582 insertions(+), 2523 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

--
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 1/9] common/qat: define build request and dequeue ops
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 2/9] crypto/qat: support symmetric build op request Kai Ji
                                       ` (8 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch introduces build request and dequeue op function
pointers to the QAT queue pair implementation. The function
pointers are assigned during QAT session generation based on the
input crypto operation request.
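
To show the intended wiring, here is a rough sketch (not taken from this
series; the gen_x names are placeholders, definitions omitted) of a service
passing its generation-specific callbacks into the common burst functions
declared below:

/* illustrative sketch only - "gen_x" names are placeholders */
static int
gen_x_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
		uint64_t *opaque, enum qat_device_gen dev_gen);

static int
gen_x_dequeue_response(void **op, uint8_t *resp, void *op_cookie,
		uint64_t *dequeue_err_count);

static uint16_t
gen_x_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* ring handling stays in the common qat_qp code; only the
	 * request building is generation specific
	 */
	return qat_enqueue_op_burst(qp, gen_x_build_request,
			(void **)ops, nb_ops);
}

static uint16_t
gen_x_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops,
			gen_x_dequeue_response, nb_ops);
}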

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c          | 10 ++++--
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  4 +--
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h | 13 ++++++-
 6 files changed, 76 insertions(+), 13 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 57ac8fefca..56234ca1a4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -547,7 +547,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -814,7 +816,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..66f00943a5 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Type of the qat_op_build_request_t function pointer, passed as an argument
+ * to the enqueue op burst, where a build request is assigned based on the
+ * type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context shared between
+ *    two enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - EINVAL if error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Type of the qat_op_dequeue_t function pointer, passed as an argument
+ * to the dequeue op burst, where a dequeue op is assigned based on the
+ * type of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - EINVAL if error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index da6404c017..8e497e7a09 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..fe875a7fd0 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * Type of the qat_sym_build_request_t function pointer, stored in the
+ * symmetric session and selected based on the type of crypto op when the
+ * session is configured.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 2/9] crypto/qat: support symmetric build op request
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 1/9] common/qat: define build request and dequeue ops Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 3/9] crypto/qat: rework session functions Kai Ji
                                       ` (7 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch adds common inline functions for the QAT symmetric
crypto driver to process crypto ops, and the implementation of the
build op request function for QAT generation 1.
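
For orientation, the region each helper operates on is described as head/tail
trims of a single data_len via union rte_crypto_sym_ofs. A small worked
example, assuming a 128-byte job whose cipher region starts at byte 16 and
runs to the end of the buffer:

	ofs.ofs.cipher.head = 16;	/* bytes skipped before the cipher region */
	ofs.ofs.cipher.tail = 0;	/* bytes left untouched after it */
	/* enqueue_one_cipher_job_gen1() below then programs:
	 *   cipher_offset = ofs.ofs.cipher.head = 16
	 *   cipher_length = data_len - head - tail = 128 - 16 - 0 = 112
	 */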

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..c429825a67 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 3/9] crypto/qat: rework session functions
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 1/9] common/qat: define build request and dequeue ops Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 2/9] crypto/qat: support symmetric build op request Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
                                       ` (6 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch introduces a set of set_session methods to the QAT
generations. In addition, reusing a QAT session across generations
is now prohibited, as the support of 'min_qat_dev_gen_id'
is removed.
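
As a rough sketch (assuming the set_session hook this series adds to the
per-generation ops table in qat_crypto.h; the exact member name should be
checked against the full patch), a generation registers its method like:

	RTE_INIT(qat_sym_crypto_gen2_init)
	{
		/* assumption: the generation ops table carries a
		 * set_session hook
		 */
		qat_sym_gen_dev_ops[QAT_GEN2].set_session =
				qat_sym_crypto_set_session_gen2;
	}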

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 ++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 256 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 125 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 ++------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 10 files changed, 574 insertions(+), 109 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * but some are not supported by GEN2, so checking here
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,257 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN3
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,11 +103,133 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN4
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c429825a67..501132a448 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* Check none supported algs if mixed */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 4/9] crypto/qat: rework asymmetric op build operation
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
                                       ` (2 preceding siblings ...)
  2022-02-18 17:15                     ` [dpdk-dev v9 3/9] crypto/qat: rework session functions Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 5/9] crypto/qat: unify symmetric functions Kai Ji
                                       ` (5 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch reworks the asymmetric crypto data path
implementation in the QAT driver. The changes include separating
the asymmetric crypto data path per QAT hardware generation, and
optimising the device capabilities declaration.
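
For reference, the callback shapes that the common queue-pair code uses
after this rework look roughly like the sketch below (stand-in types;
the asym path passes NULL for the opaque pointer and the error counter,
as the qat_qp.c hunk shows):

#include <stdint.h>

enum dev_gen { DEV_GEN1, DEV_GEN2, DEV_GEN3, DEV_GEN4 };

/* build one firmware request from a crypto op; the new opaque argument
 * is unused (NULL) on the asym path in this patch
 */
typedef int (*build_request_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, enum dev_gen gen);

/* translate one firmware response back into the op; the return value
 * is the number of completed ops (the asym path returns 1)
 */
typedef int (*process_response_t)(void **op, uint8_t *resp,
		void *op_cookie, uint64_t *dequeue_err_count);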

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c   |   5 +-
 drivers/crypto/qat/qat_asym.c | 129 +++++++++++++++++++---------------
 drivers/crypto/qat/qat_asym.h |  63 +++++++++++++++--
 3 files changed, 131 insertions(+), 66 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 56234ca1a4..7f2fdc53ce 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -619,7 +619,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -847,7 +847,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index f46eefd4b3..845e905a89 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,68 +1,36 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
-{
-	size_t i;
-
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
-	}
-	return -1;
-}
-
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
-{
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
+#include "qat_device.h"
 
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
-}
+#include "qat_logs.h"
+#include "qat_asym.h"
 
-static size_t max_of(int n, ...)
-{
-	va_list args;
-	size_t len = 0, num;
-	int i;
+uint8_t qat_asym_driver_id;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
-	}
-	va_end(args);
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
 
-	return len;
-}
 
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
@@ -106,7 +74,46 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -475,10 +482,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -677,9 +683,9 @@ static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
 	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
 }
 
-void
+int
 qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
 {
 	struct qat_asym_session *ctx;
 	struct icp_qat_fw_pke_resp *resp_msg =
@@ -722,6 +728,8 @@ qat_asym_process_response(void **op, uint8_t *resp,
 	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
 			sizeof(struct icp_qat_fw_pke_resp));
 #endif
+
+	return 1;
 }
 
 int
@@ -779,3 +787,8 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 	if (sess_priv)
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index c9242a12ca..3ae95f2e7b 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_ASYM_H_
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
 		struct rte_crypto_asym_xform *xform,
@@ -75,7 +121,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -87,8 +135,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void **op, uint8_t *resp,
+		void *op_cookie,  __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 5/9] crypto/qat: unify symmetric functions
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
                                       ` (3 preceding siblings ...)
  2022-02-18 17:15                     ` [dpdk-dev v9 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 6/9] crypto/qat: unify asymmetric functions Kai Ji
                                       ` (4 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch removes qat_sym_pmd.c and integrates all of its
functions into qat_sym.c. Unifying the QAT symmetric crypto PMD
functions in one file makes them easier to maintain.
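
As part of this, qat_sym_process_response() returns the number of
completed ops instead of void, so the common dequeue loop can advance
by what each handler reports. A simplified sketch of that convention
(stand-in types and hypothetical helper names, no real ring handling):

#include <stddef.h>
#include <stdint.h>

static int
process_response(void **op, uint8_t *resp, void *cookie,
		uint64_t *dequeue_err_count)
{
	(void)cookie;
	(void)dequeue_err_count;
	*op = resp;	/* hand the completed op back to the caller */
	return 1;	/* one op ready to return to the API */
}

static uint16_t
dequeue_burst(uint8_t *ring, uint32_t msg_size, uint32_t nb_resps,
		void **ops, uint16_t nb_ops)
{
	uint16_t done = 0;
	uint32_t head = 0, i;
	uint64_t err_count = 0;

	for (i = 0; i < nb_resps && done < nb_ops; i++) {
		done += process_response(&ops[done], ring + head,
				NULL, &err_count);
		head += msg_size;
	}
	return done;
}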

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build       |   4 +-
 drivers/common/qat/qat_device.c      |   4 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_crypto.h      |   5 +-
 drivers/crypto/qat/qat_sym.c         |  21 +++
 drivers/crypto/qat/qat_sym.h         | 147 ++++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |  11 +-
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 10 files changed, 168 insertions(+), 375 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index af92271a75..1bf6896a7e 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017-2018 Intel Corporation
+# Copyright(c) 2017-2022 Intel Corporation
 
 if is_windows
     build = false
@@ -73,7 +73,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 1f870d689a..6824d97050 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2020 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 
 #include <rte_string_fns.h>
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 7f2fdc53ce..b36ffa6f6d 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -838,7 +838,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 5ca76fcaa6..c01266f81c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 83bf55c933..aad4b243b7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -17,6 +17,27 @@ uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..f4ff2ce4cd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
@@ -24,6 +24,67 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,6 +115,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -213,17 +290,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +353,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
 }
 
 int
@@ -293,6 +370,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +430,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 792ad2b213..5322faff44 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #include <cryptodev_pmd.h>
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 28a26260fb..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 59fbdefa12..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 3a880096c4..9d6a19c0be 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 6/9] crypto/qat: unify asymmetric functions
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
                                       ` (4 preceding siblings ...)
  2022-02-18 17:15                     ` [dpdk-dev v9 5/9] crypto/qat: unify symmetric functions Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 7/9] crypto/qat: rework burst data path Kai Ji
                                       ` (3 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch removes qat_asym_pmd.c and integrates all of its
functions into qat_asym.c. Keeping the asymmetric crypto PMD
functions in a single file makes them easier to maintain.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 1bf6896a7e..f687f5c9d8 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 845e905a89..76d50dd6f9 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -19,6 +19,32 @@ uint8_t qat_asym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 /* An rte_driver is needed in the registration of both the device and the driver
  * with cryptodev.
  * The actual qat pci's rte_driver can't be used as its name represents
@@ -788,6 +814,160 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 9a7596b227..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index f988d646e5..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 7/9] crypto/qat: rework burst data path
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
                                       ` (5 preceding siblings ...)
  2022-02-18 17:15                     ` [dpdk-dev v9 6/9] crypto/qat: unify asymmetric functions Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 8/9] crypto/qat: unify raw data path functions Kai Ji
                                       ` (2 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch enables the op_build_request function in
qat_enqueue_op_burst, and the qat_dequeue_process_response
function in qat_dequeue_op_burst.
The op_build_request callback invoked when building a crypto request
is selected according to the crypto operations set up during
session init.
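In other words, the common queue-pair code no longer switches on the
service type per op; it simply invokes the callback handed in by the
sym/asym/comp PMD. A condensed view of the reworked loops (assembled
from the qat_qp.c hunk below, not compilable on its own) is:

	/* enqueue side: one builder callback per op; the per-queue
	 * opaque words let the sym builder cache the session resolved
	 * for the previous op
	 */
	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
	while (nb_ops_sent != nb_ops_possible) {
		ret = op_build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail >> queue->trailz],
				tmp_qp->opaque, tmp_qp->qat_dev_gen);
		if (ret != 0)
			break;
		/* ... advance tail, ops and nb_ops_sent as before ... */
	}

	/* dequeue side: one response-processing callback per firmware
	 * response
	 */
	nb_fw_responses = qat_dequeue_process_response(ops, resp_msg,
			tmp_qp->op_cookies[head >> rx_queue->trailz],
			&tmp_qp->stats.dequeue_err_count);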

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 830 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 271 insertions(+), 634 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index b36ffa6f6d..08ac91eac4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -599,29 +598,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -817,8 +805,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -836,21 +823,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 501132a448..c58a628915 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 76d50dd6f9..3c7cba6140 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -507,7 +507,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 3ae95f2e7b..78caa5649c 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -103,28 +103,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..692e1f8f0a 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,68 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	uintptr_t sess = (uintptr_t)opaque[0];
+	uintptr_t build_request_p = (uintptr_t)opaque[1];
+	qat_sym_build_request_t build_request = (void *)build_request_p;
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != (uintptr_t)ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if ((void *)sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +128,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (uintptr_t)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value, typically they have, but could differ if binaries built
+	 * separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 8/9] crypto/qat: unify raw data path functions
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
                                       ` (6 preceding siblings ...)
  2022-02-18 17:15                     ` [dpdk-dev v9 7/9] crypto/qat: rework burst data path Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-18 17:15                     ` [dpdk-dev v9 9/9] crypto/qat: support out of place SG list Kai Ji
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch unifies the QAT raw data path API implementations with the
enqueue/dequeue methods used for regular crypto operations. The
generation-specific functions are updated accordingly, and
qat_sym_hw_dp.c is removed as it is no longer required.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
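A minimal, illustrative sketch (not part of this patch) of how an
application drives the raw data path API that this series unifies,
assuming dev_id/qp_id refer to an already configured device and queue
pair, sess is a valid symmetric session and vec/ofs/user_data are
prepared by the caller; the helper names are made up and error handling
is trimmed:

#include <errno.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* per-op completion hook; a real application would record the status */
static void
post_deq_cb(void *user_data __rte_unused, uint32_t index __rte_unused,
	uint8_t is_op_success __rte_unused)
{
}

static int
raw_dp_xfer(uint8_t dev_id, uint16_t qp_id,
	struct rte_cryptodev_sym_session *sess,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	uint32_t enq, deq, n_success = 0;
	int size, status = 0;

	/* the PMD reports the context size including its private data */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return -ENOTSUP;
	ctx = rte_zmalloc(NULL, size, 0);
	if (ctx == NULL)
		return -ENOMEM;

	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		rte_free(ctx);
		return -EINVAL;
	}

	/* stage the jobs, then advance the queue tail in one go */
	enq = rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, user_data,
			&status);
	rte_cryptodev_raw_enqueue_done(ctx, enq);

	/* single poll for brevity; a real caller would loop until all
	 * enqueued jobs have been dequeued
	 */
	deq = rte_cryptodev_raw_dequeue_burst(ctx, NULL, enq, post_deq_cb,
			user_data, 1, &n_success, &status);
	rte_cryptodev_raw_dequeue_done(ctx, deq);

	rte_free(ctx);
	return (deq == enq && n_success == enq) ? 0 : -EIO;
}
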
 drivers/common/qat/meson.build               |   2 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 ++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  56 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 986 -------------------
 10 files changed, 1137 insertions(+), 994 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index f687f5c9d8..b7027f3164 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 8e497e7a09..dc8db84a68 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 64e6ae66ec..0c64c1e43f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -291,6 +291,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index db864d973a..ffa093a7a3 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -394,6 +394,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -403,6 +615,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 7642a87d55..f803bc1459 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -223,6 +223,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -230,6 +350,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 96cdb97a26..50a9c5ad5b 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c58a628915..fee6507512 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,6 +146,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -448,6 +452,656 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c01266f81c..cf386d0ed0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 692e1f8f0a..1ccffad5ab 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -89,7 +89,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -144,7 +144,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -292,8 +292,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (internals->capa_mz == NULL) {
 			QAT_LOG(DEBUG,
 				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
+				"destroying PMD for %s", name);
 			ret = -EFAULT;
 			goto error;
 		}
@@ -355,6 +354,55 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 5322faff44..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,986 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-
-		if (ctx->is_ucs) {
-			/* QAT GEN4 uses single pass to treat AEAD as cipher
-			 * operation
-			 */
-			struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
-				(void *)&req->serv_specif_rqpars;
-			cipher_param_20->spc_aad_addr = aad->iova;
-			cipher_param_20->spc_auth_res_addr = digest->iova;
-		} else {
-			cipher_param->spc_aad_addr = aad->iova;
-			cipher_param->spc_auth_res_addr = digest->iova;
-		}
-
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v9 9/9] crypto/qat: support out of place SG list
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
                                       ` (7 preceding siblings ...)
  2022-02-18 17:15                     ` [dpdk-dev v9 8/9] crypto/qat: unify raw data path functions Kai Ji
@ 2022-02-18 17:15                     ` Kai Ji
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-18 17:15 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch adds out-of-place scatter-gather list (SGL) support to the QAT PMD.
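
Every per-generation enqueue loop touched below repeats the same pattern:
when the caller provides a destination SGL, both source and destination
vectors are handed to the request builder, otherwise a zero-length
destination is passed and the builder reuses the source buffer in place.
A condensed sketch of that branch (identifiers as used in the hunks below):

	if (vec->dest_sgl) {
		/* out-of-place: independent source and destination SGLs */
		data_len = qat_sym_build_req_set_data(req, user_data[i],
			cookie, vec->src_sgl[i].vec, vec->src_sgl[i].num,
			vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
	} else {
		/* in-place: a zero-length destination means dst == src */
		data_len = qat_sym_build_req_set_data(req, user_data[i],
			cookie, vec->src_sgl[i].vec, vec->src_sgl[i].num,
			NULL, 0);
	}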

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index ffa093a7a3..5084a5fcd1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -468,8 +468,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -565,8 +575,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index f803bc1459..bd7f3785df 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -297,8 +297,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index fee6507512..83d9b66f34 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -526,9 +526,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -625,8 +634,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -725,8 +744,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -830,8 +859,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
                                       ` (8 preceding siblings ...)
  2022-02-18 17:15                     ` [dpdk-dev v9 9/9] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-22 17:02                     ` Kai Ji
  2022-02-22 17:02                       ` [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops Kai Ji
                                         ` (10 more replies)
  9 siblings, 11 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch reworks the QAT symmetric crypto datapath implementation so that
the request building for each generation is separated and the crypto
operations under the raw datapath API implementation are unified.

In addition, this patchset also enables QAT out-of-place (OOP) support in the
raw datapath API implementation.
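
For reference, a minimal sketch of how an application could drive the
out-of-place path through the standard cryptodev raw API once this series is
applied; queue pair, session and buffer setup are omitted, and raw_ctx,
nb_jobs, src_sgls, dst_sgls, ivs, digests, aads, statuses and user_data are
placeholders:

	struct rte_crypto_sym_vec vec = { 0 };
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	int enq_status;
	uint32_t n;

	vec.num = nb_jobs;
	vec.src_sgl = src_sgls;		/* per-job source segments */
	vec.dest_sgl = dst_sgls;	/* non-NULL selects out-of-place */
	vec.iv = ivs;
	vec.digest = digests;
	vec.aad = aads;
	vec.status = statuses;

	n = rte_cryptodev_raw_enqueue_burst(raw_ctx, &vec, ofs,
			user_data, &enq_status);
	if (n > 0)
		rte_cryptodev_raw_enqueue_done(raw_ctx, n);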

v10:
- rebase to the latest for-main
- fix build error when RTE_LOG_DEBUG is enabled

v9:
- reword commit messages
- fix unused function error

v8:
- rebase to 22.03-rc1

v7:
- fix pointer cast compile error on x86

v6:
- fix pointer cast error on x86
- rebase to the latest for-main

v5:
- rebase to the latest for-main
- restructure the patchset

v4:
- break down and restructure the patchset

v3:
- split patch 6 into two separate patches

v2:
- address review comments

Kai Ji (9):
  common/qat: define build request and dequeue ops
  crypto/qat: support symmetric build op request
  crypto/qat: rework session functions
  crypto/qat: rework asymmetric op build operation
  crypto/qat: unify symmetric functions
  crypto/qat: unify asymmetric functions
  crypto/qat: rework burst data path
  crypto/qat: unify raw data path functions
  crypto/qat: support out of place SG list

 drivers/common/qat/meson.build               |   6 +-
 drivers/common/qat/qat_device.c              |   4 +-
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  54 +-
 drivers/compress/qat/qat_comp_pmd.c          |  14 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 303 +++++-
 drivers/crypto/qat/qat_asym.h                |  79 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.h              |  16 +-
 drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 148 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 995 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 115 +--
 drivers/crypto/qat/qat_sym_session.h         |  15 +-
 23 files changed, 3582 insertions(+), 2523 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

--
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:52                         ` Zhang, Roy Fan
  2022-02-22 17:02                       ` [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request Kai Ji
                                         ` (9 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch introduces build-request and dequeue op function
pointers to the QAT queue pair implementation. The function
pointers are assigned during QAT session generation, based on
the input crypto operation request.
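
For illustration, a sketch of how these hooks are meant to be consumed once
later patches in the series wire them up (in this patch the callbacks are
still passed as NULL and marked __rte_unused); the my_* names are
hypothetical, while the typedefs and the qat_enqueue_op_burst() prototype
are the ones added below:

	/* A build-request callback matching qat_op_build_request_t. */
	static int
	my_sym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
			uint64_t *opaque, enum qat_device_gen dev_gen)
	{
		/* Translate the crypto op in in_op into a QAT firmware
		 * request written to out_msg; op_cookie provides
		 * per-descriptor scratch space, opaque may carry context
		 * between enqueues, dev_gen selects the generation-specific
		 * layout. Body omitted in this sketch.
		 */
		RTE_SET_USED(in_op); RTE_SET_USED(out_msg);
		RTE_SET_USED(op_cookie); RTE_SET_USED(opaque);
		RTE_SET_USED(dev_gen);
		return 0;
	}

	/* The common enqueue path now takes the builder as an argument. */
	static uint16_t
	my_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
	{
		return qat_enqueue_op_burst(qp, my_sym_build_request,
				(void **)ops, nb_ops);
	}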

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c          | 10 ++++--
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  4 +--
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h | 13 ++++++-
 6 files changed, 76 insertions(+), 13 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 57ac8fefca..56234ca1a4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -547,7 +547,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -814,7 +816,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..66f00943a5 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Typedef of the qat_op_build_request_t function pointer, passed in as an
+ * argument to the enqueue op burst, where a build request function is
+ * assigned based on the type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context between
+ *    2 enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - EINVAL if error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Typedef of the qat_op_dequeue_t function pointer, passed in as an
+ * argument to the dequeue op burst, where a dequeue op function is
+ * assigned based on the type of crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - EINVAL if error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index da6404c017..8e497e7a09 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..fe875a7fd0 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * Typedef of the qat_sym_build_request_t function pointer, passed in as an
+ * argument to the enqueue op burst, where a build request function is
+ * assigned based on the type of crypto op.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-02-22 17:02                       ` [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:52                         ` Zhang, Roy Fan
  2022-02-22 17:02                       ` [dpdk-dev v10 3/9] crypto/qat: rework session functions Kai Ji
                                         ` (8 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch adds common inline functions for the QAT symmetric
crypto driver to process crypto ops, and the implementation of
the build op request function for QAT generation 1.
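
The inline helpers added below compose into a per-op request build flow
along these lines; the wrapper name is illustrative (its shape matches the
qat_sym_build_request_t hook from patch 1/9), while the called functions,
QAT_SYM_SGL_MAX_NUMBER and ctx->fw_req are the ones used in this patch:

	static int
	build_cipher_request_gen1(void *in_op, struct qat_sym_session *ctx,
			uint8_t *out_msg, void *op_cookie)
	{
		struct rte_crypto_op *op = in_op;
		struct icp_qat_fw_la_bulk_req *req = (void *)out_msg;
		struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER];
		struct rte_crypto_vec out_vec[QAT_SYM_SGL_MAX_NUMBER];
		struct rte_crypto_sgl in_sgl = { .vec = in_vec, .num = 0 };
		struct rte_crypto_sgl out_sgl = { .vec = out_vec, .num = 0 };
		struct rte_crypto_va_iova_ptr cipher_iv;
		union rte_crypto_sym_ofs ofs;
		int32_t data_len;

		/* Start from the firmware request template in the session. */
		rte_mov128((uint8_t *)req, (const uint8_t *)&ctx->fw_req);

		/* 1. Flatten the op into vectors and resolve the cipher IV. */
		ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl,
				&out_sgl, &cipher_iv, NULL, NULL);
		if (ofs.raw == UINT64_MAX)
			return -EINVAL;

		/* 2. Write src/dst addresses (flat or SGL) into the request. */
		data_len = qat_sym_build_req_set_data(req, in_op, op_cookie,
				in_sgl.vec, in_sgl.num, out_sgl.vec,
				out_sgl.num);
		if (data_len < 0)
			return -EINVAL;

		/* 3. Fill in the cipher-specific request parameters. */
		enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs,
				(uint32_t)data_len);
		return 0;
	}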

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..2dd7475c77 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 3/9] crypto/qat: rework session functions
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-02-22 17:02                       ` [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops Kai Ji
  2022-02-22 17:02                       ` [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:53                         ` Zhang, Roy Fan
  2022-02-22 17:02                       ` [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
                                         ` (7 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch introduces a set of set_session methods, one per QAT
generation. In addition, reuse of a QAT session across generations
is prohibited, as support for min_qat_dev_gen is removed.
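
For illustration only (not part of this patch), the per-generation
dispatch described above can be modelled with a minimal, self-contained
sketch. The enum, struct and handler names below are simplified
stand-ins, not the real qat_crypto_gen_dev_ops definitions:

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the driver types (illustration only). */
enum gen_id { GEN1, GEN2, GEN3, GEN4, N_GENS };

typedef int (*set_session_t)(void *cryptodev, void *session);

struct gen_dev_ops {
	set_session_t set_session;
};

static struct gen_dev_ops gen_dev_ops[N_GENS];

/* Each generation registers its own handler at init time. */
static int
set_session_gen1(void *cryptodev, void *session)
{
	(void)cryptodev;
	(void)session;
	printf("GEN1: choose the build_request for this session\n");
	return 0;
}

/* Session configure ends by calling the hook of the device's own
 * generation, so a session cannot be silently reused on another
 * generation (replacing the old min_qat_dev_gen check).
 */
static int
configure_session(enum gen_id gen, void *cryptodev, void *session)
{
	if (gen_dev_ops[gen].set_session == NULL)
		return -ENOTSUP;
	return gen_dev_ops[gen].set_session(cryptodev, session);
}

int
main(void)
{
	gen_dev_ops[GEN1].set_session = set_session_gen1;
	return configure_session(GEN1, NULL, NULL);
}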

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 ++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 256 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 125 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 ++------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 10 files changed, 574 insertions(+), 109 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returns -ENOTSUP as it cannot handle some mixed algos;
+		 * GEN2 supports only some of them, so check here.
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,257 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returns -ENOTSUP as it cannot handle some mixed algos;
+		 * this is addressed by GEN3.
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,11 +103,133 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returns -ENOTSUP as it cannot handle some mixed algos;
+		 * this is addressed by GEN4.
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 2dd7475c77..ff176dddf1 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* Check for unsupported algorithm combinations if mixed */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build operation
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                         ` (2 preceding siblings ...)
  2022-02-22 17:02                       ` [dpdk-dev v10 3/9] crypto/qat: rework session functions Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:53                         ` Zhang, Roy Fan
  2022-02-22 17:02                       ` [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions Kai Ji
                                         ` (6 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch reworks the asymmetric crypto data path
implementation in the QAT driver. The changes include separating
the asymmetric crypto data path per QAT hardware generation and
optimising the device capabilities declaration code.
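
As a rough, self-contained sketch (not the driver code itself), the
reworked callback shape — a build-request function taking an extra
opaque parameter and driven from a generic enqueue loop — can be
modelled as below. All names here are simplified placeholders:

#include <stdint.h>
#include <stdio.h>

/* Placeholder for the per-service build-request callback signature
 * used by the generic enqueue path (illustration only).
 */
typedef int (*asym_build_request_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, int dev_gen);

static int
asym_build_request_model(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, int dev_gen)
{
	(void)in_op;
	(void)op_cookie;
	(void)opaque;
	(void)dev_gen;
	out_msg[0] = 0xab;	/* pretend to fill a PKE descriptor */
	return 0;
}

int
main(void)
{
	uint8_t ring_slot[64] = {0};
	uint64_t opaque[2] = {0, 0};
	asym_build_request_t build = asym_build_request_model;

	/* enqueue loop: one callback per op, opaque state passed through */
	int ret = build(NULL, ring_slot, NULL, opaque, 1);

	printf("build returned %d, descriptor byte 0x%02x\n",
			ret, ring_slot[0]);
	return ret;
}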

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c   |   5 +-
 drivers/crypto/qat/qat_asym.c | 129 +++++++++++++++++++---------------
 drivers/crypto/qat/qat_asym.h |  63 +++++++++++++++--
 3 files changed, 131 insertions(+), 66 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 56234ca1a4..7f2fdc53ce 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -619,7 +619,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -847,7 +847,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index f46eefd4b3..845e905a89 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,68 +1,36 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
-{
-	size_t i;
-
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
-	}
-	return -1;
-}
-
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
-{
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
+#include "qat_device.h"
 
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
-}
+#include "qat_logs.h"
+#include "qat_asym.h"
 
-static size_t max_of(int n, ...)
-{
-	va_list args;
-	size_t len = 0, num;
-	int i;
+uint8_t qat_asym_driver_id;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
-	}
-	va_end(args);
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
 
-	return len;
-}
 
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
@@ -106,7 +74,46 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -475,10 +482,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -677,9 +683,9 @@ static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
 	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
 }
 
-void
+int
 qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
 {
 	struct qat_asym_session *ctx;
 	struct icp_qat_fw_pke_resp *resp_msg =
@@ -722,6 +728,8 @@ qat_asym_process_response(void **op, uint8_t *resp,
 	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
 			sizeof(struct icp_qat_fw_pke_resp));
 #endif
+
+	return 1;
 }
 
 int
@@ -779,3 +787,8 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 	if (sess_priv)
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index c9242a12ca..3ae95f2e7b 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_ASYM_H_
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
 		struct rte_crypto_asym_xform *xform,
@@ -75,7 +121,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -87,8 +135,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void **op, uint8_t *resp,
+		void *op_cookie,  __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                         ` (3 preceding siblings ...)
  2022-02-22 17:02                       ` [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:53                         ` Zhang, Roy Fan
  2022-02-22 17:02                       ` [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions Kai Ji
                                         ` (5 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch removes qat_sym_pmd.c and integrates all of its functions
into qat_sym.c. Keeping the QAT symmetric crypto PMD functions in one
unified file should make them easier to maintain.
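
As a rough sketch of where this is heading (the burst hooks are fully
wired up in the data path rework later in this series), the
qat_sym_enqueue_burst/qat_sym_dequeue_burst prototypes added to qat_sym.h
end up as thin wrappers around the generic queue pair helpers, mirroring
the asym wrappers in the next patch. The bodies below are illustrative,
not the final upstream code.

uint16_t
qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	/* per-service build callback handed to the generic ring enqueue */
	return qat_enqueue_op_burst(qp, qat_sym_build_request,
			(void **)ops, nb_ops);
}

uint16_t
qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	/* per-service response handler handed to the generic ring dequeue */
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_sym_process_response, nb_ops);
}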

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build       |   4 +-
 drivers/common/qat/qat_device.c      |   4 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_crypto.h      |   5 +-
 drivers/crypto/qat/qat_sym.c         |  21 +++
 drivers/crypto/qat/qat_sym.h         | 147 ++++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |  11 +-
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 10 files changed, 168 insertions(+), 375 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index af92271a75..1bf6896a7e 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017-2018 Intel Corporation
+# Copyright(c) 2017-2022 Intel Corporation
 
 if is_windows
     build = false
@@ -73,7 +73,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 1f870d689a..6824d97050 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2020 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 
 #include <rte_string_fns.h>
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 7f2fdc53ce..b36ffa6f6d 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -838,7 +838,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 5ca76fcaa6..c01266f81c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 83bf55c933..aad4b243b7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -17,6 +17,27 @@ uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..f4ff2ce4cd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
@@ -24,6 +24,67 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,6 +115,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -213,17 +290,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +353,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
 }
 
 int
@@ -293,6 +370,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +430,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 792ad2b213..5322faff44 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #include <cryptodev_pmd.h>
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 28a26260fb..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 59fbdefa12..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 3a880096c4..9d6a19c0be 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                         ` (4 preceding siblings ...)
  2022-02-22 17:02                       ` [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:52                         ` Zhang, Roy Fan
  2022-02-22 17:02                       ` [dpdk-dev v10 7/9] crypto/qat: rework burst data path Kai Ji
                                         ` (4 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch removes qat_asym_pmd.c and integrates all of its
functions into qat_asym.c. Keeping the QAT asymmetric crypto
PMD functions in one unified file should make them easier to maintain.
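
For context, a minimal application-side sketch (not part of this patch)
showing how the burst hooks registered in qat_asym_dev_create() are
reached through the regular cryptodev API; dev_id, qp_id and the ops
array are assumed to be configured beforehand.

#include <rte_cryptodev.h>

/* Enqueue a burst of asym ops and poll the same queue pair once; the
 * cryptodev layer dispatches to the enqueue/dequeue hooks set by
 * qat_asym_dev_create(). */
static uint16_t
asym_send_and_poll(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

	return rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, enq);
}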

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 1bf6896a7e..f687f5c9d8 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 845e905a89..76d50dd6f9 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -19,6 +19,32 @@ uint8_t qat_asym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 /* An rte_driver is needed in the registration of both the device and the driver
  * with cryptodev.
  * The actual qat pci's rte_driver can't be used as its name represents
@@ -788,6 +814,160 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 9a7596b227..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index f988d646e5..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 7/9] crypto/qat: rework burst data path
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                         ` (5 preceding siblings ...)
  2022-02-22 17:02                       ` [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:54                         ` Zhang, Roy Fan
  2022-02-22 17:02                       ` [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions Kai Ji
                                         ` (3 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch enables the op_build_request callback in
qat_enqueue_op_burst, and the qat_dequeue_process_response
callback in qat_dequeue_op_burst.
The op_build_request invoked when building a crypto request is
selected based on the crypto operations set up during session init.
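
For reference, a sketch of the callback shapes the generic ring code now
consumes; the real typedefs are qat_op_build_request_t and
qat_op_dequeue_t in qat_qp.h, the parameter names below are illustrative
and enum qat_device_gen is assumed to come from qat_common.h.

#include "qat_common.h"	/* enum qat_device_gen (assumed location) */

/* Build one firmware request from *in_op into out_msg; opaque points at
 * the per-qp scratch cache the data path uses, e.g. to remember the last
 * session seen on this queue pair. */
typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);

/* Translate one firmware response back into its op; returns the number
 * of firmware responses consumed. */
typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
		uint64_t *dequeue_err_count);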

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 830 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 271 insertions(+), 634 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index b36ffa6f6d..08ac91eac4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -599,29 +598,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -817,8 +805,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -836,21 +823,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index ff176dddf1..c088fd50d2 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 76d50dd6f9..3c7cba6140 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -507,7 +507,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 3ae95f2e7b..78caa5649c 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -103,28 +103,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..692e1f8f0a 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,68 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	uintptr_t sess = (uintptr_t)opaque[0];
+	uintptr_t build_request_p = (uintptr_t)opaque[1];
+	qat_sym_build_request_t build_request = (void *)build_request_p;
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != (uintptr_t)ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if ((void *)sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +128,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (uintptr_t)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use the same driver id so they can share
+	 * sessions. Store driver_id so we can validate that all processes
+	 * have the same value; typically they do, but they could differ if
+	 * binaries are built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                         ` (6 preceding siblings ...)
  2022-02-22 17:02                       ` [dpdk-dev v10 7/9] crypto/qat: rework burst data path Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:54                         ` Zhang, Roy Fan
  2022-02-22 17:02                       ` [dpdk-dev v10 9/9] crypto/qat: support out of place SG list Kai Ji
                                         ` (2 subsequent siblings)
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch unifies the QAT raw data-path API implementations with the
same enqueue/dequeue methods used for crypto operations. The
generation-specific functions are updated accordingly, and
qat_sym_hw_dp.c is removed as it is no longer required.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
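
For reference, the sketch below shows roughly how an application would drive
this unified data path through the generic cryptodev raw data-path API. It is
a minimal, illustrative example only: it assumes a single in-place AEAD job on
an already configured device, queue pair and session; dev_id, qp_id, sess,
buf, iv, digest, aad and the helper names are placeholders, and most error
handling is trimmed.

#include <errno.h>

#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* Dequeue callback required by the raw API; per-op status is also
 * reported through the n_success counter below, so nothing to do here.
 */
static void
post_deq_cb(void *user_data __rte_unused, uint32_t index __rte_unused,
		uint8_t is_op_success __rte_unused)
{
}

static int
raw_dp_aead_once(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_vec *buf,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *digest,
		struct rte_crypto_va_iova_ptr *aad)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_sgl sgl = { .vec = buf, .num = 1 };
	struct rte_crypto_sym_vec vec = { .num = 1 };
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	struct rte_crypto_raw_dp_ctx *ctx;
	void *user_data[1] = { buf };
	int enq_status, deq_status, ctx_sz;
	int32_t st[1];
	uint32_t n_success = 0;
	int ret = -EIO;

	/* Context size includes the PMD private part and differs per
	 * QAT generation.
	 */
	ctx_sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (ctx_sz <= 0)
		return -ENOTSUP;
	ctx = rte_zmalloc(NULL, ctx_sz, 0);
	if (ctx == NULL)
		return -ENOMEM;

	/* Bind the queue pair and session; the PMD installs its
	 * generation-specific enqueue/dequeue handlers here.
	 */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0)
		goto out;

	vec.src_sgl = &sgl;
	vec.iv = iv;
	vec.digest = digest;
	vec.aad = aad;
	vec.status = st;

	/* Enqueue one job, then ring the doorbell for the cached jobs. */
	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data,
			&enq_status) != 1)
		goto out;
	rte_cryptodev_raw_enqueue_done(ctx, 1);

	/* Poll for the single completion, then acknowledge it. */
	while (rte_cryptodev_raw_dequeue_burst(ctx, NULL, 1, post_deq_cb,
			user_data, 1, &n_success, &deq_status) == 0)
		;
	rte_cryptodev_raw_dequeue_done(ctx, 1);

	ret = (n_success == 1) ? 0 : -EBADMSG;
out:
	rte_free(ctx);
	return ret;
}

The rte_crypto_sym_vec filled above is the same structure the
generation-specific enqueue_*_jobs functions in this patch consume, so the
PMD does not convert the job description again on the data path.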
 drivers/common/qat/meson.build               |   2 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 ++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  56 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 986 -------------------
 10 files changed, 1137 insertions(+), 994 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index f687f5c9d8..b7027f3164 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 8e497e7a09..dc8db84a68 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 64e6ae66ec..0c64c1e43f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -291,6 +291,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index db864d973a..ffa093a7a3 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -394,6 +394,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -403,6 +615,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 7642a87d55..f803bc1459 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -223,6 +223,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -230,6 +350,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 96cdb97a26..50a9c5ad5b 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c088fd50d2..58f07eb0b3 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,6 +146,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -448,6 +452,656 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c01266f81c..cf386d0ed0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 692e1f8f0a..1ccffad5ab 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -89,7 +89,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -144,7 +144,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -292,8 +292,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (internals->capa_mz == NULL) {
 			QAT_LOG(DEBUG,
 				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
+				"destroying PMD for %s", name);
 			ret = -EFAULT;
 			goto error;
 		}
@@ -355,6 +354,55 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 5322faff44..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,986 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-
-		if (ctx->is_ucs) {
-			/* QAT GEN4 uses single pass to treat AEAD as cipher
-			 * operation
-			 */
-			struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
-				(void *)&req->serv_specif_rqpars;
-			cipher_param_20->spc_aad_addr = aad->iova;
-			cipher_param_20->spc_auth_res_addr = digest->iova;
-		} else {
-			cipher_param->spc_aad_addr = aad->iova;
-			cipher_param->spc_auth_res_addr = digest->iova;
-		}
-
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.17.1
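
For reviewers, an application-level sketch of how the hooks added above are
reached may be useful: qat_sym_get_dp_ctx_size() and qat_sym_configure_dp_ctx()
sit behind rte_cryptodev_get_raw_dp_ctx_size() and
rte_cryptodev_configure_raw_dp_ctx(), and the per-session enqueue/dequeue
handlers selected in qat_sym_configure_raw_dp_ctx_gen1() are what the raw API
calls below end up invoking. This is illustration only, not part of the patch;
dev_id, qp_id and sess are assumed to be initialised elsewhere and all error
handling is omitted.

	#include <rte_cryptodev.h>
	#include <rte_malloc.h>

	/* sketch: single-op raw data path round trip */
	static void
	raw_dp_example(uint8_t dev_id, uint16_t qp_id,
			struct rte_cryptodev_sym_session *sess,
			struct rte_crypto_vec *data_vec, uint16_t n_vecs,
			union rte_crypto_sym_ofs ofs,
			struct rte_crypto_va_iova_ptr *iv,
			struct rte_crypto_va_iova_ptr *digest,
			struct rte_crypto_va_iova_ptr *aad,
			void *user_data)
	{
		/* size = rte_crypto_raw_dp_ctx + driver private qat_sym_dp_ctx */
		struct rte_crypto_raw_dp_ctx *raw_ctx = rte_zmalloc(NULL,
				rte_cryptodev_get_raw_dp_ctx_size(dev_id), 0);
		union rte_cryptodev_session_ctx sess_ctx = {
				.crypto_sess = sess };
		enum rte_crypto_op_status op_status;
		int deq_status;

		rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, raw_ctx,
				RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);

		/* enqueue one job, then commit the cached tail to the ring */
		rte_cryptodev_raw_enqueue(raw_ctx, data_vec, n_vecs, ofs,
				iv, digest, aad, user_data);
		rte_cryptodev_raw_enqueue_done(raw_ctx, 1);

		/* later: poll one response and advance the cached head */
		if (rte_cryptodev_raw_dequeue(raw_ctx, &deq_status,
				&op_status) != NULL)
			rte_cryptodev_raw_dequeue_done(raw_ctx, 1);
	}

The enqueue_done/dequeue_done calls are what flush the cached_enqueue and
cached_dequeue counters kept in qat_sym_dp_ctx out to the queue CSRs.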


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v10 9/9] crypto/qat: support out of place SG list
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                         ` (7 preceding siblings ...)
  2022-02-22 17:02                       ` [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions Kai Ji
@ 2022-02-22 17:02                       ` Kai Ji
  2022-02-22 18:55                         ` Zhang, Roy Fan
  2022-02-22 18:23                       ` [EXT] [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
  10 siblings, 1 reply; 156+ messages in thread
From: Kai Ji @ 2022-02-22 17:02 UTC (permalink / raw)
  To: dev; +Cc: gakhil, roy.fan.zhang, Kai Ji

This patch adds out-of-place SGL support to the QAT PMD.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
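A usage sketch (illustration only, not part of the patch): with this change
an application can describe an out-of-place operation to the raw data path
API by filling vec->dest_sgl; leaving it NULL keeps the in-place behaviour
unchanged. Struct and API names below come from rte_crypto_sym.h and
rte_cryptodev.h; the single-segment buffers, the pre-configured raw_ctx and
the caller-supplied addresses are assumptions, and error handling is omitted.

	/* illustration: single-segment OOP AEAD job via the raw API */
	static void
	oop_enqueue_example(struct rte_crypto_raw_dp_ctx *raw_ctx,
			void *src_va, rte_iova_t src_iova,
			void *dst_va, rte_iova_t dst_iova, uint32_t len,
			struct rte_crypto_va_iova_ptr *iv,
			struct rte_crypto_va_iova_ptr *digest,
			struct rte_crypto_va_iova_ptr *aad,
			void *op_cookie)
	{
		struct rte_crypto_vec src_seg = {
			.base = src_va, .iova = src_iova, .len = len };
		struct rte_crypto_vec dst_seg = {
			.base = dst_va, .iova = dst_iova, .len = len };
		struct rte_crypto_sgl src_sgl = { .vec = &src_seg, .num = 1 };
		struct rte_crypto_sgl dst_sgl = { .vec = &dst_seg, .num = 1 };
		union rte_crypto_sym_ofs ofs = { .raw = 0 }; /* whole buffer */
		void *udata[1] = { op_cookie };
		int32_t op_status;
		int enq_status;

		struct rte_crypto_sym_vec vec = {
			.num = 1,
			.src_sgl = &src_sgl,
			.dest_sgl = &dst_sgl, /* NULL here means in-place */
			.iv = iv,
			.digest = digest,
			.aad = aad,
			.status = &op_status,
		};

		/* QAT caches the request (enq_status == 0), so commit the
		 * ring tail explicitly afterwards */
		if (rte_cryptodev_raw_enqueue_burst(raw_ctx, &vec, ofs,
				udata, &enq_status) == 1 && enq_status == 0)
			rte_cryptodev_raw_enqueue_done(raw_ctx, 1);
	}
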
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index ffa093a7a3..5084a5fcd1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -468,8 +468,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -565,8 +575,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index f803bc1459..bd7f3785df 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -297,8 +297,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 58f07eb0b3..3bcb53cf9f 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -526,9 +526,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -625,8 +634,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -725,8 +744,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -830,8 +859,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [EXT] [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                         ` (8 preceding siblings ...)
  2022-02-22 17:02                       ` [dpdk-dev v10 9/9] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-22 18:23                       ` Akhil Goyal
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
  10 siblings, 0 replies; 156+ messages in thread
From: Akhil Goyal @ 2022-02-22 18:23 UTC (permalink / raw)
  To: Kai Ji, dev; +Cc: roy.fan.zhang

Hi Kai,

This is again not compiling. This is now v10, and most of the versions have been fixing one compilation error or another.
You should fix your build system before sending out the next version.

/usr/bin/ld: drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_qat_sym.c.o:/home/gakhil/up/dpdk-next-crypto/build-clang-static/../drivers/crypto/qat/qat_sym.c:16: multiple definition of `qat_sym_driver_id'; drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_qat_sym_pmd.c.o:/home/gakhil/up/dpdk-next-crypto/build-clang-static/../drivers/crypto/qat/qat_sym_pmd.c:23: first defined here
/usr/bin/ld: drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_qat_sym.c.o:/home/gakhil/up/dpdk-next-crypto/build-clang-static/../drivers/crypto/qat/qat_sym.c:18: multiple definition of `qat_sym_gen_dev_ops'; drivers/a715181@@tmp_rte_common_qat@sta/crypto_qat_qat_sym_pmd.c.o:/home/gakhil/up/dpdk-next-crypto/build-clang-static/../drivers/crypto/qat/qat_sym_pmd.c:25: first defined here
clang: error: linker command failed with exit code 1 (use -v to see invocation)
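
For context, the duplicate symbols above come from qat_sym.c and
qat_sym_pmd.c both being compiled while each still defines qat_sym_driver_id
and qat_sym_gen_dev_ops. The conventional layout, sketched below with the
symbol names taken from the log (the header placement and the QAT_N_GENS
bound are assumptions), keeps exactly one definition and exposes the rest as
extern declarations; in this series that single definition point is meant to
be qat_sym.c once qat_sym_pmd.c is removed from the build.

	/* shared header, e.g. qat_sym.h: declarations only */
	extern uint8_t qat_sym_driver_id;
	extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];

	/* exactly one .c file, e.g. qat_sym.c: the single definition */
	uint8_t qat_sym_driver_id;
	struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];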


> This patch reworks the QAT symmetric crypto datapath implementation so
> that request building for each generation is separated and the crypto
> operations under the raw datapath API implementation are unified.
> 
> In addition, this patchset also enables QAT OOP support in the raw
> datapath API implementation.
> 
> v10:
> - rebase to the latest for-main
> - fix of build error when RTE_LOG_DEBUG enabled
> 
> v9:
> - commit messages reword
> - fix of unused function error
> 
> v8:
> - rebase to 22.03-rc1
> 
> v7:
> - fix of pointer cast compile error in x86
> 
> v6:
> - fix of pointer cast error in x86
> - rebase to the latest for-main
> 
> v5:
> - rebase to the latest for-main
> - patchset reconstruct
> 
> v4:
> - patchset break down and reconstruct
> 
> v3:
> - separate a single patch 6 into two different patches
> 
> v2:
> - review comments addressed
> 
> Kai Ji (9):
>   common/qat: define build request and dequeue ops
>   crypto/qat: support symmetric build op request
>   crypto/qat: rework session functions
>   crypto/qat: rework asymmetric op build operation
>   crypto/qat: unify symmetric functions
>   crypto/qat: unify asymmetric functions
>   crypto/qat: rework burst data path
>   crypto/qat: unify raw data path functions
>   crypto/qat: support out of place SG list
> 
>  drivers/common/qat/meson.build               |   6 +-
>  drivers/common/qat/qat_device.c              |   4 +-
>  drivers/common/qat/qat_qp.c                  |  42 +-
>  drivers/common/qat/qat_qp.h                  |  54 +-
>  drivers/compress/qat/qat_comp_pmd.c          |  14 +-
>  drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
>  drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
>  drivers/crypto/qat/qat_asym.c                | 303 +++++-
>  drivers/crypto/qat/qat_asym.h                |  79 +-
>  drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
>  drivers/crypto/qat/qat_asym_pmd.h            |  54 -
>  drivers/crypto/qat/qat_crypto.h              |  16 +-
>  drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
>  drivers/crypto/qat/qat_sym.h                 | 148 ++-
>  drivers/crypto/qat/qat_sym_hw_dp.c           | 995 -------------------
>  drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
>  drivers/crypto/qat/qat_sym_pmd.h             |  95 --
>  drivers/crypto/qat/qat_sym_session.c         | 115 +--
>  drivers/crypto/qat/qat_sym_session.h         |  15 +-
>  23 files changed, 3582 insertions(+), 2523 deletions(-)
>  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
>  delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
>  delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
>  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
>  delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h
> 
> --
> 2.17.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops
  2022-02-22 17:02                       ` [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops Kai Ji
@ 2022-02-22 18:52                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:52 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 1/9] common/qat: define build request and dequeue
> ops
> 
> This patch introduces build request and dequeue op function
> pointers to the QAT queue pair implementation. The function
> pointers are assigned during QAT session generation based on the
> input crypto operation request.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
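
As a side note on the approach in the quoted message: choosing the
build-request and dequeue handlers once, at session or queue-pair setup,
keeps per-algorithm branching out of the hot path. A generic sketch of the
pattern (hypothetical names only, not the driver's actual types):

	typedef int (*build_request_fn)(void *op, uint8_t *out_msg,
			void *op_cookie, void *session_priv);
	typedef int (*dequeue_resp_fn)(void *resp_msg, void *op_cookie);

	struct example_session {
		build_request_fn build_request; /* set once per session */
	};

	struct example_qp {
		dequeue_resp_fn dequeue_resp;   /* set once per queue pair */
	};

	static inline int
	example_enqueue_one(struct example_session *s, void *op,
			uint8_t *ring_slot, void *op_cookie)
	{
		/* hot path: no switch on algorithm, just the callback */
		return s->build_request(op, ring_slot, op_cookie, s);
	}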

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions
  2022-02-22 17:02                       ` [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions Kai Ji
@ 2022-02-22 18:52                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:52 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions
> 
> This patch removes qat_asym_pmd.c and integrates all of its
> functions into qat_asym.c. Unifying the asym crypto PMD
> functions in one file should make them easier to maintain.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request
  2022-02-22 17:02                       ` [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request Kai Ji
@ 2022-02-22 18:52                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:52 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request
> 
> This patch adds common inline functions for the QAT symmetric
> crypto driver to process crypto ops, and the implementation of
> the build op request function for QAT generation 1.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 3/9] crypto/qat: rework session functions
  2022-02-22 17:02                       ` [dpdk-dev v10 3/9] crypto/qat: rework session functions Kai Ji
@ 2022-02-22 18:53                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:53 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 3/9] crypto/qat: rework session functions
> 
> This patch introduces a set of set_session methods to the QAT
> generations. In addition, the reuse of a QAT session between
> generations is prohibited as the support of min_qat_dev_gen_id
> is removed.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build operation
  2022-02-22 17:02                       ` [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
@ 2022-02-22 18:53                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:53 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build
> operation
> 
> This patch reworks the asymmetric crypto data path
> implementation in the QAT driver. The changes include asymmetric
> crypto data path separation per QAT hardware generation, and
> code optimisation of the device capabilities declaration.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions
  2022-02-22 17:02                       ` [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions Kai Ji
@ 2022-02-22 18:53                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:53 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions
> 
> This patch removes qat_sym_pmd.c and integrates all of its functions into
> qat_sym.c. Unifying the QAT sym crypto PMD functions should
> make them easier to maintain.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 7/9] crypto/qat: rework burst data path
  2022-02-22 17:02                       ` [dpdk-dev v10 7/9] crypto/qat: rework burst data path Kai Ji
@ 2022-02-22 18:54                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:54 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 7/9] crypto/qat: rework burst data path
> 
> This patch enables the op_build_request function in
> qat_enqueue_op_burst, and the qat_dequeue_process_response
> function in qat_dequeue_op_burst.
> The op_build_request function invoked when building a crypto request
> is based on the crypto operation set up during session init.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions
  2022-02-22 17:02                       ` [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions Kai Ji
@ 2022-02-22 18:54                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:54 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions
> 
> This patch unifies QAT's raw datapath API implementations
> with the same enqueue/dequeue methods used in crypto operations.
> The functions specific to each QAT generation are updated
> accordingly. qat_sym_hw_dp.c is removed as it is no longer required.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [dpdk-dev v10 9/9] crypto/qat: support out of place SG list
  2022-02-22 17:02                       ` [dpdk-dev v10 9/9] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-22 18:55                         ` Zhang, Roy Fan
  0 siblings, 0 replies; 156+ messages in thread
From: Zhang, Roy Fan @ 2022-02-22 18:55 UTC (permalink / raw)
  To: Ji, Kai, dev; +Cc: gakhil

> -----Original Message-----
> From: Ji, Kai <kai.ji@intel.com>
> Sent: Tuesday, February 22, 2022 5:02 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Ji, Kai
> <kai.ji@intel.com>
> Subject: [dpdk-dev v10 9/9] crypto/qat: support out of place SG list
> 
> This patch adds out-of-place SGL support to the QAT PMD.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
May need to address the compile issue
Other than that
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 0/9] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                         ` (9 preceding siblings ...)
  2022-02-22 18:23                       ` [EXT] [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
@ 2022-02-22 20:30                       ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 1/9] common/qat: define build request and dequeue ops Fan Zhang
                                           ` (9 more replies)
  10 siblings, 10 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Fan Zhang

This patchset reworks the QAT symmetric crypto datapath implementation so
that request building is separated per generation and the crypto operations
under the raw datapath API implementation are unified.

In addition, this patchset also enables QAT out-of-place (OOP) support in the
raw datapath API implementation.

v11:
- fixed a compile issue

v10:
- rebase to the latest for-main
- fix of build error when RTE_LOG_DEBUG is enabled

v9:
- commit messages reworded
- fix of unused function error

v8:
- rebase to 22.03-rc1

v7:
- fix of pointer cast compile error in x86

v6:
- fix of pointer cast error in x86
- rebase to the latest for-main

v5:
- rebase to the latest for-main
- patchset reconstruct

v4:
- patchset break down and reconstruct

v3:
- separate a single patch 6 into two different patches

v2:
- review comments addressed

Kai Ji (9):
  common/qat: define build request and dequeue ops
  crypto/qat: support symmetric build op request
  crypto/qat: rework session functions
  crypto/qat: rework asymmetric op build operation
  crypto/qat: unify symmetric functions
  crypto/qat: unify asymmetric functions
  crypto/qat: rework burst data path
  crypto/qat: unify raw data path functions
  crypto/qat: support out of place SG list

 drivers/common/qat/meson.build               |   6 +-
 drivers/common/qat/qat_device.c              |   4 +-
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  54 +-
 drivers/compress/qat/qat_comp_pmd.c          |  14 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 303 +++++-
 drivers/crypto/qat/qat_asym.h                |  79 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.h              |  16 +-
 drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 148 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 995 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 115 +--
 drivers/crypto/qat/qat_sym_session.h         |  15 +-
 23 files changed, 3582 insertions(+), 2523 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 1/9] common/qat: define build request and dequeue ops
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 2/9] crypto/qat: support symmetric build op request Fan Zhang
                                           ` (8 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch introduces build request and dequeue op function
pointers to the QAT queue pair implementation. The function
pointers are assigned during QAT session generation based on the
input crypto operation request.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
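A minimal sketch of how a service plugs into the reworked burst prototypes
below (illustration only, not part of the diff: the demo_* names are made
up, while qat_enqueue_op_burst(), qat_dequeue_op_burst() and the two
typedefs are the ones added here; the hooks stay __rte_unused in qat_qp.c
until they are wired up in patch 7/9):

#include <rte_common.h>
#include "qat_qp.h"

/* hypothetical per-generation builder: turn one op into a firmware msg */
static int
demo_build_request(void *in_op __rte_unused, uint8_t *out_msg __rte_unused,
		void *op_cookie __rte_unused, uint64_t *opaque __rte_unused,
		enum qat_device_gen dev_gen __rte_unused)
{
	/* a real builder fills *out_msg from the crypto op here */
	return 0;
}

/* hypothetical response handler: convert one firmware response back */
static int
demo_dequeue_response(void **op __rte_unused, uint8_t *resp __rte_unused,
		void *op_cookie __rte_unused,
		uint64_t *dequeue_err_count __rte_unused)
{
	return 0;
}

/* the generic ring code only ever sees the two function pointers */
static uint16_t
demo_enqueue_burst(void *qp, void **ops, uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, demo_build_request, ops, nb_ops);
}

static uint16_t
demo_dequeue_burst(void *qp, void **ops, uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, ops, demo_dequeue_response, nb_ops);
}
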
 drivers/common/qat/qat_qp.c          | 10 ++++--
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  6 ++--
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h | 13 ++++++-
 6 files changed, 77 insertions(+), 14 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 57ac8fefca..56234ca1a4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -547,7 +547,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -814,7 +816,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..66f00943a5 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Function pointer type qat_op_build_request_t, passed as an argument to
+ * the enqueue op burst, where a build request is assigned based on the
+ * type of crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_meg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context that is useful
+ *    between 2 enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - EINVAL if error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Function pointer type qat_op_dequeue_t, passed as an argument to the
+ * dequeue op burst, where a dequeue op is assigned based on the type of
+ * crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - EINVAL if error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index da6404c017..efe5d08a99 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -639,7 +639,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
 					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_pmd_enq_deq_dummy_op_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..fe875a7fd0 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * Function pointer type qat_sym_build_request_t: a per-session build
+ * request routine, assigned based on the type of crypto op when the
+ * session is set up.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 2/9] crypto/qat: support symmetric build op request
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 1/9] common/qat: define build request and dequeue ops Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 3/9] crypto/qat: rework session functions Fan Zhang
                                           ` (7 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch adds common inline functions for the QAT symmetric
crypto driver to process crypto ops, and the implementation of
the build op request function for QAT generation 1.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
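The flow followed by the gen1 build functions added below can be condensed
to the four steps in this sketch (error handling trimmed, demo_build_cipher
is an illustrative name only; the complete version is
qat_sym_build_op_cipher_gen1() in this patch):

#include <rte_memcpy.h>
#include "dev/qat_crypto_pmd_gens.h"

static int
demo_build_cipher(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	struct rte_crypto_op *op = in_op;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_vec out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_sgl in_sgl = { .vec = in_vec, .num = 0 };
	struct rte_crypto_sgl out_sgl = { .vec = out_vec, .num = 0 };
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct icp_qat_fw_la_bulk_req *req =
			(struct icp_qat_fw_la_bulk_req *)out_msg;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	/* 1. start from the firmware request template built at session init */
	rte_mov128((uint8_t *)req, (const uint8_t *)&ctx->fw_req);

	/* 2. translate the mbuf based op into IOVA vectors and offsets */
	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, NULL, NULL);

	/* 3. point the request at the source/dest data (flat buffer or SGL) */
	total_len = qat_sym_build_req_set_data(req, in_op, op_cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);

	/* 4. fill in the cipher offset/length and the IV */
	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);

	return 0;
}
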
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..2dd7475c77 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 3/9] crypto/qat: rework session functions
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 1/9] common/qat: define build request and dequeue ops Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 2/9] crypto/qat: support symmetric build op request Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 4/9] crypto/qat: rework asymmetric op build operation Fan Zhang
                                           ` (6 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch introduces a set of set_session methods to the QAT
generations. In addition, reusing a QAT session across device
generations is now prohibited, since the min_qat_dev_gen session
field and its checks are removed.
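
Below is a minimal sketch of the per-generation dispatch this patch sets
up. The set_session hook and the qat_sym_gen_dev_ops[] array follow the
hunks below; the example_finalize_session() wrapper is hypothetical and
only illustrates how session setup resolves the handler.

#include <rte_cryptodev.h>
#include "qat_crypto.h"       /* set_session_t, qat_sym_gen_dev_ops[] */
#include "qat_sym_session.h"  /* struct qat_sym_session */

/* Hypothetical helper, for illustration only. Each generation installs
 * its handler in its RTE_INIT constructor, e.g.
 *	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 *			qat_sym_crypto_set_session_gen3;
 * so session setup needs a single indirect call instead of the old
 * min_qat_dev_gen checks at build-request time.
 */
static int
example_finalize_session(struct rte_cryptodev *dev,
		enum qat_device_gen qat_dev_gen,
		struct qat_sym_session *session)
{
	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
			(void *)session);
}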

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 ++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 256 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 125 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 ++------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 10 files changed, 574 insertions(+), 109 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * but some are not supported by GEN2, so checking here
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,257 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN3
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,11 +103,133 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN4
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 2dd7475c77..ff176dddf1 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* Check none supported algs if mixed */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 4/9] crypto/qat: rework asymmetric op build operation
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
                                           ` (2 preceding siblings ...)
  2022-02-22 20:30                         ` [PATCH v11 3/9] crypto/qat: rework session functions Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 5/9] crypto/qat: unify symmetric functions Fan Zhang
                                           ` (5 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch reworks the asymmetric crypto data path
implementation in the QAT driver. The changes separate the
asymmetric data path per QAT hardware generation and streamline
the device capability declarations.
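
A minimal sketch of how the common queue-pair code drives the reworked
asym entry points after this change. The signatures match the hunks
below; the example_enqueue_one_asym() wrapper and its argument names are
hypothetical and shown for illustration only.

#include "qat_qp.h"    /* struct qat_qp */
#include "qat_asym.h"  /* qat_asym_build_request() */

/* Illustration only: the build callback now takes an extra opaque
 * pointer (unused on the asym path, so NULL is passed here) together
 * with the device generation, and qat_asym_process_response() returns
 * the number of completed ops instead of void.
 */
static int
example_enqueue_one_asym(struct qat_qp *qp, void *op,
		uint8_t *ring_slot, void *op_cookie)
{
	return qat_asym_build_request(op, ring_slot, op_cookie,
			NULL, qp->qat_dev_gen);
}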

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/qat_qp.c   |   5 +-
 drivers/crypto/qat/qat_asym.c | 129 +++++++++++++++++++---------------
 drivers/crypto/qat/qat_asym.h |  63 +++++++++++++++--
 3 files changed, 131 insertions(+), 66 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 56234ca1a4..7f2fdc53ce 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -619,7 +619,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -847,7 +847,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 27ce0337a7..d6ca1afae8 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,68 +1,36 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
-{
-	size_t i;
-
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
-	}
-	return -1;
-}
-
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
-{
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
+#include "qat_device.h"
 
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
-}
+#include "qat_logs.h"
+#include "qat_asym.h"
 
-static size_t max_of(int n, ...)
-{
-	va_list args;
-	size_t len = 0, num;
-	int i;
+uint8_t qat_asym_driver_id;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
-	}
-	va_end(args);
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
 
-	return len;
-}
 
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
@@ -106,7 +74,46 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -475,10 +482,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -677,9 +683,9 @@ static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
 	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
 }
 
-void
+int
 qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
 {
 	struct qat_asym_session *ctx;
 	struct icp_qat_fw_pke_resp *resp_msg =
@@ -722,6 +728,8 @@ qat_asym_process_response(void **op, uint8_t *resp,
 	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
 			sizeof(struct icp_qat_fw_pke_resp));
 #endif
+
+	return 1;
 }
 
 int
@@ -779,3 +787,8 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 	if (sess_priv)
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index c9242a12ca..3ae95f2e7b 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_ASYM_H_
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
 		struct rte_crypto_asym_xform *xform,
@@ -75,7 +121,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -87,8 +135,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void **op, uint8_t *resp,
+		void *op_cookie,  __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 5/9] crypto/qat: unify symmetric functions
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
                                           ` (3 preceding siblings ...)
  2022-02-22 20:30                         ` [PATCH v11 4/9] crypto/qat: rework asymmetric op build operation Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 6/9] crypto/qat: unify asymmetric functions Fan Zhang
                                           ` (4 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch removes qat_sym_pmd.c and integrates all of its functions
into qat_sym.c. Unifying the QAT symmetric crypto PMD functions in a
single file makes them easier to maintain.
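
A minimal usage sketch of the capability helper macros that move into
qat_sym.h with this patch. The entry below is a placeholder with
AES-CTR ranges chosen for illustration; it is not the driver's real
capability table, which stays in the generation-specific
dev/qat_crypto_pmd_gen*.c files.

#include <rte_cryptodev.h>
#include "qat_sym.h"     /* QAT_SYM_CIPHER_CAP */
#include "qat_crypto.h"  /* CAP_RNG */

/* Placeholder capability table, for illustration only. */
static const struct rte_cryptodev_capabilities example_sym_capabilities[] = {
	QAT_SYM_CIPHER_CAP(AES_CTR,
		.block_size = 16,
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};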

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/meson.build       |   4 +-
 drivers/common/qat/qat_device.c      |   4 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_crypto.h      |   5 +-
 drivers/crypto/qat/qat_sym.c         |  21 +++
 drivers/crypto/qat/qat_sym.h         | 147 ++++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |  11 +-
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 10 files changed, 168 insertions(+), 375 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index af92271a75..1bf6896a7e 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017-2018 Intel Corporation
+# Copyright(c) 2017-2022 Intel Corporation
 
 if is_windows
     build = false
@@ -73,7 +73,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 1f870d689a..6824d97050 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2020 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 
 #include <rte_string_fns.h>
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 7f2fdc53ce..b36ffa6f6d 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -838,7 +838,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 5ca76fcaa6..c01266f81c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 83bf55c933..aad4b243b7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -17,6 +17,27 @@ uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..f4ff2ce4cd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
@@ -24,6 +24,67 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,6 +115,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -213,17 +290,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +353,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
 }
 
 int
@@ -293,6 +370,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +430,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 792ad2b213..5322faff44 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #include <cryptodev_pmd.h>
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 28a26260fb..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-uint8_t qat_sym_driver_id;
-
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 59fbdefa12..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 3a880096c4..9d6a19c0be 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 6/9] crypto/qat: unify asymmetric functions
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
                                           ` (4 preceding siblings ...)
  2022-02-22 20:30                         ` [PATCH v11 5/9] crypto/qat: unify symmetric functions Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 7/9] crypto/qat: rework burst data path Fan Zhang
                                           ` (3 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch removes qat_asym_pmd.c and integrates all of its
functions into qat_asym.c. Keeping the asymmetric crypto PMD
functions in a single file makes them easier to maintain.
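
The refactor is internal to the PMD, so applications keep driving the
device through the standard cryptodev burst API. Below is a minimal,
illustrative sketch of that usage (not part of the patch; dev_id, qp_id
and the op array are assumed to be configured elsewhere):

#include <rte_cryptodev.h>

/* Illustrative only: the enqueue/dequeue entry points registered by
 * qat_asym_dev_create() are reached through the generic burst API.
 */
static uint16_t
run_asym_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

	/* Poll once for completions; a real caller would loop until all
	 * enqueued operations have been dequeued.
	 */
	return rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, enq);
}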

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 1bf6896a7e..f687f5c9d8 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index d6ca1afae8..353c36534a 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -19,6 +19,32 @@ uint8_t qat_asym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 /* An rte_driver is needed in the registration of both the device and the driver
  * with cryptodev.
  * The actual qat pci's rte_driver can't be used as its name represents
@@ -788,6 +814,160 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index 9a7596b227..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index f988d646e5..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 7/9] crypto/qat: rework burst data path
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
                                           ` (5 preceding siblings ...)
  2022-02-22 20:30                         ` [PATCH v11 6/9] crypto/qat: unify asymmetric functions Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 8/9] crypto/qat: unify raw data path functions Fan Zhang
                                           ` (2 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch enables the op_build_request function in
qat_enqueue_op_burst, and the qat_dequeue_process_response
function in qat_dequeue_op_burst.
The op_build_request callback invoked when building a crypto
request is selected from the crypto operations set up during
session init.
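
Below is a minimal sketch of the dispatch pattern described above (all
names are hypothetical and not taken from the patch): the request
builder chosen at session-init time is handed to a generic ring
enqueue, so the hot path contains no per-algorithm branching.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-ins for qat_op_build_request_t and the session. */
typedef int (*build_request_fn)(void *op, uint8_t *out_msg, void *cookie);

struct example_session {
	build_request_fn build_request; /* picked during session init */
};

/* Generic ring enqueue: one build callback, supplied per service. */
static uint16_t
example_enqueue_burst(uint8_t *ring, uint32_t msg_sz,
		build_request_fn build, void **ops, uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		/* Translate each op into a firmware descriptor in place;
		 * stop the burst on the first build failure.
		 */
		if (build(ops[i], ring + (size_t)i * msg_sz, NULL) != 0)
			break;
	}
	return i; /* number of descriptors written to the ring */
}

/* Per-service wrapper: forward the callback stored in the session. */
static uint16_t
example_sym_enqueue(uint8_t *ring, uint32_t msg_sz,
		struct example_session *sess, void **ops, uint16_t nb_ops)
{
	return example_enqueue_burst(ring, msg_sz, sess->build_request,
			ops, nb_ops);
}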

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 830 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 271 insertions(+), 634 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index b36ffa6f6d..08ac91eac4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -599,29 +598,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -817,8 +805,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -836,21 +823,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index ff176dddf1..c088fd50d2 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 353c36534a..6e65950baf 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -507,7 +507,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 3ae95f2e7b..78caa5649c 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -103,28 +103,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..692e1f8f0a 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,68 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	uintptr_t sess = (uintptr_t)opaque[0];
+	uintptr_t build_request_p = (uintptr_t)opaque[1];
+	qat_sym_build_request_t build_request = (void *)build_request_p;
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != (uintptr_t)ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if ((void *)sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +128,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (uintptr_t)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value, typically they have, but could differ if binaries built
+	 * separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 8/9] crypto/qat: unify raw data path functions
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
                                           ` (6 preceding siblings ...)
  2022-02-22 20:30                         ` [PATCH v11 7/9] crypto/qat: rework burst data path Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-22 20:30                         ` [PATCH v11 9/9] crypto/qat: support out of place SG list Fan Zhang
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch unifies the QAT raw data-path API implementation with the
same enqueue/dequeue methods used for crypto operations. The
generation-specific functions are updated accordingly. The file
qat_sym_hw_dp.c is removed as it is no longer required.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/meson.build               |   2 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 ++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  56 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 986 -------------------
 10 files changed, 1137 insertions(+), 994 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index f687f5c9d8..b7027f3164 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index efe5d08a99..dc8db84a68 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_comp_pmd_enq_deq_dummy_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 64e6ae66ec..0c64c1e43f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -291,6 +291,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index db864d973a..ffa093a7a3 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -394,6 +394,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -403,6 +615,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 7642a87d55..f803bc1459 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -223,6 +223,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -230,6 +350,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 96cdb97a26..50a9c5ad5b 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c088fd50d2..58f07eb0b3 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,6 +146,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -448,6 +452,656 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c01266f81c..cf386d0ed0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 692e1f8f0a..1ccffad5ab 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -89,7 +89,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -144,7 +144,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -292,8 +292,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (internals->capa_mz == NULL) {
 			QAT_LOG(DEBUG,
 				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
+				"destroying PMD for %s", name);
 			ret = -EFAULT;
 			goto error;
 		}
@@ -355,6 +354,55 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 5322faff44..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,986 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-
-		if (ctx->is_ucs) {
-			/* QAT GEN4 uses single pass to treat AEAD as cipher
-			 * operation
-			 */
-			struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
-				(void *)&req->serv_specif_rqpars;
-			cipher_param_20->spc_aad_addr = aad->iova;
-			cipher_param_20->spc_auth_res_addr = digest->iova;
-		} else {
-			cipher_param->spc_aad_addr = aad->iova;
-			cipher_param->spc_auth_res_addr = digest->iova;
-		}
-
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [PATCH v11 9/9] crypto/qat: support out of place SG list
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
                                           ` (7 preceding siblings ...)
  2022-02-22 20:30                         ` [PATCH v11 8/9] crypto/qat: unify raw data path functions Fan Zhang
@ 2022-02-22 20:30                         ` Fan Zhang
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  9 siblings, 0 replies; 156+ messages in thread
From: Fan Zhang @ 2022-02-22 20:30 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, Fan Zhang

From: Kai Ji <kai.ji@intel.com>

This patch adds out-of-place SGL support to the QAT PMD

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)
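
For reference, the out-of-place path is exercised when the application
supplies a per-job destination SGL through the raw data-path API. Below is
a minimal, hedged sketch of an application-side enqueue (not part of this
patch; dp_ctx, the SGLs, the IV/digest/AAD pointers and the offsets are
assumed to be prepared by the caller):

#include <rte_cryptodev.h>

/* Sketch: enqueue one out-of-place job via the raw data-path API.
 * Passing dst == NULL would fall back to in-place processing.
 */
static int
enqueue_oop_job(struct rte_crypto_raw_dp_ctx *dp_ctx,
		struct rte_crypto_sgl *src, struct rte_crypto_sgl *dst,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *digest,
		struct rte_crypto_va_iova_ptr *aad,
		union rte_crypto_sym_ofs ofs, void *user_data)
{
	int32_t job_status;
	int enq_status;
	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = src,
		.dest_sgl = dst,	/* out-of-place destination SGL */
		.iv = iv,
		.digest = digest,
		.aad = aad,
		.status = &job_status,
	};

	if (rte_cryptodev_raw_enqueue_burst(dp_ctx, &vec, ofs,
			&user_data, &enq_status) != 1)
		return -1;

	/* kick the tail so the hardware sees the new descriptor */
	return rte_cryptodev_raw_enqueue_done(dp_ctx, 1);
}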

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index ffa093a7a3..5084a5fcd1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -468,8 +468,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -565,8 +575,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index f803bc1459..bd7f3785df 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -297,8 +297,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 58f07eb0b3..3bcb53cf9f 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -526,9 +526,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -625,8 +634,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -725,8 +744,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -830,8 +859,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
                                           ` (8 preceding siblings ...)
  2022-02-22 20:30                         ` [PATCH v11 9/9] crypto/qat: support out of place SG list Fan Zhang
@ 2022-02-23  0:49                         ` Kai Ji
  2022-02-23  0:49                           ` [dpdk-dev v12 1/9] common/qat: define build request and dequeue ops Kai Ji
                                             ` (9 more replies)
  9 siblings, 10 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:49 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

v11 - v12:
- fixed a compile issue

v10:
- rebase to the latest for-main
- fix build error when RTE_LOG_DEBUG is enabled

v9:
- reword commit messages
- fix of unused function error

v8:
- rebase to 22.03-rc1

v7:
- fix of pointer cast compile error in x86

v6:
- fix of pointer cast error in x86
- rebase to the latest for-main

v5:
- rebase to the latest for-main
- patchset reconstruct

v4:
- patchset break down and reconstruct

v3:
- separate patch 6 into two different patches

v2:
- review comments addressed

Kai Ji (9):
  common/qat: define build request and dequeue ops
  crypto/qat: support symmetric build op request
  crypto/qat: rework session functions
  crypto/qat: rework asymmetric op build operation
  crypto/qat: unify symmetric functions
  crypto/qat: unify asymmetric functions
  crypto/qat: rework burst data path
  crypto/qat: unify raw data path functions
  crypto/qat: support out of place SG list

 drivers/common/qat/meson.build               |   6 +-
 drivers/common/qat/qat_device.c              |   4 +-
 drivers/common/qat/qat_qp.c                  |  42 +-
 drivers/common/qat/qat_qp.h                  |  54 +-
 drivers/compress/qat/qat_comp_pmd.c          |  14 +-
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  93 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 490 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 257 ++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 913 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 942 +++++++++++++++++-
 drivers/crypto/qat/qat_asym.c                | 303 +++++-
 drivers/crypto/qat/qat_asym.h                |  79 +-
 drivers/crypto/qat/qat_asym_pmd.c            | 231 -----
 drivers/crypto/qat/qat_asym_pmd.h            |  54 -
 drivers/crypto/qat/qat_crypto.h              |  16 +-
 drivers/crypto/qat/qat_sym.c                 | 979 ++++++------------
 drivers/crypto/qat/qat_sym.h                 | 148 ++-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 995 -------------------
 drivers/crypto/qat/qat_sym_pmd.c             | 251 -----
 drivers/crypto/qat/qat_sym_pmd.h             |  95 --
 drivers/crypto/qat/qat_sym_session.c         | 115 +--
 drivers/crypto/qat/qat_sym_session.h         |  15 +-
 23 files changed, 3582 insertions(+), 2523 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 1/9] common/qat: define build request and dequeue ops
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
@ 2022-02-23  0:49                           ` Kai Ji
  2022-02-23  0:49                           ` [dpdk-dev v12 2/9] crypto/qat: support symmetric build op request Kai Ji
                                             ` (8 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:49 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch introduces build request and dequeue op function
pointers to the QAT queue pair implementation. The function
pointers are assigned during QAT session generation based on the
input crypto operation request.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/qat_qp.c          | 10 ++++--
 drivers/common/qat/qat_qp.h          | 54 ++++++++++++++++++++++++++--
 drivers/compress/qat/qat_comp_pmd.c  |  6 ++--
 drivers/crypto/qat/qat_asym_pmd.c    |  4 +--
 drivers/crypto/qat/qat_sym_pmd.c     |  4 +--
 drivers/crypto/qat/qat_sym_session.h | 13 ++++++-
 6 files changed, 77 insertions(+), 14 deletions(-)
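
For context, the intended consumers of these callback types (wired up by
the later patches in this series) are the per-service burst wrappers. A
hedged sketch of such a wrapper is shown below; qat_sym_build_request()
and qat_sym_process_response() are stand-ins for the service-specific
callbacks rather than symbols added by this patch:

#include <rte_cryptodev.h>
#include "qat_qp.h"

/* Assumed service-specific callbacks, implemented elsewhere in the series,
 * matching qat_op_build_request_t and qat_op_dequeue_t respectively.
 */
int qat_sym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
		uint64_t *opaque, enum qat_device_gen dev_gen);
int qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
		uint64_t *dequeue_err_count);

static uint16_t
sym_enqueue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* hand the build-request callback to the common queue-pair code */
	return qat_enqueue_op_burst(qp, qat_sym_build_request,
			(void **)ops, nb_ops);
}

static uint16_t
sym_dequeue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* hand the response-processing callback to the common dequeue path */
	return qat_dequeue_op_burst(qp, (void **)ops,
			qat_sym_process_response, nb_ops);
}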

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 57ac8fefca..56234ca1a4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -547,7 +547,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_enqueue_op_burst(void *qp,
+		__rte_unused qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -814,7 +816,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 }
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+qat_dequeue_op_burst(void *qp, void **ops,
+		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
+		uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index deafb407b3..66f00943a5 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 #ifndef _QAT_QP_H_
 #define _QAT_QP_H_
@@ -36,6 +36,51 @@ struct qat_queue {
 	/* number of responses processed since last CSR head write */
 };
 
+/**
+ * Type define qat_op_build_request_t function pointer, passed in as argument
+ * in enqueue op burst, where a build request assigned base on the type of
+ * crypto op.
+ *
+ * @param in_op
+ *    An input op pointer
+ * @param out_msg
+ *    out_meg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param opaque
+ *    opaque data that may be used to store context shared between
+ *    two enqueue operations.
+ * @param dev_gen
+ *    qat device gen id
+ * @return
+ *   - 0 if the crypto request is built successfully,
+ *   - EINVAL if error
+ **/
+typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
+
+/**
+ * Type define qat_op_dequeue_t function pointer, passed in as argument
+ * in dequeue op burst, where a dequeue op is assigned based on the type of
+ * crypto op.
+ *
+ * @param op
+ *    An input op pointer
+ * @param resp
+ *    qat response msg pointer
+ * @param op_cookie
+ *    op cookie pointer
+ * @param dequeue_err_count
+ *    dequeue error counter
+ * @return
+ *    - 0 if dequeue OP is successful
+ *    - EINVAL if error
+ **/
+typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused);
+
+#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE	2
+
 struct qat_qp {
 	void			*mmap_bar_addr;
 	struct qat_queue	tx_q;
@@ -44,6 +89,7 @@ struct qat_qp {
 	struct rte_mempool *op_cookie_pool;
 	void **op_cookies;
 	uint32_t nb_descriptors;
+	uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
 	enum qat_device_gen qat_dev_gen;
 	enum qat_service_type service_type;
 	struct qat_pci_device *qat_dev;
@@ -78,13 +124,15 @@ struct qat_qp_config {
 };
 
 uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
+		void **ops, uint16_t nb_ops);
 
 uint16_t
 qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
 
 uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+qat_dequeue_op_burst(void *qp, void **ops,
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
 
 int
 qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index da6404c017..efe5d08a99 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <rte_malloc.h>
@@ -620,7 +620,7 @@ static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -639,7 +639,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
 					(compressdev_dequeue_pkt_burst_t)
-					qat_dequeue_op_burst;
+					qat_comp_pmd_enq_deq_dummy_op_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index addee384e3..9a7596b227 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 				       uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index b835245f17..28a26260fb 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -49,14 +49,14 @@ static uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
 }
 
 static uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
-	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
 }
 
 /* An rte_driver is needed in the registration of both the device and the driver
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 6ebc176729..fe875a7fd0 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
 	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
 };
 
+struct qat_sym_session;
+
+/*
+ * typedef qat_op_build_request_t function pointer, passed in as argument
+ * in enqueue op burst, where a build request is assigned based on the type of
+ * crypto op.
+ */
+typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* Common content descriptor */
 struct qat_sym_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -107,6 +117,7 @@ struct qat_sym_session {
 	/* Some generations need different setup of counter */
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
+	qat_sym_build_request_t build_request[2];
 };
 
 int
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 2/9] crypto/qat: support symmetric build op request
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-02-23  0:49                           ` [dpdk-dev v12 1/9] common/qat: define build request and dequeue ops Kai Ji
@ 2022-02-23  0:49                           ` Kai Ji
  2022-02-23  0:50                           ` [dpdk-dev v12 3/9] crypto/qat: rework session functions Kai Ji
                                             ` (7 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:49 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch adds common inline functions for the QAT symmetric
crypto driver to process crypto ops, and the implementation of
the build op request function for QAT generation 1.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)
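
Before the individual helpers, it may help to see how they are meant to
compose. The sketch below is a trimmed-down GEN1 cipher build-request
callback (error status setting and debug logging omitted); the real
implementation added to qat_sym_pmd_gen1.c by this patch follows the same
shape, so treat this only as an illustration:

#include <errno.h>
#include <rte_memcpy.h>
#include "qat_crypto_pmd_gens.h"	/* assumed to pull in qat_sym.h etc. */

static int
build_op_cipher_gen1_sketch(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct icp_qat_fw_la_bulk_req *req =
			(struct icp_qat_fw_la_bulk_req *)out_msg;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_vec out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_sgl in_sgl = { .vec = in_vec };
	struct rte_crypto_sgl out_sgl = { .vec = out_vec };
	struct rte_crypto_va_iova_ptr cipher_iv;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	/* start from the firmware request template built at session init */
	rte_mov128((uint8_t *)req, (const uint8_t *)&ctx->fw_req);

	/* translate the op into vectors and head/tail offsets */
	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, NULL, NULL);
	if (unlikely(ofs.raw == UINT64_MAX))
		return -EINVAL;

	/* fill src/dst addresses and lengths (flat buffer or SGL) */
	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0))
		return -EINVAL;

	/* finally add the cipher-specific request parameters */
	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs,
			(uint32_t)total_len);

	return 0;
}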

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..2dd7475c77 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 3/9] crypto/qat: rework session functions
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
  2022-02-23  0:49                           ` [dpdk-dev v12 1/9] common/qat: define build request and dequeue ops Kai Ji
  2022-02-23  0:49                           ` [dpdk-dev v12 2/9] crypto/qat: support symmetric build op request Kai Ji
@ 2022-02-23  0:50                           ` Kai Ji
  2022-02-23  0:50                           ` [dpdk-dev v12 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
                                             ` (6 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:50 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch introduces a set_session method for each QAT
generation. In addition, reuse of a QAT session across
generations is now prohibited, as support for
min_qat_dev_gen_id is removed.
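
(Illustrative sketch only, not part of the patch: the per-generation
set_session dispatch idea in miniature. The enum, struct and callback
names below are invented for the example and are not the driver's
real symbols.)

	#include <stdio.h>

	enum qat_gen_sketch { GEN1, GEN2, GEN3, GEN4, N_GENS };

	typedef int (*set_session_t)(void *cryptodev, void *session);

	struct gen_ops_sketch {
		set_session_t set_session; /* per-generation session finalizer */
	};

	static int
	set_session_gen1(void *cryptodev, void *session)
	{
		(void)cryptodev;
		(void)session;
		printf("gen1: pick gen1 build-request routine\n");
		return 0;
	}

	static int
	set_session_gen3(void *cryptodev, void *session)
	{
		/* A newer generation reuses the gen1 path, then overrides
		 * the build-request routine for features it adds
		 * (e.g. single pass).
		 */
		int ret = set_session_gen1(cryptodev, session);

		printf("gen3: override build-request for single-pass AEAD\n");
		return ret;
	}

	static const struct gen_ops_sketch gen_dev_ops[N_GENS] = {
		[GEN1] = { set_session_gen1 },
		[GEN3] = { set_session_gen3 },
	};

	int
	main(void)
	{
		enum qat_gen_sketch gen = GEN3;	/* generation of the device in use */
		void *dev = NULL, *sess = NULL;

		/* Session setup ends with one generation-specific call, so
		 * the datapath no longer needs a min-generation check per
		 * operation.
		 */
		return gen_dev_ops[gen].set_session(dev, sess);
	}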

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 ++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 256 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 125 ++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_pmd.c             |   4 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 ++------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 11 files changed, 576 insertions(+), 111 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * but some are not supported by GEN2, so checking here
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,257 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		cipher_param = (void *)&req->serv_specif_rqpars;
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN3
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,11 +103,133 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN4
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 2dd7475c77..ff176dddf1 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* Check none supported algs if mixed */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
  #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index 28a26260fb..63ab9a5a8b 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -20,9 +20,9 @@
 
 #define MIXED_CRYPTO_MIN_FW_VER 0x04090000
 
-uint8_t qat_sym_driver_id;
+extern uint8_t qat_sym_driver_id;
 
-struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
 void
 qat_sym_init_op_cookie(void *op_cookie)
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 4/9] crypto/qat: rework asymmetric op build operation
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                             ` (2 preceding siblings ...)
  2022-02-23  0:50                           ` [dpdk-dev v12 3/9] crypto/qat: rework session functions Kai Ji
@ 2022-02-23  0:50                           ` Kai Ji
  2022-02-23  0:50                           ` [dpdk-dev v12 5/9] crypto/qat: unify symmetric functions Kai Ji
                                             ` (5 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:50 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch reworks the asymmetric crypto data path
implementation in the QAT driver. The changes include separating
the asymmetric crypto data path per QAT hardware generation and
optimising the device capabilities declaration.
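As a side note on the capability declaration change, the QAT_ASYM_CAP
helper follows the usual designated-initializer macro pattern for
building flat capability tables. Below is a minimal standalone sketch
of that pattern only; the struct, field names and modlen numbers are
simplified stand-ins for illustration, not the rte_cryptodev
capability layout or the real GEN1 limits.

#include <stdio.h>

/* stand-in for the real capability entry; not rte_cryptodev_capabilities */
struct asym_cap {
	const char *xform;
	unsigned int op_types;
	int modlen_min, modlen_max, modlen_inc;
};

/* <name> <op type bitmask> <modlen (min, max, increment)> */
#define ASYM_CAP(n, o, l, r, i) \
	{ .xform = #n, .op_types = (o), \
	  .modlen_min = (l), .modlen_max = (r), .modlen_inc = (i) }

/* per-generation table stays a flat, readable list of one-liners */
static const struct asym_cap gen1_asym_caps[] = {
	ASYM_CAP(MODEX, 0x3, 1, 512, 1),
	ASYM_CAP(RSA,   0xf, 64, 512, 64),
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(gen1_asym_caps) / sizeof(gen1_asym_caps[0]); i++)
		printf("%s: modlen %d..%d step %d\n", gen1_asym_caps[i].xform,
		       gen1_asym_caps[i].modlen_min, gen1_asym_caps[i].modlen_max,
		       gen1_asym_caps[i].modlen_inc);
	return 0;
}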

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/qat_qp.c       |   5 +-
 drivers/crypto/qat/qat_asym.c     | 129 ++++++++++++++++--------------
 drivers/crypto/qat/qat_asym.h     |  63 +++++++++++++--
 drivers/crypto/qat/qat_asym_pmd.c |   4 +-
 4 files changed, 133 insertions(+), 68 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 56234ca1a4..7f2fdc53ce 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -619,7 +619,7 @@ qat_enqueue_op_burst(void *qp,
 #ifdef BUILD_QAT_ASYM
 			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
+				NULL, tmp_qp->qat_dev_gen);
 #endif
 		}
 		if (ret != 0) {
@@ -847,7 +847,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 #ifdef BUILD_QAT_ASYM
 		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
 			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 #endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 27ce0337a7..d6ca1afae8 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -1,68 +1,36 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2022 Intel Corporation
  */
 
 #include <stdarg.h>
 
-#include "qat_asym.h"
+#include <cryptodev_pmd.h>
+
 #include "icp_qat_fw_pke.h"
 #include "icp_qat_fw.h"
 #include "qat_pke_functionality_arrays.h"
 
-#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
-
-static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
-		size_t arr_sz, size_t *size, uint32_t *func_id)
-{
-	size_t i;
-
-	for (i = 0; i < arr_sz; i++) {
-		if (*size <= arr[i][0]) {
-			*size = arr[i][0];
-			*func_id = arr[i][1];
-			return 0;
-		}
-	}
-	return -1;
-}
-
-static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
-{
-	memset(qat_req, 0, sizeof(*qat_req));
-	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-
-	qat_req->pke_hdr.hdr_flags =
-			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
-			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-}
-
-static inline void qat_asym_build_req_tmpl(void *sess_private_data)
-{
-	struct icp_qat_fw_pke_request *qat_req;
-	struct qat_asym_session *session = sess_private_data;
+#include "qat_device.h"
 
-	qat_req = &session->req_tmpl;
-	qat_fill_req_tmpl(qat_req);
-}
+#include "qat_logs.h"
+#include "qat_asym.h"
 
-static size_t max_of(int n, ...)
-{
-	va_list args;
-	size_t len = 0, num;
-	int i;
+uint8_t qat_asym_driver_id;
 
-	va_start(args, n);
-	len = va_arg(args, size_t);
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
-	for (i = 0; i < n - 1; i++) {
-		num = va_arg(args, size_t);
-		if (num > len)
-			len = num;
-	}
-	va_end(args);
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
+static const struct rte_driver cryptodev_qat_asym_driver = {
+	.name = qat_asym_drv_name,
+	.alias = qat_asym_drv_name
+};
 
-	return len;
-}
 
 static void qat_clear_arrays(struct qat_asym_op_cookie *cookie,
 		int in_count, int out_count, int alg_size)
@@ -106,7 +74,46 @@ static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie,
 	}
 }
 
-static int qat_asym_check_nonzero(rte_crypto_param n)
+#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg))
+
+static int
+qat_asym_get_sz_and_func_id(const uint32_t arr[][2],
+		size_t arr_sz, size_t *size, uint32_t *func_id)
+{
+	size_t i;
+
+	for (i = 0; i < arr_sz; i++) {
+		if (*size <= arr[i][0]) {
+			*size = arr[i][0];
+			*func_id = arr[i][1];
+			return 0;
+		}
+	}
+	return -1;
+}
+
+static size_t
+max_of(int n, ...)
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
 {
 	if (n.length < 8) {
 		/* Not a case for any cryptographic function except for DH
@@ -475,10 +482,9 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 }
 
 int
-qat_asym_build_request(void *in_op,
-			uint8_t *out_msg,
-			void *op_cookie,
-			__rte_unused enum qat_device_gen qat_dev_gen)
+qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		__rte_unused enum qat_device_gen dev_gen)
 {
 	struct qat_asym_session *ctx;
 	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
@@ -677,9 +683,9 @@ static void qat_asym_collect_response(struct rte_crypto_op *rx_op,
 	qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes);
 }
 
-void
+int
 qat_asym_process_response(void **op, uint8_t *resp,
-		void *op_cookie)
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count)
 {
 	struct qat_asym_session *ctx;
 	struct icp_qat_fw_pke_resp *resp_msg =
@@ -722,6 +728,8 @@ qat_asym_process_response(void **op, uint8_t *resp,
 	QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg,
 			sizeof(struct icp_qat_fw_pke_resp));
 #endif
+
+	return 1;
 }
 
 int
@@ -779,3 +787,8 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 	if (sess_priv)
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index c9242a12ca..3ae95f2e7b 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_ASYM_H_
@@ -8,10 +8,13 @@
 #include <cryptodev_pmd.h>
 #include <rte_crypto_asym.h>
 #include "icp_qat_fw_pke.h"
-#include "qat_common.h"
-#include "qat_asym_pmd.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
 #include "icp_qat_fw.h"
 
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
 typedef uint64_t large_int_ptr;
 #define MAX_PKE_PARAMS	8
 #define QAT_PKE_MAX_LN_SIZE 512
@@ -26,6 +29,28 @@ typedef uint64_t large_int_ptr;
 #define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
 #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
 
+/**
+ * helper function to add an asym capability
+ * <name> <op type> <modlen (min, max, increment)>
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
 struct qat_asym_op_cookie {
 	size_t alg_size;
 	uint64_t error;
@@ -45,6 +70,27 @@ struct qat_asym_session {
 	struct rte_crypto_asym_xform *xform;
 };
 
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+			(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
 int
 qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
 		struct rte_crypto_asym_xform *xform,
@@ -75,7 +121,9 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
  */
 int
 qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
+		void *op_cookie,
+		__rte_unused uint64_t *opaque,
+		enum qat_device_gen qat_dev_gen);
 
 /*
  * Process PKE response received from outgoing queue of QAT
@@ -87,8 +135,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg,
  * @param	op_cookie	Cookie pointer that holds private metadata
  *
  */
+int
+qat_asym_process_response(void **op, uint8_t *resp,
+		void *op_cookie,  __rte_unused uint64_t *dequeue_err_count);
+
 void
-qat_asym_process_response(void __rte_unused **op, uint8_t *resp,
-		void *op_cookie);
+qat_asym_init_op_cookie(void *cookie);
 
 #endif /* _QAT_ASYM_H_ */
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
index 9a7596b227..d56294d3d9 100644
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ b/drivers/crypto/qat/qat_asym_pmd.c
@@ -10,8 +10,8 @@
 #include "qat_asym.h"
 #include "qat_asym_pmd.h"
 
-uint8_t qat_asym_driver_id;
-struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+extern uint8_t qat_asym_driver_id;
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
 void
 qat_asym_init_op_cookie(void *op_cookie)
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 5/9] crypto/qat: unify symmetric functions
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                             ` (3 preceding siblings ...)
  2022-02-23  0:50                           ` [dpdk-dev v12 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
@ 2022-02-23  0:50                           ` Kai Ji
  2022-02-23  0:50                           ` [dpdk-dev v12 6/9] crypto/qat: unify asymmetric functions Kai Ji
                                             ` (4 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:50 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch removes qat_sym_pmd.c and integrates all of its functions
into qat_sym.c. Keeping the QAT symmetric crypto PMD functions unified
in one file makes them easier to maintain.
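One detail worth noting in the relocated qat_sym_init_op_cookie() is
that it pre-computes the IO address of each cookie member as the
cookie's base IOVA plus offsetof(). Below is a minimal standalone
sketch of that idea, with a dummy base address standing in for
rte_mempool_virt2iova() and a deliberately simplified cookie layout.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for struct qat_sym_op_cookie */
struct op_cookie {
	uint64_t sgl_src_phys_addr;	/* cached IO address of sgl_src */
	uint64_t sgl_dst_phys_addr;	/* cached IO address of sgl_dst */
	uint8_t sgl_src[64];
	uint8_t sgl_dst[64];
};

/* stand-in for rte_mempool_virt2iova(); returns a made-up base address */
static uint64_t fake_virt2iova(const void *va)
{
	(void)va;
	return 0x100000;
}

/* compute each member's IO address once, at cookie-init time */
static void init_op_cookie(struct op_cookie *c)
{
	uint64_t base = fake_virt2iova(c);

	c->sgl_src_phys_addr = base + offsetof(struct op_cookie, sgl_src);
	c->sgl_dst_phys_addr = base + offsetof(struct op_cookie, sgl_dst);
}

int main(void)
{
	struct op_cookie c;

	init_op_cookie(&c);
	printf("src IOVA 0x%" PRIx64 ", dst IOVA 0x%" PRIx64 "\n",
	       c.sgl_src_phys_addr, c.sgl_dst_phys_addr);
	return 0;
}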

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/meson.build       |   4 +-
 drivers/common/qat/qat_device.c      |   4 +-
 drivers/common/qat/qat_qp.c          |   3 +-
 drivers/crypto/qat/qat_crypto.h      |   5 +-
 drivers/crypto/qat/qat_sym.c         |  21 +++
 drivers/crypto/qat/qat_sym.h         | 147 ++++++++++++++--
 drivers/crypto/qat/qat_sym_hw_dp.c   |  11 +-
 drivers/crypto/qat/qat_sym_pmd.c     | 251 ---------------------------
 drivers/crypto/qat/qat_sym_pmd.h     |  95 ----------
 drivers/crypto/qat/qat_sym_session.c |   2 +-
 10 files changed, 168 insertions(+), 375 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_sym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index af92271a75..1bf6896a7e 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017-2018 Intel Corporation
+# Copyright(c) 2017-2022 Intel Corporation
 
 if is_windows
     build = false
@@ -73,7 +73,7 @@ if qat_compress
 endif
 
 if qat_crypto
-    foreach f: ['qat_sym_pmd.c', 'qat_sym.c', 'qat_sym_session.c',
+    foreach f: ['qat_sym.c', 'qat_sym_session.c',
             'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 1f870d689a..6824d97050 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2020 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
  */
 
 #include <rte_string_fns.h>
@@ -8,7 +8,7 @@
 
 #include "qat_device.h"
 #include "adf_transport_access_macros.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 #include "qat_comp_pmd.h"
 #include "adf_pf2vf_msg.h"
 #include "qat_pf2vf.h"
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 7f2fdc53ce..b36ffa6f6d 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -838,7 +838,8 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz]);
+				tmp_qp->op_cookies[head >> rx_queue->trailz],
+				NULL);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 5ca76fcaa6..c01266f81c 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -12,7 +12,10 @@
 extern uint8_t qat_sym_driver_id;
 extern uint8_t qat_asym_driver_id;
 
-/** helper macro to set cryptodev capability range **/
+/**
+ * helper macro to set cryptodev capability range
+ * <n: name> <l: min > <r: max> <i: increment> <v: value>
+ **/
 #define CAP_RNG(n, l, r, i) .n = {.min = l, .max = r, .increment = i}
 
 #define CAP_RNG_ZERO(n) .n = {.min = 0, .max = 0, .increment = 0}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 83bf55c933..aad4b243b7 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -17,6 +17,27 @@ uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_sym_init_op_cookie(void *op_cookie)
+{
+	struct qat_sym_op_cookie *cookie = op_cookie;
+
+	cookie->qat_sgl_src_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_src);
+
+	cookie->qat_sgl_dst_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			qat_sgl_dst);
+
+	cookie->opt.spc_gmac.cd_phys_addr =
+			rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_sym_op_cookie,
+			opt.spc_gmac.cd_cipher);
+}
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e3ec7f0de4..f4ff2ce4cd 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #ifndef _QAT_SYM_H_
@@ -15,7 +15,7 @@
 
 #include "qat_common.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_crypto.h"
 #include "qat_logs.h"
 
 #define BYTE_LENGTH    8
@@ -24,6 +24,67 @@
  */
 #define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
 
+/** Intel(R) QAT Symmetric Crypto PMD name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
+/* Internal capabilities */
+#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
+#define QAT_SYM_CAP_VALID		(1 << 31)
+
+/**
+ * Macro to add a sym capability
+ * helper function to add an sym capability
+ * <n: name> <b: block size> <k: key size> <d: digest size>
+ * <a: aad_size> <i: iv_size>
+ **/
+#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, d					\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_##n,		\
+				b, k, d, a, i				\
+			}, }						\
+		}, }							\
+	}
+
+#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_##n,		\
+				b, k, i					\
+			}, }						\
+		}, }							\
+	}
+
 /*
  * Maximum number of SGL entries
  */
@@ -54,6 +115,22 @@ struct qat_sym_op_cookie {
 	} opt;
 };
 
+struct qat_sym_dp_ctx {
+	struct qat_sym_session *session;
+	uint32_t tail;
+	uint32_t head;
+	uint16_t cached_enqueue;
+	uint16_t cached_dequeue;
+};
+
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
@@ -213,17 +290,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		}
 	}
 }
-#else
-
-static inline void
-qat_sym_preprocess_requests(void **ops __rte_unused,
-				uint16_t nb_ops __rte_unused)
-{
-}
 #endif
 
-static inline void
-qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
+static __rte_always_inline int
+qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count __rte_unused)
 {
 	struct icp_qat_fw_comn_resp *resp_msg =
 			(struct icp_qat_fw_comn_resp *)resp;
@@ -282,6 +353,12 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie)
 	}
 
 	*op = (void *)rx_op;
+
+	/*
+	 * return 1 as dequeue op only move on to the next op
+	 * if one was ready to return to API
+	 */
+	return 1;
 }
 
 int
@@ -293,6 +370,52 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 int
 qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev);
 
+void
+qat_sym_init_op_cookie(void *cookie);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_vec *vec, uint32_t vec_len,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t i;
+
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	for (i = 0; i < vec_len; i++)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:", vec[i].base, vec[i].len);
+	if (cipher_iv && ctx->cipher_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv->va,
+				ctx->cipher_iv.length);
+	if (auth_iv && ctx->auth_iv.length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv->va,
+				ctx->auth_iv.length);
+	if (aad && ctx->aad_len > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", aad->va,
+				ctx->aad_len);
+	if (digest && ctx->digest_length > 0)
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", digest->va,
+				ctx->digest_length);
+}
+#else
+static __rte_always_inline void
+qat_sym_debug_log_dump(struct icp_qat_fw_la_bulk_req *qat_req __rte_unused,
+		struct qat_sym_session *ctx __rte_unused,
+		struct rte_crypto_vec *vec __rte_unused,
+		uint32_t vec_len __rte_unused,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+}
+#endif
+
 #else
 
 static inline void
@@ -307,5 +430,5 @@ qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 {
 }
 
-#endif
+#endif /* BUILD_QAT_SYM */
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 792ad2b213..5322faff44 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #include <cryptodev_pmd.h>
@@ -9,18 +9,9 @@
 #include "icp_qat_fw_la.h"
 
 #include "qat_sym.h"
-#include "qat_sym_pmd.h"
 #include "qat_sym_session.h"
 #include "qat_qp.h"
 
-struct qat_sym_dp_ctx {
-	struct qat_sym_session *session;
-	uint32_t tail;
-	uint32_t head;
-	uint16_t cached_enqueue;
-	uint16_t cached_dequeue;
-};
-
 static __rte_always_inline int32_t
 qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
 		struct rte_crypto_vec *data, uint16_t n_data_vecs)
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
deleted file mode 100644
index 63ab9a5a8b..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <cryptodev_pmd.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security_driver.h>
-#endif
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
-
-#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_sym_init_op_cookie(void *op_cookie)
-{
-	struct qat_sym_op_cookie *cookie = op_cookie;
-
-	cookie->qat_sgl_src_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_src);
-
-	cookie->qat_sgl_dst_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			qat_sgl_dst);
-
-	cookie->opt.spc_gmac.cd_phys_addr =
-			rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_sym_op_cookie,
-			opt.spc_gmac.cd_cipher);
-}
-
-static uint16_t
-qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
-static const struct rte_driver cryptodev_qat_sym_driver = {
-	.name = qat_sym_drv_name,
-	.alias = qat_sym_drv_name
-};
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
-{
-	int i = 0, ret = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "sym");
-	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	/*
-	 * All processes must use same driver id so they can share sessions.
-	 * Store driver_id so we can validate that all processes have the same
-	 * value, typically they have, but could differ if binaries built
-	 * separately.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_sym_driver_id =
-				qat_sym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_sym_driver_id !=
-				qat_sym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
-	qat_dev_instance->sym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->sym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->sym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_sym_driver_id;
-	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
-
-	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-#ifdef RTE_LIB_SECURITY
-	if (gen_dev_ops->create_security_ctx) {
-		cryptodev->security_ctx =
-			gen_dev_ops->create_security_ctx((void *)cryptodev);
-		if (cryptodev->security_ctx == NULL) {
-			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
-	} else
-		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
-
-#endif
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_SYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->service_type = QAT_SERVICE_SYMMETRIC;
-	internals->dev_id = cryptodev->data->dev_id;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating capability memzon for %s",
-				name);
-			ret = -EFAULT;
-			goto error;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->sym_dev = internals;
-	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	return 0;
-
-error:
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	memset(&qat_dev_instance->sym_rte_dev, 0,
-		sizeof(qat_dev_instance->sym_rte_dev));
-
-	return ret;
-}
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->sym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
-#ifdef RTE_LIB_SECURITY
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
-	qat_pci_dev->sym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_sym_driver,
-		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
deleted file mode 100644
index 59fbdefa12..0000000000
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_SYM_PMD_H_
-#define _QAT_SYM_PMD_H_
-
-#ifdef BUILD_QAT_SYM
-
-#include <rte_ether.h>
-#include <rte_cryptodev.h>
-#ifdef RTE_LIB_SECURITY
-#include <rte_security.h>
-#endif
-
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Symmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
-
-/* Internal capabilities */
-#define QAT_SYM_CAP_MIXED_CRYPTO	(1 << 0)
-#define QAT_SYM_CAP_VALID		(1 << 31)
-
-/**
- * Macro to add a sym capability
- * helper function to add an sym capability
- * <n: name> <b: block size> <k: key size> <d: digest size>
- * <a: aad_size> <i: iv_size>
- **/
-#define QAT_SYM_PLAIN_AUTH_CAP(n, b, d)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, d					\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AUTH_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
-			{.auth = {					\
-				.algo = RTE_CRYPTO_AUTH_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_AEAD_CAP(n, b, k, d, a, i)				\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
-			{.aead = {					\
-				.algo = RTE_CRYPTO_AEAD_##n,		\
-				b, k, d, a, i				\
-			}, }						\
-		}, }							\
-	}
-
-#define QAT_SYM_CIPHER_CAP(n, b, k, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
-		{.sym = {						\
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
-			{.cipher = {					\
-				.algo = RTE_CRYPTO_CIPHER_##n,		\
-				b, k, i					\
-			}, }						\
-		}, }							\
-	}
-
-extern uint8_t qat_sym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
-
-int
-qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param);
-
-int
-qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
-
-void
-qat_sym_init_op_cookie(void *op_cookie);
-
-#endif
-#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 3a880096c4..9d6a19c0be 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -20,7 +20,7 @@
 
 #include "qat_logs.h"
 #include "qat_sym_session.h"
-#include "qat_sym_pmd.h"
+#include "qat_sym.h"
 
 /* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
 static const uint8_t sha1InitialState[] = {
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 6/9] crypto/qat: unify asymmetric functions
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                             ` (4 preceding siblings ...)
  2022-02-23  0:50                           ` [dpdk-dev v12 5/9] crypto/qat: unify symmetric functions Kai Ji
@ 2022-02-23  0:50                           ` Kai Ji
  2022-02-23  0:50                           ` [dpdk-dev v12 7/9] crypto/qat: rework burst data path Kai Ji
                                             ` (3 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:50 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch removes qat_asym_pmd.c and integrates all of its
functions into qat_asym.c. Keeping the QAT asymmetric crypto
PMD functions unified in one file makes them easier to maintain.
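The new enqueue/dequeue wrappers simply inject the asym-specific build
and response callbacks into the shared transport loop. A small
standalone sketch of that callback-injection shape follows; the types
and names are simplified stand-ins, not the actual
qat_enqueue_op_burst() signature.

#include <stdint.h>
#include <stdio.h>

typedef int (*build_request_t)(void *op, uint8_t *out_msg);

/* shared, service-agnostic loop: stand-in for qat_enqueue_op_burst() */
static uint16_t
generic_enqueue_burst(build_request_t build, void **ops, uint16_t nb_ops)
{
	uint8_t ring_slot[128];
	uint16_t i;

	for (i = 0; i < nb_ops; i++)
		if (build(ops[i], ring_slot) != 0)
			break;
	return i;
}

/* asym-specific request builder; the body is illustrative only */
static int
asym_build_request(void *op, uint8_t *out_msg)
{
	(void)op;
	out_msg[0] = 0x42;	/* pretend PKE request header */
	return 0;
}

/* thin wrapper that would be registered as the cryptodev enqueue handler */
static uint16_t
asym_enqueue_burst(void **ops, uint16_t nb_ops)
{
	return generic_enqueue_burst(asym_build_request, ops, nb_ops);
}

int main(void)
{
	void *ops[4] = { 0, 0, 0, 0 };

	printf("enqueued %u ops\n", asym_enqueue_burst(ops, 4));
	return 0;
}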

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/meson.build    |   2 +-
 drivers/crypto/qat/qat_asym.c     | 180 +++++++++++++++++++++++
 drivers/crypto/qat/qat_asym_pmd.c | 231 ------------------------------
 drivers/crypto/qat/qat_asym_pmd.h |  54 -------
 4 files changed, 181 insertions(+), 286 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.c
 delete mode 100644 drivers/crypto/qat/qat_asym_pmd.h

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index 1bf6896a7e..f687f5c9d8 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym_pmd.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index d6ca1afae8..353c36534a 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -19,6 +19,32 @@ uint8_t qat_asym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
 
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					output_params_ptrs);
+
+	for (j = 0; j < 8; j++) {
+		cookie->input_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						input_array[j]);
+		cookie->output_params_ptrs[j] =
+				rte_mempool_virt2iova(cookie) +
+				offsetof(struct qat_asym_op_cookie,
+						output_array[j]);
+	}
+}
+
 /* An rte_driver is needed in the registration of both the device and the driver
  * with cryptodev.
  * The actual qat pci's rte_driver can't be used as its name represents
@@ -788,6 +814,160 @@ qat_asym_session_clear(struct rte_cryptodev *dev,
 		memset(s, 0, qat_asym_session_get_private_size(dev));
 }
 
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+				nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+		&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s have different driver id than corresponding device in primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_asym_driver,
diff --git a/drivers/crypto/qat/qat_asym_pmd.c b/drivers/crypto/qat/qat_asym_pmd.c
deleted file mode 100644
index d56294d3d9..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "qat_logs.h"
-
-#include "qat_crypto.h"
-#include "qat_asym.h"
-#include "qat_asym_pmd.h"
-
-extern uint8_t qat_asym_driver_id;
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
-
-void
-qat_asym_init_op_cookie(void *op_cookie)
-{
-	int j;
-	struct qat_asym_op_cookie *cookie = op_cookie;
-
-	cookie->input_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					input_params_ptrs);
-
-	cookie->output_addr = rte_mempool_virt2iova(cookie) +
-			offsetof(struct qat_asym_op_cookie,
-					output_params_ptrs);
-
-	for (j = 0; j < 8; j++) {
-		cookie->input_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						input_array[j]);
-		cookie->output_params_ptrs[j] =
-				rte_mempool_virt2iova(cookie) +
-				offsetof(struct qat_asym_op_cookie,
-						output_array[j]);
-	}
-}
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
-	/* Device related operations */
-	.dev_configure		= qat_cryptodev_config,
-	.dev_start		= qat_cryptodev_start,
-	.dev_stop		= qat_cryptodev_stop,
-	.dev_close		= qat_cryptodev_close,
-	.dev_infos_get		= qat_cryptodev_info_get,
-
-	.stats_get		= qat_cryptodev_stats_get,
-	.stats_reset		= qat_cryptodev_stats_reset,
-	.queue_pair_setup	= qat_cryptodev_qp_setup,
-	.queue_pair_release	= qat_cryptodev_qp_release,
-
-	/* Crypto related operations */
-	.asym_session_get_size	= qat_asym_session_get_private_size,
-	.asym_session_configure	= qat_asym_session_configure,
-	.asym_session_clear	= qat_asym_session_clear
-};
-
-uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
-}
-
-uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-				       uint16_t nb_ops)
-{
-	return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
-}
-
-/* An rte_driver is needed in the registration of both the device and the driver
- * with cryptodev.
- * The actual qat pci's rte_driver can't be used as its name represents
- * the whole pci device with all services. Think of this as a holder for a name
- * for the crypto part of the pci device.
- */
-static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD);
-static const struct rte_driver cryptodev_qat_asym_driver = {
-	.name = qat_asym_drv_name,
-	.alias = qat_asym_drv_name
-};
-
-int
-qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
-		struct qat_dev_cmd_param *qat_dev_cmd_param)
-{
-	int i = 0;
-	struct qat_device_info *qat_dev_instance =
-			&qat_pci_devs[qat_pci_dev->qat_dev_id];
-	struct rte_cryptodev_pmd_init_params init_params = {
-		.name = "",
-		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
-		.private_data_size = sizeof(struct qat_cryptodev_private)
-	};
-	struct qat_capabilities_info capa_info;
-	const struct rte_cryptodev_capabilities *capabilities;
-	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
-		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	struct rte_cryptodev *cryptodev;
-	struct qat_cryptodev_private *internals;
-	uint64_t capa_size;
-
-	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
-			qat_pci_dev->name, "asym");
-	QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
-
-	if (gen_dev_ops->cryptodev_ops == NULL) {
-		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
-				name);
-		return -EFAULT;
-	}
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		qat_pci_dev->qat_asym_driver_id =
-				qat_asym_driver_id;
-	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		if (qat_pci_dev->qat_asym_driver_id !=
-				qat_asym_driver_id) {
-			QAT_LOG(ERR,
-				"Device %s have different driver id than corresponding device in primary process",
-				name);
-			return -(EFAULT);
-		}
-	}
-
-	/* Populate subset device to use in cryptodev device creation */
-	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
-	qat_dev_instance->asym_rte_dev.numa_node =
-			qat_dev_instance->pci_dev->device.numa_node;
-	qat_dev_instance->asym_rte_dev.devargs = NULL;
-
-	cryptodev = rte_cryptodev_pmd_create(name,
-			&(qat_dev_instance->asym_rte_dev), &init_params);
-
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
-	cryptodev->driver_id = qat_asym_driver_id;
-	cryptodev->dev_ops = &crypto_qat_ops;
-
-	cryptodev->enqueue_burst = qat_asym_pmd_enqueue_op_burst;
-	cryptodev->dequeue_burst = qat_asym_pmd_dequeue_op_burst;
-
-
-	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
-			"QAT_ASYM_CAPA_GEN_%d",
-			qat_pci_dev->qat_dev_gen);
-
-	internals = cryptodev->data->dev_private;
-	internals->qat_dev = qat_pci_dev;
-	internals->dev_id = cryptodev->data->dev_id;
-	internals->service_type = QAT_SERVICE_ASYMMETRIC;
-
-	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
-	capabilities = capa_info.data;
-	capa_size = capa_info.size;
-
-	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
-	if (internals->capa_mz == NULL) {
-		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
-				capa_size, rte_socket_id(), 0);
-		if (internals->capa_mz == NULL) {
-			QAT_LOG(DEBUG,
-				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
-			rte_cryptodev_pmd_destroy(cryptodev);
-			memset(&qat_dev_instance->asym_rte_dev, 0,
-				sizeof(qat_dev_instance->asym_rte_dev));
-			return -EFAULT;
-		}
-	}
-
-	memcpy(internals->capa_mz->addr, capabilities, capa_size);
-	internals->qat_dev_capabilities = internals->capa_mz->addr;
-
-	while (1) {
-		if (qat_dev_cmd_param[i].name == NULL)
-			break;
-		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
-			internals->min_enq_burst_threshold =
-					qat_dev_cmd_param[i].val;
-		i++;
-	}
-
-	qat_pci_dev->asym_dev = internals;
-
-	rte_cryptodev_pmd_probing_finish(cryptodev);
-
-	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
-			cryptodev->data->name, internals->dev_id);
-	return 0;
-}
-
-int
-qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
-{
-	struct rte_cryptodev *cryptodev;
-
-	if (qat_pci_dev == NULL)
-		return -ENODEV;
-	if (qat_pci_dev->asym_dev == NULL)
-		return 0;
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
-
-	/* free crypto device */
-	cryptodev = rte_cryptodev_pmd_get_dev(
-			qat_pci_dev->asym_dev->dev_id);
-	rte_cryptodev_pmd_destroy(cryptodev);
-	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
-	qat_pci_dev->asym_dev = NULL;
-
-	return 0;
-}
-
-static struct cryptodev_driver qat_crypto_drv;
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
-		cryptodev_qat_asym_driver,
-		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_pmd.h b/drivers/crypto/qat/qat_asym_pmd.h
deleted file mode 100644
index f988d646e5..0000000000
--- a/drivers/crypto/qat/qat_asym_pmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-
-#ifndef _QAT_ASYM_PMD_H_
-#define _QAT_ASYM_PMD_H_
-
-#include <rte_cryptodev.h>
-#include "qat_crypto.h"
-#include "qat_device.h"
-
-/** Intel(R) QAT Asymmetric Crypto PMD name */
-#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
-
-
-/**
- * Helper function to add an asym capability
- * <name> <op type> <modlen (min, max, increment)>
- **/
-#define QAT_ASYM_CAP(n, o, l, r, i)					\
-	{								\
-		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
-		{.asym = {						\
-			.xform_capa = {					\
-				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
-				.op_types = o,				\
-				{					\
-				.modlen = {				\
-				.min = l,				\
-				.max = r,				\
-				.increment = i				\
-				}, }					\
-			}						\
-		},							\
-		}							\
-	}
-
-extern uint8_t qat_asym_driver_id;
-
-extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
-
-void
-qat_asym_init_op_cookie(void *op_cookie);
-
-uint16_t
-qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-uint16_t
-qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-			      uint16_t nb_ops);
-
-#endif /* _QAT_ASYM_PMD_H_ */
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 7/9] crypto/qat: rework burst data path
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                             ` (5 preceding siblings ...)
  2022-02-23  0:50                           ` [dpdk-dev v12 6/9] crypto/qat: unify asymmetric functions Kai Ji
@ 2022-02-23  0:50                           ` Kai Ji
  2022-02-23  0:50                           ` [dpdk-dev v12 8/9] crypto/qat: unify raw data path functions Kai Ji
                                             ` (2 subsequent siblings)
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:50 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch enables the op_build_request callback in
qat_enqueue_op_burst and the qat_dequeue_process_response
callback in qat_dequeue_op_burst.
The op_build_request invoked when building a crypto request is
selected based on the crypto operations set up during session init.
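The reworked build path also caches the last-seen session pointer and
its per-session builder in the queue pair's opaque[] words, so the
lookup is skipped when consecutive ops share a session. A minimal
standalone sketch of that caching idea follows; the types are
simplified stand-ins, and the uintptr_t round-trip of the function
pointer mirrors the approach used here (implementation-defined but
common practice).

#include <stdint.h>
#include <stdio.h>

struct session;
typedef int (*build_request_t)(struct session *s, uint8_t *out_msg);

struct session {
	int algo;
	build_request_t build_request;	/* chosen at session init time */
};

static int
build_cipher_request(struct session *s, uint8_t *out_msg)
{
	out_msg[0] = (uint8_t)s->algo;	/* pretend request header */
	return 0;
}

/*
 * opaque[0] caches the last session pointer, opaque[1] its builder;
 * both are reset at the start of every burst.
 */
static int
build_request_cached(struct session *s, uint8_t *out_msg, uint64_t *opaque)
{
	build_request_t build;

	if ((uintptr_t)opaque[0] != (uintptr_t)s) {
		/* session changed since the previous op: refresh the cache */
		opaque[0] = (uintptr_t)s;
		opaque[1] = (uintptr_t)s->build_request;
	}
	build = (build_request_t)(uintptr_t)opaque[1];
	return build(s, out_msg);
}

int main(void)
{
	struct session s = { .algo = 7, .build_request = build_cipher_request };
	uint64_t opaque[2] = { UINT64_MAX, UINT64_MAX };
	uint8_t msg[64];

	build_request_cached(&s, msg, opaque);	/* miss: fills the cache */
	build_request_cached(&s, msg, opaque);	/* hit: reuses the builder */
	printf("algo byte in request: %u\n", msg[0]);
	return 0;
}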

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 830 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 271 insertions(+), 634 deletions(-)

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index b36ffa6f6d..08ac91eac4 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -599,29 +598,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -817,8 +805,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -836,21 +823,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index ff176dddf1..c088fd50d2 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 353c36534a..6e65950baf 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -507,7 +507,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index 3ae95f2e7b..78caa5649c 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -103,28 +103,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..692e1f8f0a 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,68 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	uintptr_t sess = (uintptr_t)opaque[0];
+	uintptr_t build_request_p = (uintptr_t)opaque[1];
+	qat_sym_build_request_t build_request = (void *)build_request_p;
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != (uintptr_t)ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if ((void *)sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +128,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, (void *)sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (uintptr_t)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value; they typically do, but could differ if the binaries were
+	 * built separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 8/9] crypto/qat: unify raw data path functions
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                             ` (6 preceding siblings ...)
  2022-02-23  0:50                           ` [dpdk-dev v12 7/9] crypto/qat: rework burst data path Kai Ji
@ 2022-02-23  0:50                           ` Kai Ji
  2022-02-23  0:50                           ` [dpdk-dev v12 9/9] crypto/qat: support out of place SG list Kai Ji
  2022-02-23  9:18                           ` [EXT] [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:50 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch unifies the QAT raw data-path API implementations with the
enqueue/dequeue methods used for regular crypto operations. The
generation-specific functions are updated accordingly, and
qat_sym_hw_dp.c is removed as it is no longer required.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
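The core of the rework is the dispatch pattern used below: every generation
shares one set of base raw data-path callbacks, and each generation's setup
hook overrides only the paths its hardware handles differently (as the
gen3/gen4 set_raw_dp_ctx hooks further down fall back to
qat_sym_configure_raw_dp_ctx_gen1() and replace only the single-pass
enqueue callbacks). The snippet that follows is a standalone, simplified
illustration of that pattern, not DPDK code; all names in it are invented
for the sketch.

#include <stdio.h>

/* Callback table standing in for rte_crypto_raw_dp_ctx in this sketch. */
struct raw_dp_ctx {
	int (*enqueue)(const char *job);
	int (*dequeue)(void);
};

/* Shared "GEN1" callbacks used by every generation. */
static int enqueue_base(const char *job)
{
	printf("base enqueue: %s\n", job);
	return 0;
}

static int dequeue_base(void)
{
	printf("base dequeue\n");
	return 0;
}

/* Generation-specific override, e.g. a single-pass AEAD enqueue path. */
static int enqueue_single_pass(const char *job)
{
	printf("gen-specific single-pass enqueue: %s\n", job);
	return 0;
}

/* "GEN1" setup: install the common callbacks. */
static void configure_raw_dp_ctx_gen1(struct raw_dp_ctx *ctx)
{
	ctx->enqueue = enqueue_base;
	ctx->dequeue = dequeue_base;
}

/* Later generation: reuse GEN1, then override only what differs. */
static void configure_raw_dp_ctx_gen3(struct raw_dp_ctx *ctx, int is_single_pass)
{
	configure_raw_dp_ctx_gen1(ctx);
	if (is_single_pass)
		ctx->enqueue = enqueue_single_pass;
}

int main(void)
{
	struct raw_dp_ctx ctx;

	configure_raw_dp_ctx_gen3(&ctx, 1);
	ctx.enqueue("AES-GCM job");	/* dispatches to the override */
	ctx.dequeue();			/* still the shared base dequeue */
	return 0;
}
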
 drivers/common/qat/meson.build               |   2 +-
 drivers/compress/qat/qat_comp_pmd.c          |  12 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 214 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 122 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  78 ++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 656 ++++++++++++
 drivers/crypto/qat/qat_crypto.h              |   3 +
 drivers/crypto/qat/qat_sym.c                 |  56 +-
 drivers/crypto/qat/qat_sym_hw_dp.c           | 986 -------------------
 10 files changed, 1137 insertions(+), 994 deletions(-)
 delete mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index f687f5c9d8..b7027f3164 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -74,7 +74,7 @@ endif
 
 if qat_crypto
     foreach f: ['qat_sym.c', 'qat_sym_session.c',
-            'qat_sym_hw_dp.c', 'qat_asym.c', 'qat_crypto.c',
+            'qat_asym.c', 'qat_crypto.c',
             'dev/qat_sym_pmd_gen1.c',
             'dev/qat_asym_pmd_gen1.c',
             'dev/qat_crypto_pmd_gen2.c',
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index efe5d08a99..dc8db84a68 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -616,11 +616,18 @@ static struct rte_compressdev_ops compress_qat_dummy_ops = {
 	.private_xform_free	= qat_comp_private_xform_free
 };
 
+static uint16_t
+qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops,  uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
+				nb_ops);
+}
+
 static uint16_t
 qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 				   uint16_t nb_ops)
 {
-	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
+	uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 
 	if (ret) {
@@ -638,8 +645,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
 
 		} else {
 			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
-					(compressdev_dequeue_pkt_burst_t)
-					qat_comp_pmd_enq_deq_dummy_op_burst;
+					qat_comp_dequeue_burst;
 		}
 	}
 	return ret;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index 64e6ae66ec..0c64c1e43f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -291,6 +291,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 			qat_sym_crypto_cap_get_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
 			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index db864d973a..ffa093a7a3 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -394,6 +394,218 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -403,6 +615,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_feature_flags_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
 			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 7642a87d55..f803bc1459 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -223,6 +223,126 @@ qat_sym_crypto_set_session_gen4(void *cdev, void *session)
 	return ret;
 }
 
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -230,6 +350,8 @@ RTE_INIT(qat_sym_crypto_gen4_init)
 			qat_sym_crypto_cap_get_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
 			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 96cdb97a26..50a9c5ad5b 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -839,6 +839,84 @@ int
 qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		uint8_t *out_msg, void *op_cookie);
 
+/* -----------------GEN 1 sym crypto raw data path APIs ---------------- */
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data);
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status);
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status);
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status);
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c088fd50d2..58f07eb0b3 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,6 +146,10 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
+
+	/* Raw data-path API related operations */
+	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
+	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
@@ -448,6 +452,656 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest __rte_unused,
+	struct rte_crypto_va_iova_ptr *aad __rte_unused,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct qat_sym_op_cookie *cookie;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, NULL, NULL);
+#endif
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i],
+				cookie, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+			(uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				NULL, NULL, NULL);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
+			NULL, 0, cipher_iv, digest, auth_iv, ofs,
+			(uint32_t)data_len)))
+		return -1;
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
+			auth_iv, NULL, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				NULL, 0,
+				&vec->iv[i], &vec->digest[i],
+				&vec->auth_iv[i], ofs, (uint32_t)data_len)))
+			break;
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i],
+				&vec->auth_iv[i],
+				NULL, &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+int
+qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+
+uint32_t
+qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
+	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
+	uint32_t max_nb_to_dequeue,
+	rte_cryptodev_raw_post_dequeue_t post_dequeue,
+	void **out_user_data, uint8_t is_user_data_array,
+	uint32_t *n_success_jobs, int *return_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	void *resp_opaque;
+	uint32_t i, n, inflight;
+	uint32_t head;
+	uint8_t status;
+
+	*n_success_jobs = 0;
+	*return_status = 0;
+	head = dp_ctx->head;
+
+	inflight = qp->enqueued - qp->dequeued;
+	if (unlikely(inflight == 0))
+		return 0;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			head);
+	/* no operation ready */
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return 0;
+
+	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+	/* get the dequeue count */
+	if (get_dequeue_count) {
+		n = get_dequeue_count(resp_opaque);
+		if (unlikely(n == 0))
+			return 0;
+	} else {
+		if (unlikely(max_nb_to_dequeue == 0))
+			return 0;
+		n = max_nb_to_dequeue;
+	}
+
+	out_user_data[0] = resp_opaque;
+	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+	post_dequeue(resp_opaque, 0, status);
+	*n_success_jobs += status;
+
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* we already finished dequeue when n == 1 */
+	if (unlikely(n == 1)) {
+		i = 1;
+		goto end_deq;
+	}
+
+	if (is_user_data_array) {
+		for (i = 1; i < n; i++) {
+			resp = (struct icp_qat_fw_comn_resp *)(
+				(uint8_t *)rx_queue->base_addr + head);
+			if (unlikely(*(uint32_t *)resp ==
+					ADF_RING_EMPTY_SIG))
+				goto end_deq;
+			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
+			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+			*n_success_jobs += status;
+			post_dequeue(out_user_data[i], i, status);
+			head = (head + rx_queue->msg_size) &
+					rx_queue->modulo_mask;
+		}
+
+		goto end_deq;
+	}
+
+	/* opaque is not array */
+	for (i = 1; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+			goto end_deq;
+		head = (head + rx_queue->msg_size) &
+				rx_queue->modulo_mask;
+		post_dequeue(resp_opaque, i, status);
+		*n_success_jobs += status;
+	}
+
+end_deq:
+	dp_ctx->head = head;
+	dp_ctx->cached_dequeue += i;
+	return i;
+}
+
+void *
+qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
+	int *dequeue_status, enum rte_crypto_op_status *op_status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	register struct icp_qat_fw_comn_resp *resp;
+
+	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+			dp_ctx->head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+		return NULL;
+
+	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	dp_ctx->cached_dequeue++;
+
+	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
+			RTE_CRYPTO_OP_STATUS_SUCCESS :
+			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	*dequeue_status = 0;
+	return (void *)(uintptr_t)resp->opaque_data;
+}
+
+int
+qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_enqueue != n))
+		return -1;
+
+	qp->enqueued += n;
+	qp->stats.enqueued_count += n;
+
+	tx_queue->tail = dp_ctx->tail;
+
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number, tx_queue->tail);
+	tx_queue->csr_tail = tx_queue->tail;
+	dp_ctx->cached_enqueue = 0;
+
+	return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+	if (unlikely(dp_ctx->cached_dequeue != n))
+		return -1;
+
+	rx_queue->head = dp_ctx->head;
+	rx_queue->nb_processed_responses += n;
+	qp->dequeued += n;
+	qp->stats.dequeued_count += n;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+		uint32_t old_head, new_head;
+		uint32_t max_head;
+
+		old_head = rx_queue->csr_head;
+		new_head = rx_queue->head;
+		max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+		/* write out free descriptors */
+		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+		if (new_head < old_head) {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+					max_head - old_head);
+			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+					new_head);
+		} else {
+			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+					old_head);
+		}
+		rx_queue->nb_processed_responses = 0;
+		rx_queue->csr_head = new_head;
+
+		/* write current head to CSR */
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+			new_head);
+	}
+
+	dp_ctx->cached_dequeue = 0;
+	return 0;
+}
+
+int
+qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+
+	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
+	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_chain_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_chain_gen1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_aead_gen1;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs_gen1;
+			raw_dp_ctx->enqueue =
+					qat_sym_dp_enqueue_single_cipher_gen1;
+		}
+	} else
+		return -1;
+
+	return 0;
+}
+
 int
 qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
 {
@@ -518,6 +1172,8 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 			qat_sym_crypto_cap_get_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
 			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c01266f81c..cf386d0ed0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -53,11 +53,14 @@ typedef void * (*create_security_ctx_t)(void *cryptodev);
 
 typedef int (*set_session_t)(void *cryptodev, void *session);
 
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
 	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 692e1f8f0a..1ccffad5ab 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -89,7 +89,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -144,7 +144,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 					(void *)cdev, (void *)sess);
 				if (ret < 0) {
 					op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+						RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 					return -EINVAL;
 				}
 			}
@@ -292,8 +292,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		if (internals->capa_mz == NULL) {
 			QAT_LOG(DEBUG,
 				"Error allocating memzone for capabilities, "
-				"destroying PMD for %s",
-				name);
+				"destroying PMD for %s", name);
 			ret = -EFAULT;
 			goto error;
 		}
@@ -355,6 +354,55 @@ qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
 	return 0;
 }
 
+int
+qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
+	enum rte_crypto_op_sess_type sess_type,
+	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
+{
+	struct qat_cryptodev_private *internals = dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_crypto_gen_dev_ops *gen_dev_ops =
+			&qat_sym_gen_dev_ops[qat_dev_gen];
+	struct qat_qp *qp;
+	struct qat_sym_session *ctx;
+	struct qat_sym_dp_ctx *dp_ctx;
+
+	if (!gen_dev_ops->set_raw_dp_ctx) {
+		QAT_LOG(ERR, "Device GEN %u does not support raw data path",
+				qat_dev_gen);
+		return -ENOTSUP;
+	}
+
+	qp = dev->data->queue_pairs[qp_id];
+	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
+
+	if (!is_update) {
+		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
+				sizeof(struct qat_sym_dp_ctx));
+		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
+		dp_ctx->tail = qp->tx_q.tail;
+		dp_ctx->head = qp->rx_q.head;
+		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
+	}
+
+	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+		return -EINVAL;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session_ctx.crypto_sess, qat_sym_driver_id);
+
+	dp_ctx->session = ctx;
+
+	return gen_dev_ops->set_raw_dp_ctx(raw_dp_ctx, ctx);
+}
+
+int
+qat_sym_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct qat_sym_dp_ctx);
+}
+
 static struct cryptodev_driver qat_crypto_drv;
 RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
 		cryptodev_qat_sym_driver,
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
deleted file mode 100644
index 5322faff44..0000000000
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ /dev/null
@@ -1,986 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <cryptodev_pmd.h>
-
-#include "adf_transport_access_macros.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-
-#include "qat_sym.h"
-#include "qat_sym_session.h"
-#include "qat_qp.h"
-
-static __rte_always_inline int32_t
-qat_sym_dp_parse_data_vec(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
-		struct rte_crypto_vec *data, uint16_t n_data_vecs)
-{
-	struct qat_queue *tx_queue;
-	struct qat_sym_op_cookie *cookie;
-	struct qat_sgl *list;
-	uint32_t i;
-	uint32_t total_len;
-
-	if (likely(n_data_vecs == 1)) {
-		req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			data[0].iova;
-		req->comn_mid.src_length = req->comn_mid.dst_length =
-			data[0].len;
-		return data[0].len;
-	}
-
-	if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
-		return -1;
-
-	total_len = 0;
-	tx_queue = &qp->tx_q;
-
-	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
-			QAT_COMN_PTR_TYPE_SGL);
-	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
-	list = (struct qat_sgl *)&cookie->qat_sgl_src;
-
-	for (i = 0; i < n_data_vecs; i++) {
-		list->buffers[i].len = data[i].len;
-		list->buffers[i].resrvd = 0;
-		list->buffers[i].addr = data[i].iova;
-		if (total_len + data[i].len > UINT32_MAX) {
-			QAT_DP_LOG(ERR, "Message too long");
-			return -1;
-		}
-		total_len += data[i].len;
-	}
-
-	list->num_bufs = i;
-	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
-			cookie->qat_sgl_src_phys_addr;
-	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
-	return total_len;
-}
-
-static __rte_always_inline void
-set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	/* copy IV into request if it fits */
-	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
-				iv_len);
-	else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
-	}
-}
-
-#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
-	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
-	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
-
-static __rte_always_inline void
-qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i < n; i++)
-		sta[i] = status;
-}
-
-#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
-	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
-
-static __rte_always_inline void
-enqueue_one_cipher_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-
-	/* cipher IV */
-	set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_cipher(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest __rte_unused,
-	struct rte_crypto_va_iova_ptr *aad __rte_unused,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_cipher_job(ctx, req, iv, ofs, (uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_cipher_job(ctx, req, &vec->iv[i], ofs,
-			(uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_auth_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = data_len - ofs.ofs.auth.head -
-			ofs.ofs.auth.tail;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
-				ctx->auth_iv.length);
-		break;
-	default:
-		break;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv __rte_unused,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs,
-			(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_auth_job(ctx, req, &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline int
-enqueue_one_chain_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_vec *data,
-	uint16_t n_data_vecs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	rte_iova_t auth_iova_end;
-	int32_t cipher_len, auth_len;
-
-	cipher_param = (void *)&req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	cipher_len = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
-
-	if (unlikely(cipher_len < 0 || auth_len < 0))
-		return -1;
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = cipher_len;
-	set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
-
-	auth_param->auth_off = ofs.ofs.auth.head;
-	auth_param->auth_len = auth_len;
-	auth_param->auth_res_addr = digest->iova;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
-	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		auth_param->u1.aad_adr = auth_iv->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		break;
-	default:
-		break;
-	}
-
-	if (unlikely(n_data_vecs > 1)) {
-		int auth_end_get = 0, i = n_data_vecs - 1;
-		struct rte_crypto_vec *cvec = &data[0];
-		uint32_t len;
-
-		len = data_len - ofs.ofs.auth.tail;
-
-		while (i >= 0 && len > 0) {
-			if (cvec->len >= len) {
-				auth_iova_end = cvec->iova + len;
-				len = 0;
-				auth_end_get = 1;
-				break;
-			}
-			len -= cvec->len;
-			i--;
-			cvec++;
-		}
-
-		if (unlikely(auth_end_get == 0))
-			return -1;
-	} else
-		auth_iova_end = data[0].iova + auth_param->auth_off +
-			auth_param->auth_len;
-
-	/* Then check if digest-encrypted conditions are met */
-	if ((auth_param->auth_off + auth_param->auth_len <
-		cipher_param->cipher_offset +
-		cipher_param->cipher_length) &&
-		(digest->iova == auth_iova_end)) {
-		/* Handle partial digest encryption */
-		if (cipher_param->cipher_offset +
-				cipher_param->cipher_length <
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length)
-			req->comn_mid.dst_length =
-				req->comn_mid.src_length =
-				auth_param->auth_off +
-				auth_param->auth_len +
-				ctx->digest_length;
-		struct icp_qat_fw_comn_req_hdr *header =
-			&req->comn_hdr;
-		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-			header->serv_specif_flags,
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-	}
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *cipher_iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *auth_iv,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs,
-			cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len)))
-		return -1;
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		if (unlikely(enqueue_one_chain_job(ctx, req,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num,
-			&vec->iv[i], &vec->digest[i],
-			&vec->auth_iv[i], ofs, (uint32_t)data_len)))
-			break;
-
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
-	if (ctx->is_single_pass) {
-		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
-
-		if (ctx->is_ucs) {
-			/* QAT GEN4 uses single pass to treat AEAD as cipher
-			 * operation
-			 */
-			struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
-				(void *)&req->serv_specif_rqpars;
-			cipher_param_20->spc_aad_addr = aad->iova;
-			cipher_param_20->spc_auth_res_addr = digest->iova;
-		} else {
-			cipher_param->spc_aad_addr = aad->iova;
-			cipher_param->spc_auth_res_addr = digest->iova;
-		}
-
-		return;
-	}
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req,
-			vec->src_sgl[i].vec,
-			vec->src_sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
-	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
-	uint32_t max_nb_to_dequeue,
-	rte_cryptodev_raw_post_dequeue_t post_dequeue,
-	void **out_user_data, uint8_t is_user_data_array,
-	uint32_t *n_success_jobs, int *return_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct icp_qat_fw_comn_resp *resp;
-	void *resp_opaque;
-	uint32_t i, n, inflight;
-	uint32_t head;
-	uint8_t status;
-
-	*n_success_jobs = 0;
-	*return_status = 0;
-	head = dp_ctx->head;
-
-	inflight = qp->enqueued - qp->dequeued;
-	if (unlikely(inflight == 0))
-		return 0;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			head);
-	/* no operation ready */
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return 0;
-
-	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
-	/* get the dequeue count */
-	if (get_dequeue_count) {
-		n = get_dequeue_count(resp_opaque);
-		if (unlikely(n == 0))
-			return 0;
-	} else {
-		if (unlikely(max_nb_to_dequeue == 0))
-			return 0;
-		n = max_nb_to_dequeue;
-	}
-
-	out_user_data[0] = resp_opaque;
-	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-	post_dequeue(resp_opaque, 0, status);
-	*n_success_jobs += status;
-
-	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
-
-	/* we already finished dequeue when n == 1 */
-	if (unlikely(n == 1)) {
-		i = 1;
-		goto end_deq;
-	}
-
-	if (is_user_data_array) {
-		for (i = 1; i < n; i++) {
-			resp = (struct icp_qat_fw_comn_resp *)(
-				(uint8_t *)rx_queue->base_addr + head);
-			if (unlikely(*(uint32_t *)resp ==
-					ADF_RING_EMPTY_SIG))
-				goto end_deq;
-			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
-			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-			*n_success_jobs += status;
-			post_dequeue(out_user_data[i], i, status);
-			head = (head + rx_queue->msg_size) &
-					rx_queue->modulo_mask;
-		}
-
-		goto end_deq;
-	}
-
-	/* opaque is not array */
-	for (i = 1; i < n; i++) {
-		resp = (struct icp_qat_fw_comn_resp *)(
-			(uint8_t *)rx_queue->base_addr + head);
-		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
-		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-			goto end_deq;
-		head = (head + rx_queue->msg_size) &
-				rx_queue->modulo_mask;
-		post_dequeue(resp_opaque, i, status);
-		*n_success_jobs += status;
-	}
-
-end_deq:
-	dp_ctx->head = head;
-	dp_ctx->cached_dequeue += i;
-	return i;
-}
-
-static __rte_always_inline void *
-qat_sym_dp_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
-		enum rte_crypto_op_status *op_status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	register struct icp_qat_fw_comn_resp *resp;
-
-	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
-			dp_ctx->head);
-
-	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
-		return NULL;
-
-	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
-			rx_queue->modulo_mask;
-	dp_ctx->cached_dequeue++;
-
-	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
-			RTE_CRYPTO_OP_STATUS_SUCCESS :
-			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	*dequeue_status = 0;
-	return (void *)(uintptr_t)resp->opaque_data;
-}
-
-static __rte_always_inline int
-qat_sym_dp_kick_tail(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_enqueue != n))
-		return -1;
-
-	qp->enqueued += n;
-	qp->stats.enqueued_count += n;
-
-	tx_queue->tail = dp_ctx->tail;
-
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
-			tx_queue->hw_bundle_number,
-			tx_queue->hw_queue_number, tx_queue->tail);
-	tx_queue->csr_tail = tx_queue->tail;
-	dp_ctx->cached_enqueue = 0;
-
-	return 0;
-}
-
-static __rte_always_inline int
-qat_sym_dp_update_head(void *qp_data, uint8_t *drv_ctx, uint32_t n)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_queue *rx_queue = &qp->rx_q;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-
-	if (unlikely(dp_ctx->cached_dequeue != n))
-		return -1;
-
-	rx_queue->head = dp_ctx->head;
-	rx_queue->nb_processed_responses += n;
-	qp->dequeued += n;
-	qp->stats.dequeued_count += n;
-	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
-		uint32_t old_head, new_head;
-		uint32_t max_head;
-
-		old_head = rx_queue->csr_head;
-		new_head = rx_queue->head;
-		max_head = qp->nb_descriptors * rx_queue->msg_size;
-
-		/* write out free descriptors */
-		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
-
-		if (new_head < old_head) {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
-					max_head - old_head);
-			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
-					new_head);
-		} else {
-			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
-					old_head);
-		}
-		rx_queue->nb_processed_responses = 0;
-		rx_queue->csr_head = new_head;
-
-		/* write current head to CSR */
-		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
-			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
-			new_head);
-	}
-
-	dp_ctx->cached_dequeue = 0;
-	return 0;
-}
-
-int
-qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
-	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
-	enum rte_crypto_op_sess_type sess_type,
-	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
-{
-	struct qat_qp *qp;
-	struct qat_sym_session *ctx;
-	struct qat_sym_dp_ctx *dp_ctx;
-
-	qp = dev->data->queue_pairs[qp_id];
-	dp_ctx = (struct qat_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;
-
-	if (!is_update) {
-		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx) +
-				sizeof(struct qat_sym_dp_ctx));
-		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
-		dp_ctx->tail = qp->tx_q.tail;
-		dp_ctx->head = qp->rx_q.head;
-		dp_ctx->cached_enqueue = dp_ctx->cached_dequeue = 0;
-	}
-
-	if (sess_type != RTE_CRYPTO_OP_WITH_SESSION)
-		return -EINVAL;
-
-	ctx = (struct qat_sym_session *)get_sym_session_private_data(
-			session_ctx.crypto_sess, qat_sym_driver_id);
-
-	dp_ctx->session = ctx;
-
-	raw_dp_ctx->enqueue_done = qat_sym_dp_kick_tail;
-	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst;
-	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
-	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_chain_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
-		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_aead_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
-		} else {
-			raw_dp_ctx->enqueue_burst =
-					qat_sym_dp_enqueue_cipher_jobs;
-			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
-		}
-	} else
-		return -1;
-
-	return 0;
-}
-
-int
-qat_sym_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
-{
-	return sizeof(struct qat_sym_dp_ctx);
-}
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* [dpdk-dev v12 9/9] crypto/qat: support out of place SG list
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                             ` (7 preceding siblings ...)
  2022-02-23  0:50                           ` [dpdk-dev v12 8/9] crypto/qat: unify raw data path functions Kai Ji
@ 2022-02-23  0:50                           ` Kai Ji
  2022-02-23  9:18                           ` [EXT] [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
  9 siblings, 0 replies; 156+ messages in thread
From: Kai Ji @ 2022-02-23  0:50 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, gakhil, Kai Ji

This patch adds out-of-place SGL (scatter-gather list) support to the QAT PMD

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
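
For reference, below is a minimal usage sketch (not part of this patch) of how
an application driving the cryptodev raw data-path API could request
out-of-place processing by filling dest_sgl in rte_crypto_sym_vec. The buffer
pointers, IOVAs, lengths and the "cookie"/"raw_ctx" variables are hypothetical
placeholders, and the snippet is a fragment meant to live inside the
application's own enqueue routine; the structure and function names assume the
rte_cryptodev raw data-path interface available since DPDK 21.11.

	#include <rte_cryptodev.h>

	/* Illustrative only: one out-of-place element enqueued via the raw
	 * data-path API. All buffer pointers/IOVAs below are placeholders.
	 */
	struct rte_crypto_vec src_vec = {
		.base = src_buf, .iova = src_iova, .len = data_len,
	};
	struct rte_crypto_vec dst_vec = {
		.base = dst_buf, .iova = dst_iova, .len = data_len,
	};
	struct rte_crypto_sgl src_sgl = { .vec = &src_vec, .num = 1 };
	struct rte_crypto_sgl dst_sgl = { .vec = &dst_vec, .num = 1 };
	struct rte_crypto_va_iova_ptr iv = { .va = iv_buf, .iova = iv_iova };
	struct rte_crypto_va_iova_ptr digest = { .va = tag_buf, .iova = tag_iova };
	struct rte_crypto_va_iova_ptr aad = { .va = aad_buf, .iova = aad_iova };
	int32_t elt_status[1] = { 0 };
	int enq_status;
	void *user_data[1] = { cookie };
	union rte_crypto_sym_ofs ofs = { .raw = 0 };

	struct rte_crypto_sym_vec vec = {
		.num = 1,
		.src_sgl = &src_sgl,
		.dest_sgl = &dst_sgl,	/* NULL here means in-place processing */
		.iv = &iv,
		.digest = &digest,
		.aad = &aad,
		.status = elt_status,
	};

	/* raw_ctx is assumed to have been configured earlier with
	 * rte_cryptodev_configure_raw_dp_ctx() on a QAT queue pair.
	 */
	if (rte_cryptodev_raw_enqueue_burst(raw_ctx, &vec, ofs, user_data,
			&enq_status) == 1)
		rte_cryptodev_raw_enqueue_done(raw_ctx, 1);

When dest_sgl is left NULL the PMD keeps the existing in-place behaviour,
which corresponds to the else branch added in each of the hunks below.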
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index ffa093a7a3..5084a5fcd1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -468,8 +468,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -565,8 +575,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index f803bc1459..bd7f3785df 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -297,8 +297,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 58f07eb0b3..3bcb53cf9f 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -526,9 +526,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-				cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -625,8 +634,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -725,8 +744,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -830,8 +859,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 156+ messages in thread

* RE: [EXT] [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework
  2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
                                             ` (8 preceding siblings ...)
  2022-02-23  0:50                           ` [dpdk-dev v12 9/9] crypto/qat: support out of place SG list Kai Ji
@ 2022-02-23  9:18                           ` Akhil Goyal
  9 siblings, 0 replies; 156+ messages in thread
From: Akhil Goyal @ 2022-02-23  9:18 UTC (permalink / raw)
  To: Kai Ji, dev; +Cc: roy.fan.zhang

> v11 - v12:
> - fixed a compile issue
> 
> v10:
> - rebase to the latest for-main
> - fix of build error when RTE_LOG_DEBUG is enabled
> 
> v9:
> - commit messages reworded
> - fix of unused function error
> 
> v8:
> - rebase to 22.03-rc1
> 
> v7:
> - fix of pointer cast compile error on x86
> 
> v6:
> - fix of pointer cast error on x86
> - rebase to the latest for-main
> 
> v5:
> - rebase to the latest for-main
> - patchset restructured
> 
> v4:
> - patchset broken down and restructured
> 
> v3:
> - separate a single patch 6 into two different patches
> 
> v2:
> - review comments addressed
> 
Applied to dpdk-next-crypto


^ permalink raw reply	[flat|nested] 156+ messages in thread

end of thread, other threads:[~2022-02-23  9:18 UTC | newest]

Thread overview: 156+ messages
2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
2021-10-29 13:58   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor Kai Ji
2021-10-29 14:26   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym " Kai Ji
2021-10-29 14:36   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework Kai Ji
2021-10-29 14:40   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework Kai Ji
2021-10-29 14:41   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix Kai Ji
2021-10-29 14:43   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up Kai Ji
2021-10-29 14:46   ` Zhang, Roy Fan
2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 2/7] crypto/qat: qat driver sym op refactor Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 3/7] crypto/qat: qat driver asym " Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 4/7] crypto/qat: qat driver session method rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 5/7] crypto/qat: qat driver datapath rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 6/7] app/test: cryptodev test fix Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 7/7] crypto/qat: qat driver rework clean up Kai Ji
2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 1/8] crypro/qat: qat driver refactor skeleton Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 2/8] crypto/qat: qat driver sym op refactor Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 3/8] crypto/qat: qat driver asym " Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 4/8] crypto/qat: qat driver session method rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 5/8] crypto/qat: qat driver datapath rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation Kai Ji
2021-11-03 15:46       ` Zhang, Roy Fan
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix Kai Ji
2021-11-03 15:45       ` Zhang, Roy Fan
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 8/8] crypto/qat: qat driver rework clean up Kai Ji
2021-11-03 15:46     ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
2021-11-03 18:49     ` [dpdk-dev] [EXT] " Akhil Goyal
2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 01/11] common/qat: define build op request and dequeue op Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 02/11] crypto/qat: sym build op request specific implementation Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 03/11] crypto/qat: rework session APIs Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 04/11] crypto/qat: asym build op request specific implementation Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 05/11] crypto/qat: unify sym pmd apis Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 06/11] crypto/qat: unify qat asym " Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 07/11] crypto/qat: op burst data path rework Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 08/11] compress/qat: comp dequeue burst update Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 09/11] crypto/qat: raw dp api integration Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 10/11] crypto/qat: support out of place SG list Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 11/11] test/cryptodev: fix incomplete data length Kai Ji
2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 04/10] crypto/qat: rework session APIs Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 07/10] crypto/qat: unify qat asym " Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 08/10] crypto/qat: op burst data path rework Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 09/10] crypto/qat: raw dp api integration Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-08 18:14                 ` [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-09 10:22                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-09 10:22                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-09 10:25                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-09 10:25                   ` Zhang, Roy Fan
2022-02-09 10:20                 ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
2022-02-12 11:32                   ` Akhil Goyal
2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-17 17:59                   ` [EXT] [dpdk-dev v8 00/10] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 3/9] crypto/qat: rework session functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-22 17:02                       ` [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 3/9] crypto/qat: rework session functions Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-22 18:54                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-22 18:54                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-22 18:55                         ` Zhang, Roy Fan
2022-02-22 18:23                       ` [EXT] [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 1/9] common/qat: define build request and dequeue ops Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 2/9] crypto/qat: support symmetric build op request Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 3/9] crypto/qat: rework session functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 4/9] crypto/qat: rework asymmetric op build operation Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 5/9] crypto/qat: unify symmetric functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 6/9] crypto/qat: unify asymmetric functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 7/9] crypto/qat: rework burst data path Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 8/9] crypto/qat: unify raw data path functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 9/9] crypto/qat: support out of place SG list Fan Zhang
2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-23  0:49                           ` [dpdk-dev v12 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-23  0:49                           ` [dpdk-dev v12 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 3/9] crypto/qat: rework session functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-23  9:18                           ` [EXT] [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
