DPDK patches and discussions
 help / color / mirror / Atom feed
From: Ciara Power <ciara.power@intel.com>
To: dev@dpdk.org
Cc: arkadiuszx.kusztal@intel.com, gakhil@marvell.com,
	Ciara Power <ciara.power@intel.com>, Kai Ji <kai.ji@intel.com>
Subject: [PATCH v3 2/4] common/qat: add zuc256 wireless slice for gen3
Date: Mon, 26 Feb 2024 17:08:16 +0000	[thread overview]
Message-ID: <20240226170818.533793-3-ciara.power@intel.com> (raw)
In-Reply-To: <20240226170818.533793-1-ciara.power@intel.com>

The new gen3 device handles wireless algorithms on dedicated wireless slices.
Based on the device's wireless slice support, set the required flags for
these algorithms so that they are moved to the wireless slice.

One of the algorithms supported on the wireless slices is ZUC 256;
support is added for it, along with the corresponding capability update
for the device.
The device supports a 24-byte IV for ZUC 256, with iv[20]
being ignored in the register.
For a 25-byte IV, it is compressed into 23 bytes.

Signed-off-by: Ciara Power <ciara.power@intel.com>
Acked-by: Kai Ji <kai.ji@intel.com>
---
v2:
  - Fixed setting extended protocol flag bit position.
  - Added slice map check for ZUC256 wireless slice.
  - Fixed IV modification for ZUC256 in raw datapath.
  - Added increment size for ZUC256 capabilities.
  - Added release note.
---
 doc/guides/rel_notes/release_24_03.rst       |   1 +
 drivers/common/qat/qat_adf/icp_qat_fw.h      |   6 +-
 drivers/common/qat/qat_adf/icp_qat_fw_la.h   |  24 ++++
 drivers/common/qat/qat_adf/icp_qat_hw.h      |  24 +++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   7 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c |  52 ++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  34 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  43 ++++++
 drivers/crypto/qat/qat_sym_session.c         | 142 +++++++++++++++++--
 drivers/crypto/qat/qat_sym_session.h         |   2 +
 10 files changed, 312 insertions(+), 23 deletions(-)

diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 55517eabd8..0dee1ff104 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -134,6 +134,7 @@ New Features
 * **Updated Intel QuickAssist Technology driver.**
 
   * Enabled support for new QAT GEN3 (578a) devices in QAT crypto driver.
+  * Enabled ZUC256 cipher and auth algorithm for wireless slice enabled GEN3 device.
 
 * **Updated Marvell cnxk crypto driver.**
 
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 3aa17ae041..dd7c926140 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -75,7 +75,8 @@ struct icp_qat_fw_comn_req_hdr {
 	uint8_t service_type;
 	uint8_t hdr_flags;
 	uint16_t serv_specif_flags;
-	uint16_t comn_req_flags;
+	uint8_t comn_req_flags;
+	uint8_t ext_flags;
 };
 
 struct icp_qat_fw_comn_req_rqpars {
@@ -176,9 +177,6 @@ struct icp_qat_fw_comn_resp {
 #define QAT_COMN_PTR_TYPE_SGL 0x1
 #define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
 #define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
-#define QAT_COMN_EXT_FLAGS_BITPOS 8
-#define QAT_COMN_EXT_FLAGS_MASK 0x1
-#define QAT_COMN_EXT_FLAGS_USED 0x1
 
 #define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
 	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index 70f0effa62..134c309355 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -81,6 +81,15 @@ struct icp_qat_fw_la_bulk_req {
 #define ICP_QAT_FW_LA_PARTIAL_END 2
 #define QAT_LA_PARTIAL_BITPOS 0
 #define QAT_LA_PARTIAL_MASK 0x3
+#define QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_BITPOS 0
+#define QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS 1
+#define QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_MASK 0x1
+#define QAT_LA_USE_WCP_SLICE 1
+#define QAT_LA_USE_WCP_SLICE_BITPOS 2
+#define QAT_LA_USE_WCP_SLICE_MASK 0x1
+#define QAT_LA_USE_WAT_SLICE_BITPOS 3
+#define QAT_LA_USE_WAT_SLICE 1
+#define QAT_LA_USE_WAT_SLICE_MASK 0x1
 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
 	cmp_auth, ret_auth, update_state, \
 	ciph_iv, ciphcfg, partial) \
@@ -188,6 +197,21 @@ struct icp_qat_fw_la_bulk_req {
 	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
 	QAT_LA_PARTIAL_MASK)
 
+#define ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(flags, val)	\
+	QAT_FIELD_SET(flags, val,				\
+	QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_BITPOS,		\
+	QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_MASK)
+
+#define ICP_QAT_FW_USE_WCP_SLICE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, \
+	QAT_LA_USE_WCP_SLICE_BITPOS, \
+	QAT_LA_USE_WCP_SLICE_MASK)
+
+#define ICP_QAT_FW_USE_WAT_SLICE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, \
+	QAT_LA_USE_WAT_SLICE_BITPOS, \
+	QAT_LA_USE_WAT_SLICE_MASK)
+
 #define QAT_FW_LA_MODE2 1
 #define QAT_FW_LA_NO_MODE2 0
 #define QAT_FW_LA_MODE2_MASK 0x1
diff --git a/drivers/common/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h
index 33756d512d..4651fb90bb 100644
--- a/drivers/common/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_hw.h
@@ -21,7 +21,8 @@ enum icp_qat_slice_mask {
 	ICP_ACCEL_MASK_CRYPTO1_SLICE = 0x100,
 	ICP_ACCEL_MASK_CRYPTO2_SLICE = 0x200,
 	ICP_ACCEL_MASK_SM3_SLICE = 0x400,
-	ICP_ACCEL_MASK_SM4_SLICE = 0x800
+	ICP_ACCEL_MASK_SM4_SLICE = 0x800,
+	ICP_ACCEL_MASK_ZUC_256_SLICE = 0x2000,
 };
 
 enum icp_qat_hw_ae_id {
@@ -71,7 +72,16 @@ enum icp_qat_hw_auth_algo {
 	ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
 	ICP_QAT_HW_AUTH_ALGO_SHA3_384 = 18,
 	ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
-	ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+	ICP_QAT_HW_AUTH_ALGO_RESERVED = 20,
+	ICP_QAT_HW_AUTH_ALGO_RESERVED1 = 21,
+	ICP_QAT_HW_AUTH_ALGO_RESERVED2 = 22,
+	ICP_QAT_HW_AUTH_ALGO_RESERVED3 = 22,
+	ICP_QAT_HW_AUTH_ALGO_RESERVED4 = 23,
+	ICP_QAT_HW_AUTH_ALGO_RESERVED5 = 24,
+	ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 = 25,
+	ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 = 26,
+	ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128 = 27,
+	ICP_QAT_HW_AUTH_ALGO_DELIMITER = 28
 };
 
 enum icp_qat_hw_auth_mode {
@@ -167,6 +177,9 @@ struct icp_qat_hw_auth_setup {
 #define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
 #define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
 #define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_256_MAC_32_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_256_MAC_64_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_256_MAC_128_STATE1_SZ 16
 
 #define ICP_QAT_HW_NULL_STATE2_SZ 32
 #define ICP_QAT_HW_MD5_STATE2_SZ 16
@@ -191,6 +204,7 @@ struct icp_qat_hw_auth_setup {
 #define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
 #define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
 #define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_ZUC_256_STATE2_SZ 56
 #define ICP_QAT_HW_GALOIS_H_SZ 16
 #define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
 #define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
@@ -228,7 +242,8 @@ enum icp_qat_hw_cipher_algo {
 	ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
 	ICP_QAT_HW_CIPHER_ALGO_SM4 = 10,
 	ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 = 11,
-	ICP_QAT_HW_CIPHER_DELIMITER = 12
+	ICP_QAT_HW_CIPHER_ALGO_ZUC_256 = 12,
+	ICP_QAT_HW_CIPHER_DELIMITER = 13
 };
 
 enum icp_qat_hw_cipher_mode {
@@ -308,6 +323,7 @@ enum icp_qat_hw_cipher_convert {
 #define ICP_QAT_HW_KASUMI_BLK_SZ 8
 #define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
 #define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_256_BLK_SZ 8
 #define ICP_QAT_HW_NULL_KEY_SZ 256
 #define ICP_QAT_HW_DES_KEY_SZ 8
 #define ICP_QAT_HW_3DES_KEY_SZ 24
@@ -343,6 +359,8 @@ enum icp_qat_hw_cipher_convert {
 #define ICP_QAT_HW_SPC_CTR_SZ 16
 #define ICP_QAT_HW_CHACHAPOLY_ICV_SZ 16
 #define ICP_QAT_HW_CHACHAPOLY_AAD_MAX_LOG 14
+#define ICP_QAT_HW_ZUC_256_KEY_SZ 32
+#define ICP_QAT_HW_ZUC_256_IV_SZ 24
 
 #define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index df47767749..62874039a9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -182,10 +182,8 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
 
 	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
+	ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
+			header->ext_flags, QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);
 
 	/* Set Hash Flags in LW 28 */
 	cd_ctrl->hash_flags |= hash_flag;
@@ -199,6 +197,7 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 				header->serv_specif_flags, 0);
 		break;
 	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_256:
 		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
 				ICP_QAT_FW_LA_NO_PROTO);
 		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index bc53e2e0f1..907c3ce3e2 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -204,6 +204,7 @@ qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
 	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen3);
 	capa_num = size/sizeof(struct rte_cryptodev_capabilities);
 	legacy_capa_num = legacy_size/sizeof(struct rte_cryptodev_capabilities);
+	struct rte_cryptodev_capabilities *cap;
 
 	if (unlikely(qat_legacy_capa))
 		size = size + legacy_size;
@@ -255,6 +256,15 @@ qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
 				RTE_CRYPTO_AUTH_SM3_HMAC))) {
 			continue;
 		}
+
+		if (slice_map & ICP_ACCEL_MASK_ZUC_256_SLICE && (
+			check_auth_capa(&capabilities[iter],
+				RTE_CRYPTO_AUTH_ZUC_EIA3) ||
+			check_cipher_capa(&capabilities[iter],
+				RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
+			continue;
+		}
+
 		if (internals->qat_dev->has_wireless_slice && (
 			check_auth_capa(&capabilities[iter],
 				RTE_CRYPTO_AUTH_KASUMI_F9) ||
@@ -268,6 +278,27 @@ qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
 
 		memcpy(addr + curr_capa, capabilities + iter,
 			sizeof(struct rte_cryptodev_capabilities));
+
+		if (internals->qat_dev->has_wireless_slice && (
+			check_auth_capa(&capabilities[iter],
+				RTE_CRYPTO_AUTH_ZUC_EIA3))) {
+			cap = addr + curr_capa;
+			cap->sym.auth.key_size.max = 32;
+			cap->sym.auth.key_size.increment = 16;
+			cap->sym.auth.iv_size.max = 25;
+			cap->sym.auth.iv_size.increment = 1;
+			cap->sym.auth.digest_size.max = 16;
+			cap->sym.auth.digest_size.increment = 4;
+		}
+		if (internals->qat_dev->has_wireless_slice && (
+			check_cipher_capa(&capabilities[iter],
+				RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
+			cap = addr + curr_capa;
+			cap->sym.cipher.key_size.max = 32;
+			cap->sym.cipher.key_size.increment = 16;
+			cap->sym.cipher.iv_size.max = 25;
+			cap->sym.cipher.iv_size.increment = 1;
+		}
 		curr_capa++;
 	}
 	internals->qat_dev_capabilities = internals->capa_mz->addr;
@@ -480,11 +511,14 @@ qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
 }
 
 static int
-qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+qat_sym_crypto_set_session_gen3(void *cdev, void *session)
 {
 	struct qat_sym_session *ctx = session;
 	enum rte_proc_type_t proc_type = rte_eal_process_type();
 	int ret;
+	struct qat_cryptodev_private *internals;
+
+	internals = ((struct rte_cryptodev *)cdev)->data->dev_private;
 
 	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
 		return -EINVAL;
@@ -517,6 +551,22 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
 				ctx->qat_cipher_alg ==
 				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
 			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		} else if ((internals->qat_dev->has_wireless_slice) &&
+				((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
+				ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256))) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		} else if ((internals->qat_dev->has_wireless_slice) &&
+			(ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) &&
+				ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+					1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
 		}
 
 		ret = 0;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 911400e53b..ff7ba55c01 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -117,7 +117,10 @@ qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
 {
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
 		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) {
 		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
 				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
 			return -EINVAL;
@@ -132,7 +135,8 @@ qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
 {
 	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
 		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256)  {
 		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
 			((op->sym->cipher.data.offset %
 			BYTE_LENGTH) != 0)))
@@ -589,6 +593,26 @@ qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
 	return 0;
 }
 
+static inline void
+zuc256_modify_iv(uint8_t *iv)
+{
+	uint8_t iv_tmp[8];
+
+	iv_tmp[0] = iv[16];
+	/* pack the last 8 bytes of IV to 6 bytes.
+	 * discard the 2 MSB bits of each byte
+	 */
+	iv_tmp[1] = (((iv[17] & 0x3f) << 2) | ((iv[18] >> 4) & 0x3));
+	iv_tmp[2] = (((iv[18] & 0xf) << 4) | ((iv[19] >> 2) & 0xf));
+	iv_tmp[3] = (((iv[19] & 0x3) << 6) | (iv[20] & 0x3f));
+
+	iv_tmp[4] = (((iv[21] & 0x3f) << 2) | ((iv[22] >> 4) & 0x3));
+	iv_tmp[5] = (((iv[22] & 0xf) << 4) | ((iv[23] >> 2) & 0xf));
+	iv_tmp[6] = (((iv[23] & 0x3) << 6) | (iv[24] & 0x3f));
+
+	memcpy(iv + 16, iv_tmp, 8);
+}
+
 static __rte_always_inline void
 qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
 		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
@@ -665,6 +689,9 @@ enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
 	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
 		auth_param->u1.aad_adr = auth_iv->iova;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
@@ -747,6 +774,9 @@ enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
 	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
 		auth_param->u1.aad_adr = auth_iv->iova;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 208b7e0ba6..bdd1647ea2 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -248,6 +248,9 @@ qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
 		return -EINVAL;
 	}
 
+	if (ctx->is_zuc256)
+		zuc256_modify_iv(cipher_iv.va);
+
 	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len, op_cookie);
 
 	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
@@ -270,6 +273,8 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
 	struct rte_crypto_va_iova_ptr digest;
 	union rte_crypto_sym_ofs ofs;
 	int32_t total_len;
+	struct rte_cryptodev *cdev;
+	struct qat_cryptodev_private *internals;
 
 	in_sgl.vec = in_vec;
 	out_sgl.vec = out_vec;
@@ -284,6 +289,13 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
 		return -EINVAL;
 	}
 
+	cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+	internals = cdev->data->dev_private;
+
+	if (internals->qat_dev->has_wireless_slice && !ctx->is_gmac)
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				req->comn_hdr.serv_specif_flags, 0);
+
 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
 			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
 	if (unlikely(total_len < 0)) {
@@ -291,6 +303,9 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
 		return -EINVAL;
 	}
 
+	if (ctx->is_zuc256)
+		zuc256_modify_iv(auth_iv.va);
+
 	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
 			total_len);
 
@@ -381,6 +396,11 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 		return -EINVAL;
 	}
 
+	if (ctx->is_zuc256) {
+		zuc256_modify_iv(cipher_iv.va);
+		zuc256_modify_iv(auth_iv.va);
+	}
+
 	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
 			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
 			ofs, total_len, cookie);
@@ -507,6 +527,9 @@ qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
 	if (unlikely(data_len < 0))
 		return -1;
 
+	if (ctx->is_zuc256)
+		zuc256_modify_iv(iv->va);
+
 	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len, cookie);
 
 	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
@@ -563,6 +586,10 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 
 		if (unlikely(data_len < 0))
 			break;
+
+		if (ctx->is_zuc256)
+			zuc256_modify_iv(vec->iv[i].va);
+
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
 			(uint32_t)data_len, cookie);
 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
@@ -613,6 +640,9 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
 	if (unlikely(data_len < 0))
 		return -1;
 
+	if (ctx->is_zuc256)
+		zuc256_modify_iv(auth_iv->va);
+
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
 		null_digest.iova = cookie->digest_null_phys_addr;
 		job_digest = &null_digest;
@@ -678,6 +708,9 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 		if (unlikely(data_len < 0))
 			break;
 
+		if (ctx->is_zuc256)
+			zuc256_modify_iv(vec->auth_iv[i].va);
+
 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
 			null_digest.iova = cookie->digest_null_phys_addr;
 			job_digest = &null_digest;
@@ -733,6 +766,11 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
 	if (unlikely(data_len < 0))
 		return -1;
 
+	if (ctx->is_zuc256) {
+		zuc256_modify_iv(cipher_iv->va);
+		zuc256_modify_iv(auth_iv->va);
+	}
+
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
 		null_digest.iova = cookie->digest_null_phys_addr;
 		job_digest = &null_digest;
@@ -801,6 +839,11 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 		if (unlikely(data_len < 0))
 			break;
 
+		if (ctx->is_zuc256) {
+			zuc256_modify_iv(vec->iv[i].va);
+			zuc256_modify_iv(vec->auth_iv[i].va);
+		}
+
 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
 			null_digest.iova = cookie->digest_null_phys_addr;
 			job_digest = &null_digest;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9f4f6c3d93..ebdad0bd67 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -379,7 +379,9 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 	enum qat_device_gen qat_dev_gen =
 				internals->qat_dev->qat_dev_gen;
-	int ret;
+	int ret, is_wireless = 0;
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
 
 	/* Get cipher xform from crypto xform chain */
 	cipher_xform = qat_get_cipher_xform(xform);
@@ -416,6 +418,8 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 			goto error_out;
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+		if (internals->qat_dev->has_wireless_slice)
+			is_wireless = 1;
 		break;
 	case RTE_CRYPTO_CIPHER_NULL:
 		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
@@ -533,6 +537,10 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 			goto error_out;
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+		if (cipher_xform->key.length == ICP_QAT_HW_ZUC_256_KEY_SZ)
+			session->is_zuc256 = 1;
+		if (internals->qat_dev->has_wireless_slice)
+			is_wireless = 1;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_XTS:
 		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
@@ -587,6 +595,17 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 		goto error_out;
 	}
 
+	if (is_wireless) {
+		/* Set the Use Extended Protocol Flags bit in LW 1 */
+		ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
+				header->ext_flags,
+				QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);
+		/* Force usage of Wireless Cipher slice */
+		ICP_QAT_FW_USE_WCP_SLICE_SET(header->ext_flags,
+				QAT_LA_USE_WCP_SLICE);
+		session->is_wireless = 1;
+	}
+
 	return 0;
 
 error_out:
@@ -820,9 +839,16 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
 	struct qat_cryptodev_private *internals = dev->data->dev_private;
 	const uint8_t *key_data = auth_xform->key.data;
-	uint8_t key_length = auth_xform->key.length;
+	uint16_t key_length = auth_xform->key.length;
 	enum qat_device_gen qat_dev_gen =
 			internals->qat_dev->qat_dev_gen;
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+	uint8_t hash_flag = 0;
+	int is_wireless = 0;
 
 	session->aes_cmac = 0;
 	session->auth_key_length = auth_xform->key.length;
@@ -898,6 +924,10 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
 		session->aes_cmac = 1;
+		if (internals->qat_dev->has_wireless_slice) {
+			is_wireless = 1;
+			session->is_wireless = 1;
+		}
 		break;
 	case RTE_CRYPTO_AUTH_AES_GMAC:
 		if (qat_sym_validate_aes_key(auth_xform->key.length,
@@ -918,6 +948,11 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 		break;
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+		if (internals->qat_dev->has_wireless_slice) {
+			is_wireless = 1;
+			session->is_wireless = 1;
+			hash_flag = 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS;
+		}
 		break;
 	case RTE_CRYPTO_AUTH_MD5_HMAC:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
@@ -934,7 +969,35 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 				rte_cryptodev_get_auth_algo_string(auth_xform->algo));
 			return -ENOTSUP;
 		}
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+		if (key_length == ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ)
+			session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+		else if (key_length == ICP_QAT_HW_ZUC_256_KEY_SZ) {
+			switch (auth_xform->digest_length) {
+			case 4:
+				session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32;
+				break;
+			case 8:
+				session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64;
+				break;
+			case 16:
+				session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128;
+				break;
+			default:
+				QAT_LOG(ERR, "Invalid digest length: %d",
+						auth_xform->digest_length);
+				return -ENOTSUP;
+			}
+			session->is_zuc256 = 1;
+		} else {
+			QAT_LOG(ERR, "Invalid key length: %d", key_length);
+			return -ENOTSUP;
+		}
+		if (internals->qat_dev->has_wireless_slice) {
+			is_wireless = 1;
+			session->is_wireless = 1;
+			hash_flag = 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS;
+		} else
+			session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
 		break;
 	case RTE_CRYPTO_AUTH_MD5:
 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
@@ -1002,6 +1065,21 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 			return -EINVAL;
 	}
 
+	if (is_wireless) {
+		if (!session->aes_cmac) {
+			/* Set the Use Extended Protocol Flags bit in LW 1 */
+			ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
+					header->ext_flags,
+					QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);
+
+			/* Set Hash Flags in LW 28 */
+			cd_ctrl->hash_flags |= hash_flag;
+		}
+		/* Force usage of Wireless Auth slice */
+		ICP_QAT_FW_USE_WAT_SLICE_SET(header->ext_flags,
+				QAT_LA_USE_WAT_SLICE);
+	}
+
 	return 0;
 }
 
@@ -1204,6 +1282,15 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_32_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_64_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_128_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
@@ -1286,6 +1373,10 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 		return ICP_QAT_HW_AES_BLK_SZ;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
 		return QAT_MD5_CBLOCK;
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
+		return ICP_QAT_HW_ZUC_256_BLK_SZ;
 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
 		/* return maximum block size in this case */
 		return QAT_SHA512_CBLOCK;
@@ -2040,7 +2131,8 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
 		|| cdesc->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3
+			|| cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
 		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
 	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
@@ -2075,6 +2167,17 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+	} else if (cdesc->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
+		if (cdesc->cipher_iv.length != 23 && cdesc->cipher_iv.length != 25) {
+			QAT_LOG(ERR, "Invalid IV length for ZUC256, must be 23 or 25.");
+			return -EINVAL;
+		}
+		total_key_size = ICP_QAT_HW_ZUC_256_KEY_SZ +
+			ICP_QAT_HW_ZUC_256_IV_SZ;
+		cipher_cd_ctrl->cipher_state_sz =
+			ICP_QAT_HW_ZUC_256_IV_SZ >> 3;
+		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2246,6 +2349,9 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
 		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
@@ -2519,7 +2625,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		cdesc->aad_len = aad_length;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+		if (!cdesc->is_wireless)
+			cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
 		state1_size = qat_hash_get_state1_size(
 				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
 		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
@@ -2540,10 +2647,12 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
-		hash->auth_config.config =
-			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
-				cdesc->qat_hash_alg, digestsize);
-		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+		if (!cdesc->is_wireless) {
+			hash->auth_config.config =
+				ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+					cdesc->qat_hash_alg, digestsize);
+			cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+		}
 		state1_size = qat_hash_get_state1_size(
 				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
 		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
@@ -2554,6 +2663,18 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
+		state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
+		state2_size = ICP_QAT_HW_ZUC_256_STATE2_SZ;
+		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+			+ ICP_QAT_HW_ZUC_256_IV_SZ);
+
+		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+		cd_extra_size += ICP_QAT_HW_ZUC_256_IV_SZ;
+		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_256_IV_SZ >> 3;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
 #ifdef RTE_QAT_OPENSSL
@@ -2740,6 +2861,9 @@ int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
 		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
 		break;
+	case ICP_QAT_HW_ZUC_256_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_256;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9209e2e8df..2e25c90342 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -140,6 +140,8 @@ struct qat_sym_session {
 	uint8_t is_auth;
 	uint8_t is_cnt_zero;
 	/* Some generations need different setup of counter */
+	uint8_t is_zuc256;
+	uint8_t is_wireless;
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
 	qat_sym_build_request_t build_request[2];
-- 
2.25.1


  parent reply	other threads:[~2024-02-26 17:08 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-12-19 15:51 [PATCH 0/4] add new QAT gen3 device Ciara Power
2023-12-19 15:51 ` [PATCH 1/4] crypto/qat: add new " Ciara Power
2023-12-19 15:51 ` [PATCH 2/4] crypto/qat: add zuc256 wireless slice for gen3 Ciara Power
2023-12-19 15:51 ` [PATCH 3/4] crypto/qat: add new gen3 CMAC macros Ciara Power
2023-12-19 15:51 ` [PATCH 4/4] crypto/qat: disable asym and compression for new gen3 device Ciara Power
2024-02-23 15:12 ` [PATCH v2 0/4] add new QAT gen3 and gen5 Ciara Power
2024-02-23 15:12   ` [PATCH v2 1/4] common/qat: add new gen3 device Ciara Power
2024-02-23 15:12   ` [PATCH v2 2/4] common/qat: add zuc256 wireless slice for gen3 Ciara Power
2024-02-23 15:12   ` [PATCH v2 3/4] common/qat: add new gen3 CMAC macros Ciara Power
2024-02-23 15:12   ` [PATCH v2 4/4] common/qat: add gen5 device Ciara Power
2024-02-26 13:32   ` [PATCH v2 0/4] add new QAT gen3 and gen5 Ji, Kai
2024-02-29  9:48     ` Kusztal, ArkadiuszX
2024-02-26 17:08 ` [PATCH v3 " Ciara Power
2024-02-26 17:08   ` [PATCH v3 1/4] common/qat: add new gen3 device Ciara Power
2024-02-26 17:08   ` Ciara Power [this message]
2024-02-26 17:08   ` [PATCH v3 3/4] common/qat: add new gen3 CMAC macros Ciara Power
2024-02-26 17:08   ` [PATCH v3 4/4] common/qat: add gen5 device Ciara Power
2024-02-29 18:53   ` [EXT] [PATCH v3 0/4] add new QAT gen3 and gen5 Akhil Goyal

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240226170818.533793-3-ciara.power@intel.com \
    --to=ciara.power@intel.com \
    --cc=arkadiuszx.kusztal@intel.com \
    --cc=dev@dpdk.org \
    --cc=gakhil@marvell.com \
    --cc=kai.ji@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).