DPDK patches and discussions
From: Hanxiao Li <li.hanxiao@zte.com.cn>
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Hanxiao Li <li.hanxiao@zte.com.cn>
Subject: [PATCH v7 5/8] crypto/zsda: configure drivers, sessions, capabilities of cryptodev
Date: Fri, 27 Sep 2024 21:09:43 +0800	[thread overview]
Message-ID: <20240927131000.738277-5-li.hanxiao@zte.com.cn> (raw)
In-Reply-To: <20240927131000.738277-1-li.hanxiao@zte.com.cn>



Add drivers, interfaces and session configuration of the zsda cryptodev.

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 MAINTAINERS                                 |   3 +
 drivers/common/zsda/meson.build             |  15 +-
 drivers/crypto/zsda/zsda_sym.c              | 285 +++++++++++
 drivers/crypto/zsda/zsda_sym.h              |  50 ++
 drivers/crypto/zsda/zsda_sym_capabilities.h | 112 +++++
 drivers/crypto/zsda/zsda_sym_pmd.c          | 429 +++++++++++++++++
 drivers/crypto/zsda/zsda_sym_pmd.h          |  35 ++
 drivers/crypto/zsda/zsda_sym_session.c      | 503 ++++++++++++++++++++
 drivers/crypto/zsda/zsda_sym_session.h      |  82 ++++
 9 files changed, 1513 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/zsda/zsda_sym.c
 create mode 100644 drivers/crypto/zsda/zsda_sym.h
 create mode 100644 drivers/crypto/zsda/zsda_sym_capabilities.h
 create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.c
 create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.h
 create mode 100644 drivers/crypto/zsda/zsda_sym_session.c
 create mode 100644 drivers/crypto/zsda/zsda_sym_session.h

diff --git a/MAINTAINERS b/MAINTAINERS
index e38e6589f7..ff4fc977a0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1221,6 +1221,9 @@ F: drivers/crypto/virtio/
 F: doc/guides/cryptodevs/virtio.rst
 F: doc/guides/cryptodevs/features/virtio.ini
 
+ZTE Storage Data Accelerator (ZSDA)
+M: Hanxiao Li <li.hanxiao@zte.com.cn>
+F: drivers/crypto/zsda/
 
 Compression Drivers
 -------------------
diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
index 1fed5a7f78..cbbcc63abf 100644
--- a/drivers/common/zsda/meson.build
+++ b/drivers/common/zsda/meson.build
@@ -7,7 +7,7 @@ if is_windows
     subdir_done()
 endif
 
-deps += ['bus_pci', 'compressdev']
+deps += ['bus_pci', 'compressdev', 'cryptodev']
 sources += files(
 		'zsda_common.c',
 		'zsda_logs.c',
@@ -24,3 +24,16 @@ if zsda_compress
 		sources += files(join_paths(zsda_compress_relpath, f))
 	endforeach
 endif
+
+zsda_crypto = true
+zsda_crypto_path = 'crypto/zsda'
+zsda_crypto_relpath = '../../' + zsda_crypto_path
+if zsda_crypto
+	libcrypto = dependency('libcrypto', required: false, method: 'pkg-config')
+	foreach f: ['zsda_sym_pmd.c', 'zsda_sym_session.c', 'zsda_sym.c']
+		sources += files(join_paths(zsda_crypto_relpath, f))
+	endforeach
+	deps += ['security']
+	ext_deps += libcrypto
+	cflags += ['-DBUILD_ZSDA_SYM']
+endif
diff --git a/drivers/crypto/zsda/zsda_sym.c b/drivers/crypto/zsda/zsda_sym.c
new file mode 100644
index 0000000000..38de4160a0
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym.c
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "cryptodev_pmd.h"
+
+#include "zsda_logs.h"
+#include "zsda_sym.h"
+#include "zsda_sym_pmd.h"
+#include "zsda_sym_session.h"
+
+#define choose_dst_mbuf(mbuf_src, mbuf_dst) ((mbuf_dst) == NULL ? (mbuf_src) : (mbuf_dst))
+#define LBADS_MAX_REMAINDER (16 - 1)
+
+void
+zsda_reverse_memcpy(uint8_t *dst, const uint8_t *src, size_t n)
+{
+	size_t i;
+
+	for (i = 0; i < n; ++i)
+		dst[n - 1 - i] = src[i];
+}
+
+static uint8_t
+zsda_get_opcode_hash(struct zsda_sym_session *sess)
+{
+	switch (sess->auth.algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		return ZSDA_OPC_HASH_SHA1;
+
+	case RTE_CRYPTO_AUTH_SHA224:
+		return ZSDA_OPC_HASH_SHA2_224;
+
+	case RTE_CRYPTO_AUTH_SHA256:
+		return ZSDA_OPC_HASH_SHA2_256;
+
+	case RTE_CRYPTO_AUTH_SHA384:
+		return ZSDA_OPC_HASH_SHA2_384;
+
+	case RTE_CRYPTO_AUTH_SHA512:
+		return ZSDA_OPC_HASH_SHA2_512;
+
+	case RTE_CRYPTO_AUTH_SM3:
+		return ZSDA_OPC_HASH_SM3;
+	default:
+		break;
+	}
+
+	return ZSDA_OPC_INVALID;
+}
+
+static uint8_t
+zsda_get_opcode_crypto(struct zsda_sym_session *sess)
+{
+	if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		if (sess->cipher.algo == RTE_CRYPTO_CIPHER_AES_XTS &&
+		    sess->cipher.key_encry.length == 32)
+			return ZSDA_OPC_EC_AES_XTS_256;
+		else if (sess->cipher.algo == RTE_CRYPTO_CIPHER_AES_XTS &&
+			 sess->cipher.key_encry.length == 64)
+			return ZSDA_OPC_EC_AES_XTS_512;
+		else if (sess->cipher.algo == RTE_CRYPTO_CIPHER_SM4_XTS)
+			return ZSDA_OPC_EC_SM4_XTS_256;
+	} else if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		if (sess->cipher.algo == RTE_CRYPTO_CIPHER_AES_XTS &&
+		    sess->cipher.key_decry.length == 32)
+			return ZSDA_OPC_DC_AES_XTS_256;
+		else if (sess->cipher.algo == RTE_CRYPTO_CIPHER_AES_XTS &&
+			 sess->cipher.key_decry.length == 64)
+			return ZSDA_OPC_DC_AES_XTS_512;
+		else if (sess->cipher.algo == RTE_CRYPTO_CIPHER_SM4_XTS)
+			return ZSDA_OPC_DC_SM4_XTS_256;
+	}
+	return ZSDA_OPC_INVALID;
+}
+
+int
+zsda_encry_match(const void *op_in)
+{
+	const struct rte_crypto_op *op = op_in;
+	struct rte_cryptodev_sym_session *session = op->sym->session;
+	struct zsda_sym_session *sess =
+		(struct zsda_sym_session *)session->driver_priv_data;
+
+	if (sess->chain_order == ZSDA_SYM_CHAIN_ONLY_CIPHER &&
+	    sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		return 1;
+	else
+		return 0;
+}
+
+int
+zsda_decry_match(const void *op_in)
+{
+	const struct rte_crypto_op *op = op_in;
+	struct rte_cryptodev_sym_session *session = op->sym->session;
+	struct zsda_sym_session *sess =
+		(struct zsda_sym_session *)session->driver_priv_data;
+
+	if (sess->chain_order == ZSDA_SYM_CHAIN_ONLY_CIPHER &&
+	    sess->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
+		return 1;
+	else
+		return 0;
+}
+
+int
+zsda_hash_match(const void *op_in)
+{
+	const struct rte_crypto_op *op = op_in;
+	struct rte_cryptodev_sym_session *session = op->sym->session;
+	struct zsda_sym_session *sess =
+		(struct zsda_sym_session *)session->driver_priv_data;
+
+	if (sess->chain_order == ZSDA_SYM_CHAIN_ONLY_AUTH)
+		return 1;
+	else
+		return 0;
+}
+
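+/* Check the cipher data length: it must be at least 16 bytes and, when a
+ * data-unit size (LBADS) is set, either be a multiple of that size or
+ * leave a remainder larger than LBADS_MAX_REMAINDER bytes.
+ */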
+static int
+zsda_check_len_lbads(uint32_t data_len, uint32_t lbads_size)
+{
+	if (data_len < 16) {
+		ZSDA_LOG(ERR, "data_len wrong!");
+		return ZSDA_FAILED;
+	}
+	if (lbads_size != 0) {
+		if (!(((data_len % lbads_size) == 0) ||
+		      ((data_len % lbads_size) > LBADS_MAX_REMAINDER))) {
+			ZSDA_LOG(ERR, "data_len wrong!");
+			return ZSDA_FAILED;
+		}
+	}
+
+	return 0;
+}
+
+int
+zsda_build_cipher_request(void *op_in, const struct zsda_queue *queue,
+			 void **op_cookies, const uint16_t new_tail)
+{
+	struct rte_crypto_op *op = op_in;
+
+	struct rte_cryptodev_sym_session *session = op->sym->session;
+	struct zsda_sym_session *sess =
+		(struct zsda_sym_session *)session->driver_priv_data;
+
+	struct zsda_wqe_crpt *wqe =
+		(struct zsda_wqe_crpt *)(queue->base_addr +
+					 (new_tail * queue->msg_size));
+	struct zsda_op_cookie *cookie = op_cookies[new_tail];
+	struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+	struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+	struct rte_mbuf *mbuf;
+
+	int ret = 0;
+	uint32_t op_offset;
+	uint32_t op_src_len;
+	uint32_t op_dst_len;
+	const uint8_t *iv_addr = NULL;
+	uint8_t iv_len = 0;
+
+	ret = zsda_check_len_lbads(op->sym->cipher.data.length,
+				   sess->cipher.dataunit_len);
+	if (ret)
+		return ZSDA_FAILED;
+
+	op_offset = op->sym->cipher.data.offset;
+	op_src_len = op->sym->cipher.data.length;
+	mbuf = op->sym->m_src;
+	ret = zsda_fill_sgl(mbuf, op_offset, sgl_src, cookie->sgl_src_phys_addr,
+			    op_src_len, NULL);
+
+	mbuf = choose_dst_mbuf(op->sym->m_src, op->sym->m_dst);
+	op_dst_len = mbuf->pkt_len - op_offset;
+	ret |= zsda_fill_sgl(mbuf, op_offset, sgl_dst,
+			     cookie->sgl_dst_phys_addr, op_dst_len, NULL);
+
+	if (ret) {
+		ZSDA_LOG(ERR, E_FUNC);
+		return ret;
+	}
+
+	cookie->used = true;
+	cookie->sid = new_tail;
+	cookie->op = op;
+
+	memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+	wqe->rx_length = op_src_len;
+	wqe->tx_length = op_dst_len;
+	wqe->valid = queue->valid;
+	wqe->op_code = zsda_get_opcode_crypto(sess);
+	wqe->sid = cookie->sid;
+	wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+	wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+	wqe->rx_addr = cookie->sgl_src_phys_addr;
+	wqe->tx_addr = cookie->sgl_dst_phys_addr;
+	wqe->cfg.lbads = sess->cipher.lbads;
+
+	if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		memcpy((uint8_t *)wqe->cfg.key, sess->cipher.key_encry.data,
+		       ZSDA_CIPHER_KEY_MAX_LEN);
+	else
+		memcpy((uint8_t *)wqe->cfg.key, sess->cipher.key_decry.data,
+		       ZSDA_CIPHER_KEY_MAX_LEN);
+
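+	/* Split the cipher IV across the slba fields, each half byte-reversed:
+	 * the first half goes to slba_H, the second half to slba_L.
+	 */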
+	iv_addr = (const uint8_t *)rte_crypto_op_ctod_offset(
+			       op, char *, sess->cipher.iv.offset);
+	iv_len = sess->cipher.iv.length;
+	zsda_reverse_memcpy((uint8_t *)wqe->cfg.slba_H, iv_addr, iv_len / 2);
+	zsda_reverse_memcpy((uint8_t *)wqe->cfg.slba_L, iv_addr + 8, iv_len / 2);
+
+	return ret;
+}
+
+int
+zsda_build_hash_request(void *op_in, const struct zsda_queue *queue,
+	       void **op_cookies, const uint16_t new_tail)
+{
+	struct rte_crypto_op *op = op_in;
+
+	struct rte_cryptodev_sym_session *session = op->sym->session;
+	struct zsda_sym_session *sess =
+		(struct zsda_sym_session *)session->driver_priv_data;
+
+	struct zsda_wqe_crpt *wqe =
+		(struct zsda_wqe_crpt *)(queue->base_addr +
+					 (new_tail * queue->msg_size));
+	struct zsda_op_cookie *cookie = op_cookies[new_tail];
+	struct zsda_sgl *sgl_src = &cookie->sgl_src;
+	uint8_t opcode;
+	uint32_t op_offset;
+	uint32_t op_src_len;
+	int ret = 0;
+
+	memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+	wqe->rx_length = op->sym->auth.data.length;
+	wqe->tx_length = sess->auth.digest_length;
+
+	opcode = zsda_get_opcode_hash(sess);
+	if (opcode == ZSDA_OPC_INVALID) {
+		ZSDA_LOG(ERR, E_FUNC);
+		return ZSDA_FAILED;
+	}
+
+	op_offset = op->sym->auth.data.offset;
+	op_src_len = op->sym->auth.data.length;
+	ret = zsda_fill_sgl(op->sym->m_src, op_offset, sgl_src,
+				   cookie->sgl_src_phys_addr, op_src_len, NULL);
+	if (ret) {
+		ZSDA_LOG(ERR, E_FUNC);
+		return ret;
+	}
+
+	cookie->used = true;
+	cookie->sid = new_tail;
+	cookie->op = op;
+	wqe->valid = queue->valid;
+	wqe->op_code = opcode;
+	wqe->sid = cookie->sid;
+	wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+	wqe->tx_sgl_type = SGL_ELM_TYPE_PHYS_ADDR;
+	wqe->rx_addr = cookie->sgl_src_phys_addr;
+	wqe->tx_addr = op->sym->auth.digest.phys_addr;
+
+	return ret;
+}
+
+int
+zsda_crypto_callback(void *cookie_in, struct zsda_cqe *cqe)
+{
+	struct zsda_op_cookie *tmp_cookie = cookie_in;
+	struct rte_crypto_op *op = tmp_cookie->op;
+
+	if (!(CQE_ERR0(cqe->err0) || CQE_ERR1(cqe->err1)))
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	else {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return ZSDA_FAILED;
+	}
+
+	return ZSDA_SUCCESS;
+}
diff --git a/drivers/crypto/zsda/zsda_sym.h b/drivers/crypto/zsda/zsda_sym.h
new file mode 100644
index 0000000000..faf14047b6
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_H_
+#define _ZSDA_SYM_H_
+
+#include "zsda_common.h"
+#include "zsda_qp.h"
+
+#define ZSDA_CIPHER_KEY_MAX_LEN 64
+struct crypto_cfg {
+	uint8_t slba_L[8];
+	uint8_t key[ZSDA_CIPHER_KEY_MAX_LEN];
+	uint8_t lbads : 4;
+	uint8_t resv1 : 4;
+	uint8_t resv2[7];
+	uint8_t slba_H[8];
+	uint8_t resv3[8];
+} __rte_packed;
+
+struct zsda_wqe_crpt {
+	uint8_t valid;
+	uint8_t op_code;
+	uint16_t sid;
+	uint8_t resv[3];
+	uint8_t rx_sgl_type : 4;
+	uint8_t tx_sgl_type : 4;
+	uint64_t rx_addr;
+	uint32_t rx_length;
+	uint64_t tx_addr;
+	uint32_t tx_length;
+	struct crypto_cfg cfg;
+} __rte_packed;
+
+int zsda_build_cipher_request(void *op_in, const struct zsda_queue *queue,
+			 void **op_cookies, const uint16_t new_tail);
+
+int zsda_build_hash_request(void *op_in, const struct zsda_queue *queue,
+		       void **op_cookies, const uint16_t new_tail);
+
+int zsda_encry_match(const void *op_in);
+int zsda_decry_match(const void *op_in);
+int zsda_hash_match(const void *op_in);
+
+void zsda_reverse_memcpy(uint8_t *dst, const uint8_t *src, size_t n);
+
+int zsda_crypto_callback(void *cookie_in, struct zsda_cqe *cqe);
+
+#endif /* _ZSDA_SYM_H_ */
diff --git a/drivers/crypto/zsda/zsda_sym_capabilities.h b/drivers/crypto/zsda/zsda_sym_capabilities.h
new file mode 100644
index 0000000000..dd387b36ad
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_capabilities.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_CAPABILITIES_H_
+#define _ZSDA_SYM_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities zsda_crypto_sym_capabilities[] = {
+	{/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1,
+				.block_size = 64,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 20, .max = 20, .increment = 0},
+				.iv_size = {0} },
+			}	},
+		}
+	},
+	{/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224,
+				.block_size = 64,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 28, .max = 28, .increment = 0},
+				.iv_size = {0} },
+			}	},
+		}
+	},
+	{/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256,
+				.block_size = 64,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 32, .max = 32, .increment = 0},
+				.iv_size = {0} },
+			} },
+		}
+	},
+	{/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384,
+				.block_size = 128,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 48, .max = 48, .increment = 0},
+				.iv_size = {0} },
+			} },
+		}
+	},
+	{/* SHA512 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512,
+				.block_size = 128,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 64, .max = 64, .increment = 0},
+				.iv_size = {0} },
+			} },
+		}
+	},
+	{/* SM3 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SM3,
+				.block_size = 64,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 32, .max = 32, .increment = 0},
+				.iv_size = {0} },
+			} },
+		}
+	},
+	{/* AES XTS */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{ .cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_XTS,
+				.block_size = 16,
+				.key_size = {.min = 16, .max = 32, .increment = 16},
+				.iv_size = {.min = 16, .max = 16, .increment = 0} },
+			} },
+		}
+	},
+	{/* SM4 XTS */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{ .cipher = {
+				.algo = RTE_CRYPTO_CIPHER_SM4_XTS,
+				.block_size = 16,
+				.key_size = {.min = 32, .max = 32, .increment = 0},
+				.iv_size = {.min = 16, .max = 16, .increment = 0} },
+			} },
+		}
+	}
+};
+#endif /* _ZSDA_SYM_CAPABILITIES_H_ */
+
diff --git a/drivers/crypto/zsda/zsda_sym_pmd.c b/drivers/crypto/zsda/zsda_sym_pmd.c
new file mode 100644
index 0000000000..ac5a63b96e
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_pmd.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_cryptodev.h>
+
+#include "cryptodev_pmd.h"
+#include "zsda_logs.h"
+#include "zsda_sym.h"
+#include "zsda_sym_pmd.h"
+#include "zsda_sym_session.h"
+#include "zsda_sym_capabilities.h"
+
+uint8_t zsda_sym_driver_id;
+
+static int
+zsda_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+		    __rte_unused struct rte_cryptodev_config *config)
+{
+	return ZSDA_SUCCESS;
+}
+
+static int zsda_sym_qp_release(struct rte_cryptodev *dev,
+				uint16_t queue_pair_id);
+
+static int
+zsda_sym_dev_start(struct rte_cryptodev *dev)
+{
+	struct zsda_sym_dev_private *sym_dev = dev->data->dev_private;
+	int ret = 0;
+
+	ret = zsda_queue_start(sym_dev->zsda_pci_dev->pci_dev);
+
+	if (ret)
+		ZSDA_LOG(ERR, E_START_Q);
+	return ret;
+}
+
+static void
+zsda_sym_dev_stop(struct rte_cryptodev *dev)
+{
+	struct zsda_sym_dev_private *sym_dev = dev->data->dev_private;
+
+	zsda_queue_stop(sym_dev->zsda_pci_dev->pci_dev);
+}
+
+static int
+zsda_sym_dev_close(struct rte_cryptodev *dev)
+{
+	int ret = 0;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_queue_pairs; i++)
+		ret |= zsda_sym_qp_release(dev, i);
+
+	return ret;
+}
+
+static void
+zsda_sym_dev_info_get(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_info *info)
+{
+	struct zsda_sym_dev_private *sym_priv = dev->data->dev_private;
+
+	if (info != NULL) {
+		info->max_nb_queue_pairs =
+			zsda_crypto_max_nb_qps(sym_priv->zsda_pci_dev);
+		info->feature_flags = dev->feature_flags;
+		info->capabilities = sym_priv->zsda_dev_capabilities;
+		info->driver_id = zsda_sym_driver_id;
+		info->sym.max_nb_sessions = 0;
+	}
+}
+
+static void
+zsda_sym_stats_get(struct rte_cryptodev *dev, struct rte_cryptodev_stats *stats)
+{
+	struct zsda_common_stat comm = {0};
+
+	zsda_stats_get(dev->data->queue_pairs, dev->data->nb_queue_pairs,
+		       &comm);
+	stats->enqueued_count = comm.enqueued_count;
+	stats->dequeued_count = comm.dequeued_count;
+	stats->enqueue_err_count = comm.enqueue_err_count;
+	stats->dequeue_err_count = comm.dequeue_err_count;
+}
+
+static void
+zsda_sym_stats_reset(struct rte_cryptodev *dev)
+{
+	zsda_stats_reset(dev->data->queue_pairs, dev->data->nb_queue_pairs);
+}
+
+static int
+zsda_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+	ZSDA_LOG(DEBUG, "Release sym qp %u on device %d", queue_pair_id,
+		 dev->data->dev_id);
+
+	return zsda_queue_pair_release(
+		(struct zsda_qp **)&(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int
+zsda_setup_encrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+		     struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+	enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_ENCRYPT;
+	struct zsda_qp_config conf;
+	int ret = 0;
+	struct zsda_qp_hw *qp_hw;
+
+	qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+	conf.hw = qp_hw->data + qp_id;
+	conf.service_type = type;
+	conf.cookie_size = sizeof(struct zsda_op_cookie);
+	conf.nb_descriptors = nb_des;
+	conf.socket_id = socket_id;
+	conf.service_str = "sym_encrypt";
+
+	ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+	qp->srv[type].rx_cb = zsda_crypto_callback;
+	qp->srv[type].tx_cb = zsda_build_cipher_request;
+	qp->srv[type].match = zsda_encry_match;
+
+	return ret;
+}
+
+static int
+zsda_setup_decrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+		     struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+	enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_DECRYPT;
+	struct zsda_qp_config conf;
+	int ret = 0;
+	struct zsda_qp_hw *qp_hw;
+
+	qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+	conf.hw = qp_hw->data + qp_id;
+	conf.service_type = type;
+
+	conf.cookie_size = sizeof(struct zsda_op_cookie);
+	conf.nb_descriptors = nb_des;
+	conf.socket_id = socket_id;
+	conf.service_str = "sym_decrypt";
+
+	ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+	qp->srv[type].rx_cb = zsda_crypto_callback;
+	qp->srv[type].tx_cb = zsda_build_cipher_request;
+	qp->srv[type].match = zsda_decry_match;
+
+	return ret;
+}
+
+static int
+zsda_setup_hash_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+		 struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+	enum zsda_service_type type = ZSDA_SERVICE_HASH_ENCODE;
+	struct zsda_qp_config conf;
+	int ret = 0;
+	struct zsda_qp_hw *qp_hw;
+
+	qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+	conf.hw = qp_hw->data + qp_id;
+	conf.service_type = type;
+	conf.cookie_size = sizeof(struct zsda_op_cookie);
+	conf.nb_descriptors = nb_des;
+	conf.socket_id = socket_id;
+	conf.service_str = "sym_hash";
+
+	ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+	qp->srv[type].rx_cb = zsda_crypto_callback;
+	qp->srv[type].tx_cb = zsda_build_hash_request;
+	qp->srv[type].match = zsda_hash_match;
+
+	return ret;
+}
+
+static int
+zsda_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		  const struct rte_cryptodev_qp_conf *qp_conf,
+		  int socket_id)
+{
+	int ret = 0;
+	struct zsda_qp *qp_new;
+
+	struct zsda_qp **qp_addr =
+		(struct zsda_qp **)&(dev->data->queue_pairs[qp_id]);
+	struct zsda_sym_dev_private *sym_priv = dev->data->dev_private;
+	struct zsda_pci_device *zsda_pci_dev = sym_priv->zsda_pci_dev;
+	uint16_t num_qps_encrypt = zsda_qps_per_service(
+		zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+	uint16_t num_qps_decrypt = zsda_qps_per_service(
+		zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+	uint16_t num_qps_hash = zsda_qps_per_service(
+		zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+
+	uint32_t nb_des = qp_conf->nb_descriptors;
+	nb_des = (nb_des == NB_DES) ? nb_des : NB_DES;
+
+	if (*qp_addr != NULL) {
+		ret = zsda_sym_qp_release(dev, qp_id);
+		if (ret)
+			return ret;
+	}
+
+	qp_new = rte_zmalloc_socket("zsda PMD qp metadata", sizeof(*qp_new),
+				    RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp_new == NULL) {
+		ZSDA_LOG(ERR, "Failed to alloc mem for qp struct");
+		return -ENOMEM;
+	}
+
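+	/* When the whole function is dedicated to a single service, set up
+	 * only that queue type; otherwise configure encrypt, decrypt and
+	 * hash on the same queue pair.
+	 */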
+	if (num_qps_encrypt == MAX_QPS_ON_FUNCTION)
+		ret = zsda_setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					    socket_id);
+	else if (num_qps_decrypt == MAX_QPS_ON_FUNCTION)
+		ret = zsda_setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					    socket_id);
+	else if (num_qps_hash == MAX_QPS_ON_FUNCTION)
+		ret = zsda_setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					socket_id);
+	else {
+		ret = zsda_setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					    socket_id);
+		ret |= zsda_setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					    socket_id);
+		ret |= zsda_setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					socket_id);
+	}
+
+	if (ret) {
+		rte_free(qp_new);
+		return ret;
+	}
+
+	qp_new->mmap_bar_addr =
+		sym_priv->zsda_pci_dev->pci_dev->mem_resource[0].addr;
+	*qp_addr = qp_new;
+
+	return ret;
+}
+
+static unsigned int
+zsda_sym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct zsda_sym_session), 8);
+}
+
+static int
+zsda_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+			   struct rte_crypto_sym_xform *xform,
+			   struct rte_cryptodev_sym_session *sess)
+{
+	void *sess_private_data;
+	int ret = 0;
+
+	if (unlikely(sess == NULL)) {
+		ZSDA_LOG(ERR, "Invalid session struct");
+		return -EINVAL;
+	}
+
+	sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
+
+	ret = zsda_crypto_set_session_parameters(
+			sess_private_data, xform);
+
+	if (ret != 0) {
+		ZSDA_LOG(ERR, "Failed to configure session parameters");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void
+zsda_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
+			struct rte_cryptodev_sym_session  *sess __rte_unused)
+{}
+
+static struct rte_cryptodev_ops crypto_zsda_ops = {
+
+	.dev_configure = zsda_sym_dev_config,
+	.dev_start = zsda_sym_dev_start,
+	.dev_stop = zsda_sym_dev_stop,
+	.dev_close = zsda_sym_dev_close,
+	.dev_infos_get = zsda_sym_dev_info_get,
+
+	.stats_get = zsda_sym_stats_get,
+	.stats_reset = zsda_sym_stats_reset,
+	.queue_pair_setup = zsda_sym_qp_setup,
+	.queue_pair_release = zsda_sym_qp_release,
+
+	.sym_session_get_size = zsda_sym_session_get_private_size,
+	.sym_session_configure = zsda_sym_session_configure,
+	.sym_session_clear = zsda_sym_session_clear,
+};
+
+static uint16_t
+zsda_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+			      uint16_t nb_ops)
+{
+	return zsda_enqueue_op_burst((struct zsda_qp *)qp, (void **)ops,
+				     nb_ops);
+}
+
+static uint16_t
+zsda_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+			      uint16_t nb_ops)
+{
+	return zsda_dequeue_op_burst((struct zsda_qp *)qp, (void **)ops,
+				     nb_ops);
+}
+
+static const char zsda_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_ZSDA_SYM_PMD);
+static const struct rte_driver cryptodev_zsda_sym_driver = {
+	.name = zsda_sym_drv_name, .alias = zsda_sym_drv_name};
+
+int
+zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev)
+{
+	int ret = 0;
+	struct zsda_device_info *dev_info =
+		&zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = (int)rte_socket_id(),
+		.private_data_size = sizeof(struct zsda_sym_dev_private)};
+
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct zsda_sym_dev_private *sym_priv;
+	const struct rte_cryptodev_capabilities *capabilities;
+	uint64_t capa_size;
+
+	init_params.max_nb_queue_pairs = zsda_crypto_max_nb_qps(zsda_pci_dev);
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", zsda_pci_dev->name,
+		 "sym_encrypt");
+	ZSDA_LOG(DEBUG, "Creating ZSDA SYM device %s", name);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return ZSDA_SUCCESS;
+
+	dev_info->sym_rte_dev.driver = &cryptodev_zsda_sym_driver;
+	dev_info->sym_rte_dev.numa_node = dev_info->pci_dev->device.numa_node;
+	dev_info->sym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name, &(dev_info->sym_rte_dev),
+					     &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	dev_info->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = zsda_sym_driver_id;
+
+	cryptodev->dev_ops = &crypto_zsda_ops;
+
+	cryptodev->enqueue_burst = zsda_sym_pmd_enqueue_op_burst;
+	cryptodev->dequeue_burst = zsda_sym_pmd_dequeue_op_burst;
+
+	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+				   RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+				   RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+				   RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+				   RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+				   RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+				   RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+	sym_priv = cryptodev->data->dev_private;
+	sym_priv->zsda_pci_dev = zsda_pci_dev;
+	capabilities = zsda_crypto_sym_capabilities;
+	capa_size = sizeof(zsda_crypto_sym_capabilities);
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, "ZSDA_SYM_CAPA");
+
+	sym_priv->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (sym_priv->capa_mz == NULL)
+		sym_priv->capa_mz = rte_memzone_reserve(
+			capa_memz_name, capa_size, rte_socket_id(), 0);
+
+	if (sym_priv->capa_mz == NULL) {
+		ZSDA_LOG(ERR, E_MALLOC);
+		ret = -EFAULT;
+		goto error;
+	}
+
+	memcpy(sym_priv->capa_mz->addr, capabilities, capa_size);
+	sym_priv->zsda_dev_capabilities = sym_priv->capa_mz->addr;
+
+	zsda_pci_dev->sym_dev = sym_priv;
+
+	return ZSDA_SUCCESS;
+
+error:
+
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&dev_info->sym_rte_dev, 0, sizeof(dev_info->sym_rte_dev));
+
+	return ret;
+}
+
+int
+zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (zsda_pci_dev == NULL)
+		return -ENODEV;
+	if (zsda_pci_dev->sym_dev == NULL)
+		return ZSDA_SUCCESS;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(zsda_pci_dev->sym_dev->capa_mz);
+
+	cryptodev = rte_cryptodev_pmd_get_dev(zsda_pci_dev->zsda_dev_id);
+
+	rte_cryptodev_pmd_destroy(cryptodev);
+	zsda_devs[zsda_pci_dev->zsda_dev_id].sym_rte_dev.name = NULL;
+	zsda_pci_dev->sym_dev = NULL;
+
+	return ZSDA_SUCCESS;
+}
+
+static struct cryptodev_driver zsda_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zsda_crypto_drv, cryptodev_zsda_sym_driver,
+			       zsda_sym_driver_id);
diff --git a/drivers/crypto/zsda/zsda_sym_pmd.h b/drivers/crypto/zsda/zsda_sym_pmd.h
new file mode 100644
index 0000000000..77175fed47
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_pmd.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_PMD_H_
+#define _ZSDA_SYM_PMD_H_
+
+#include "zsda_device.h"
+
+/** ZSDA Symmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_ZSDA_SYM_PMD crypto_zsda
+
+extern uint8_t zsda_sym_driver_id;
+
+/** Private data structure for a ZSDA device.
+ * This ZSDA device offers only symmetric crypto service;
+ * there can be one of these on each zsda_pci_device (VF).
+ */
+struct zsda_sym_dev_private {
+	struct zsda_pci_device *zsda_pci_dev;
+	/**< The zsda pci device hosting the service */
+
+	const struct rte_cryptodev_capabilities *zsda_dev_capabilities;
+	/* ZSDA device symmetric crypto capabilities */
+	const struct rte_memzone *capa_mz;
+	/* Shared memzone for storing capabilities */
+	uint16_t min_enq_burst_threshold;
+	uint32_t internal_capabilities; /* see flags ZSDA_SYM_CAP_xxx */
+};
+
+int zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_SYM_PMD_H_ */
diff --git a/drivers/crypto/zsda/zsda_sym_session.c b/drivers/crypto/zsda/zsda_sym_session.c
new file mode 100644
index 0000000000..79026ef4c0
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_session.c
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "cryptodev_pmd.h"
+
+#include "zsda_sym_session.h"
+#include "zsda_logs.h"
+
+/**************** AES KEY EXPANSION ****************/
+/**
+ * AES S-boxes
+ * Sbox table: 8bits input convert to 8bits output
+ **/
+static const unsigned char aes_sbox[256] = {
+	/* 0    1    2    3    4    5    6    7    8    9    A    B    C    D    E    F */
+	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
+	0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
+	0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
+	0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
+	0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
+	0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
+	0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
+	0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
+	0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
+	0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
+	0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
+	0xb0, 0x54, 0xbb, 0x16};
+
+/**
+ * The round constant word array, Rcon[i]
+ *
+ * From Wikipedia's article on the Rijndael key schedule @
+ * https://en.wikipedia.org/wiki/Rijndael_key_schedule#Rcon "Only the first some
+ * of these constants are actually used – up to rcon[10] for AES-128 (as 11
+ * round keys are needed), up to rcon[8] for AES-192, up to rcon[7] for AES-256.
+ * rcon[0] is not used in AES algorithm."
+ */
+static const unsigned char Rcon[11] = {0x8d, 0x01, 0x02, 0x04, 0x08, 0x10,
+				       0x20, 0x40, 0x80, 0x1b, 0x36};
+
+#define GET_AES_SBOX_VAL(num) (aes_sbox[(num)])
+
+/**************** SM4 KEY EXPANSION ****************/
+/*
+ * 32-bit integer manipulation macros (big endian)
+ */
+#ifndef GET_ULONG_BE
+#define GET_ULONG_BE(n, b, i)                                                  \
+	{                                                                      \
+		(n) = ((unsigned int)(b)[(i)] << 24) |                         \
+		      ((unsigned int)(b)[(i) + 1] << 16) |                     \
+		      ((unsigned int)(b)[(i) + 2] << 8) |                      \
+		      ((unsigned int)(b)[(i) + 3]);                            \
+	}
+#endif
+
+#ifndef PUT_ULONG_BE
+#define PUT_ULONG_BE(n, b, i)                                                  \
+	{                                                                      \
+		(b)[(i)] = (unsigned char)((n) >> 24);                         \
+		(b)[(i) + 1] = (unsigned char)((n) >> 16);                     \
+		(b)[(i) + 2] = (unsigned char)((n) >> 8);                      \
+		(b)[(i) + 3] = (unsigned char)((n));                           \
+	}
+#endif
+
+/**
+ * rotate shift left macro definition
+ *
+ **/
+#define SHL(x, n)  (((x)&0xFFFFFFFF) << n)
+#define ROTL(x, n) (SHL((x), n) | ((x) >> (32 - n)))
+
+/**
+ * SM4 S-boxes
+ * Sbox table: 8-bit input maps to 8-bit output
+ **/
+static unsigned char sm4_sbox[16][16] = {
+	{0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2,
+	 0x28, 0xfb, 0x2c, 0x05},
+	{0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26,
+	 0x49, 0x86, 0x06, 0x99},
+	{0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43,
+	 0xed, 0xcf, 0xac, 0x62},
+	{0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa,
+	 0x75, 0x8f, 0x3f, 0xa6},
+	{0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19,
+	 0xe6, 0x85, 0x4f, 0xa8},
+	{0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b,
+	 0x70, 0x56, 0x9d, 0x35},
+	{0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b,
+	 0x01, 0x21, 0x78, 0x87},
+	{0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7,
+	 0xa0, 0xc4, 0xc8, 0x9e},
+	{0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce,
+	 0xf9, 0x61, 0x15, 0xa1},
+	{0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30,
+	 0xf5, 0x8c, 0xb1, 0xe3},
+	{0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab,
+	 0x0d, 0x53, 0x4e, 0x6f},
+	{0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72,
+	 0x6d, 0x6c, 0x5b, 0x51},
+	{0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41,
+	 0x1f, 0x10, 0x5a, 0xd8},
+	{0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12,
+	 0xb8, 0xe5, 0xb4, 0xb0},
+	{0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09,
+	 0xc5, 0x6e, 0xc6, 0x84},
+	{0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e,
+	 0xd7, 0xcb, 0x39, 0x48},
+};
+
+/* System parameter */
+static const unsigned int FK[4] = {0xa3b1bac6, 0x56aa3350, 0x677d9197,
+				   0xb27022dc};
+
+/* fixed parameter */
+static const unsigned int CK[32] = {
+	0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1,
+	0xa8afb6bd, 0xc4cbd2d9, 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+	0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, 0xc0c7ced5, 0xdce3eaf1,
+	0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+	0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41,
+	0x484f565d, 0x646b7279};
+
+/*
+ * private function:
+ * look up in SM4 S-boxes and get the related value.
+ * args:    [in] inch: 0x00~0xFF (8 bits unsigned value).
+ */
+static unsigned char
+sm4Sbox(unsigned char inch)
+{
+	unsigned char *pTable = (unsigned char *)sm4_sbox;
+	unsigned char retVal = (unsigned char)(pTable[inch]);
+	return retVal;
+}
+
+/* private function:
+ * Calculating round encryption key.
+ * args:    [in] ka: ka is a 32 bits unsigned value;
+ * return:  sk[i]: i{0,1,2,3,...31}.
+ */
+static unsigned int
+sm4CalciRK(unsigned int ka)
+{
+	unsigned int bb = 0;
+	unsigned int rk = 0;
+	unsigned char a[4];
+	unsigned char b[4];
+
+	PUT_ULONG_BE(ka, a, 0)
+	b[0] = sm4Sbox(a[0]);
+	b[1] = sm4Sbox(a[1]);
+	b[2] = sm4Sbox(a[2]);
+	b[3] = sm4Sbox(a[3]);
+	GET_ULONG_BE(bb, b, 0)
+	rk = bb ^ (ROTL(bb, 13)) ^ (ROTL(bb, 23));
+	return rk;
+}
+
+static void
+zsda_sm4_key_expansion(unsigned int SK[32], const uint8_t key[16])
+{
+	unsigned int MK[4];
+	unsigned int k[36];
+	unsigned int i = 0;
+
+	GET_ULONG_BE(MK[0], key, 0);
+	GET_ULONG_BE(MK[1], key, 4);
+	GET_ULONG_BE(MK[2], key, 8);
+	GET_ULONG_BE(MK[3], key, 12);
+	k[0] = MK[0] ^ FK[0];
+	k[1] = MK[1] ^ FK[1];
+	k[2] = MK[2] ^ FK[2];
+	k[3] = MK[3] ^ FK[3];
+	for (; i < 32; i++) {
+		k[i + 4] = k[i] ^
+			   (sm4CalciRK(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]));
+		SK[i] = k[i + 4];
+	}
+}
+
+static void
+u32_to_u8(uint32_t *u_int32_t_data, uint8_t *u8_data)
+{
+	*(u8_data + 0) = ((*u_int32_t_data & 0xFF000000) >> 24) & (0xFF);
+	*(u8_data + 1) = ((*u_int32_t_data & 0x00FF0000) >> 16) & (0xFF);
+	*(u8_data + 2) = ((*u_int32_t_data & 0x0000FF00) >> 8) & (0xFF);
+	*(u8_data + 3) = (*u_int32_t_data & 0x000000FF);
+}
+
+static void
+zsda_aes_key_expansion(uint8_t *round_key, uint32_t round_num,
+		       const uint8_t *key, uint32_t key_len)
+{
+	uint32_t i, j, k, nk, nr;
+	uint8_t tempa[4];
+
+	nk = key_len >> 2;
+	nr = round_num;
+
+	/* The first round key is the key itself. */
+	for (i = 0; i < nk; ++i) {
+		round_key[(i * 4) + 0] = key[(i * 4) + 0];
+
+		round_key[(i * 4) + 1] = key[(i * 4) + 1];
+
+		round_key[(i * 4) + 2] = key[(i * 4) + 2];
+		round_key[(i * 4) + 3] = key[(i * 4) + 3];
+	}
+
+	/* All other round keys are found from the previous round keys. */
+	for (i = nk; i < (4 * (nr + 1)); ++i) {
+		k = (i - 1) * 4;
+		tempa[0] = round_key[k + 0];
+		tempa[1] = round_key[k + 1];
+		tempa[2] = round_key[k + 2];
+		tempa[3] = round_key[k + 3];
+
+		if ((nk != 0) && ((i % nk) == 0)) {
+			/* This function shifts the 4 bytes in a word to the
+			 * left once. [a0,a1,a2,a3] becomes [a1,a2,a3,a0]
+			 * Function RotWord()
+			 */
+			{
+				const u_int8_t u8tmp = tempa[0];
+
+				tempa[0] = tempa[1];
+				tempa[1] = tempa[2];
+				tempa[2] = tempa[3];
+				tempa[3] = u8tmp;
+			}
+
+			/* SubWord() is a function that takes a four-byte input
+			 * word and applies the S-box to each of the four bytes
+			 * to produce an output word. Function Subword()
+			 */
+			{
+				tempa[0] = GET_AES_SBOX_VAL(tempa[0]);
+				tempa[1] = GET_AES_SBOX_VAL(tempa[1]);
+				tempa[2] = GET_AES_SBOX_VAL(tempa[2]);
+				tempa[3] = GET_AES_SBOX_VAL(tempa[3]);
+			}
+
+			tempa[0] = tempa[0] ^ Rcon[i / nk];
+		}
+
+		if (nk == 8) {
+			if ((i % nk) == 4) {
+				/* Function Subword() */
+				{
+					tempa[0] = GET_AES_SBOX_VAL(tempa[0]);
+					tempa[1] = GET_AES_SBOX_VAL(tempa[1]);
+					tempa[2] = GET_AES_SBOX_VAL(tempa[2]);
+					tempa[3] = GET_AES_SBOX_VAL(tempa[3]);
+				}
+			}
+		}
+
+		j = i * 4;
+		k = (i - nk) * 4;
+		round_key[j + 0] = round_key[k + 0] ^ tempa[0];
+		round_key[j + 1] = round_key[k + 1] ^ tempa[1];
+		round_key[j + 2] = round_key[k + 2] ^ tempa[2];
+		round_key[j + 3] = round_key[k + 3] ^ tempa[3];
+	}
+}
+
+static void
+zsda_decry_set_key(uint8_t key[64], const uint8_t *key1_ptr, uint8_t skey_len,
+	      enum rte_crypto_cipher_algorithm algo)
+{
+	uint8_t round_num;
+	uint8_t dec_key1[ZSDA_AES_MAX_KEY_BYTE_LEN] = {0};
+	uint8_t aes_round_key[ZSDA_AES_MAX_EXP_BYTE_SIZE] = {0};
+	uint32_t sm4_round_key[ZSDA_SM4_MAX_EXP_DWORD_SIZE] = {0};
+
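+	/* Derive the decryption data key from the tail of the expanded key
+	 * schedule: for AES-XTS keep the last round key (and the previous one
+	 * as well for a 512-bit XTS key), for SM4-XTS keep the last four
+	 * expanded words in reverse order.
+	 */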
+	switch (algo) {
+	case RTE_CRYPTO_CIPHER_AES_XTS:
+		round_num = (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN)
+				    ? ZSDA_AES256_ROUND_NUM
+				    : ZSDA_AES512_ROUND_NUM;
+		zsda_aes_key_expansion(aes_round_key, round_num, key1_ptr,
+				       skey_len);
+		rte_memcpy(dec_key1,
+			   ((uint8_t *)aes_round_key + (16 * round_num)), 16);
+
+		if (skey_len == ZSDA_SYM_XTS_512_SKEY_LEN &&
+			(16 * round_num) <= ZSDA_AES_MAX_EXP_BYTE_SIZE) {
+			for (int i = 0; i < 16; i++) {
+				dec_key1[i + 16] =
+					aes_round_key[(16 * (round_num - 1)) + i];
+			}
+		}
+		break;
+	case RTE_CRYPTO_CIPHER_SM4_XTS:
+		zsda_sm4_key_expansion(sm4_round_key, key1_ptr);
+		for (size_t i = 0; i < 4; i++)
+			u32_to_u8((uint32_t *)sm4_round_key +
+					  ZSDA_SM4_MAX_EXP_DWORD_SIZE - 1 - i,
+				  dec_key1 + (4 * i));
+		break;
+	default:
+		ZSDA_LOG(ERR, "unknown cipher algo!");
+		return;
+	}
+
+	if (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN) {
+		zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_256_KEY2_OFF,
+			       key1_ptr + skey_len, skey_len);
+		zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_256_KEY1_OFF,
+			       dec_key1, skey_len);
+	} else {
+		zsda_reverse_memcpy(key, key1_ptr + skey_len, skey_len);
+		zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_512_KEY1_OFF,
+			       dec_key1, skey_len);
+	}
+}
+
+static uint8_t
+zsda_sym_lbads(uint32_t dataunit_len)
+{
+	uint8_t lbads;
+
+	switch (dataunit_len) {
+	case ZSDA_AES_LBADS_512:
+		lbads = ZSDA_AES_LBADS_INDICATE_512;
+		break;
+	case ZSDA_AES_LBADS_4096:
+		lbads = ZSDA_AES_LBADS_INDICATE_4096;
+		break;
+	case ZSDA_AES_LBADS_8192:
+		lbads = ZSDA_AES_LBADS_INDICATE_8192;
+		break;
+	case ZSDA_AES_LBADS_0:
+		lbads = ZSDA_AES_LBADS_INDICATE_0;
+		break;
+	default:
+		ZSDA_LOG(ERR, "dataunit_len should be 0/512/4096/8192 - %d.",
+			 dataunit_len);
+		lbads = ZSDA_AES_LBADS_INDICATE_INVALID;
+		break;
+	}
+	return lbads;
+}
+
+static int
+zsda_set_session_cipher(struct zsda_sym_session *sess,
+				   struct rte_crypto_cipher_xform *cipher_xform)
+{
+	uint8_t skey_len = 0;
+	const uint8_t *key1_ptr = NULL;
+
+	if (cipher_xform->key.length > ZSDA_CIPHER_KEY_MAX_LEN) {
+		ZSDA_LOG(ERR, "key length not supported");
+		return -EINVAL;
+	}
+
+	sess->chain_order = ZSDA_SYM_CHAIN_ONLY_CIPHER;
+	sess->cipher.iv.offset = cipher_xform->iv.offset;
+	sess->cipher.iv.length = cipher_xform->iv.length;
+	sess->cipher.op = cipher_xform->op;
+	sess->cipher.algo = cipher_xform->algo;
+	sess->cipher.dataunit_len = cipher_xform->dataunit_len;
+	sess->cipher.lbads = zsda_sym_lbads(cipher_xform->dataunit_len);
+	if (sess->cipher.lbads == ZSDA_AES_LBADS_INDICATE_INVALID) {
+		ZSDA_LOG(ERR, "dataunit_len wrong!");
+		return -EINVAL;
+	}
+
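+	/* An XTS key holds two sub-keys of equal length (key1 || key2). */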
+	skey_len = (cipher_xform->key.length / 2) & 0xff;
+
+	/* key set */
+	if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		sess->cipher.key_encry.length = cipher_xform->key.length;
+		if (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN) {
+			zsda_reverse_memcpy((uint8_t *)sess->cipher.key_encry.data +
+					       ZSDA_SYM_XTS_256_KEY2_OFF,
+				       (cipher_xform->key.data + skey_len),
+				       skey_len);
+			zsda_reverse_memcpy(((uint8_t *)sess->cipher.key_encry.data +
+					ZSDA_SYM_XTS_256_KEY1_OFF),
+				       cipher_xform->key.data, skey_len);
+		} else
+			zsda_reverse_memcpy((uint8_t *)sess->cipher.key_encry.data,
+				       cipher_xform->key.data,
+				       cipher_xform->key.length);
+	} else if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		sess->cipher.key_decry.length = cipher_xform->key.length;
+		key1_ptr = cipher_xform->key.data;
+		zsda_decry_set_key(sess->cipher.key_decry.data, key1_ptr, skey_len,
+			      sess->cipher.algo);
+	}
+
+	return 0;
+}
+
+static void
+zsda_set_session_auth(struct zsda_sym_session *sess,
+				 struct rte_crypto_auth_xform *xform)
+{
+	sess->auth.op = xform->op;
+	sess->auth.algo = xform->algo;
+	sess->auth.digest_length = xform->digest_length;
+	sess->chain_order = ZSDA_SYM_CHAIN_ONLY_AUTH;
+}
+
+static struct rte_crypto_auth_xform *
+zsda_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+	do {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return &xform->auth;
+
+		xform = xform->next;
+	} while (xform);
+
+	return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+zsda_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+	do {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return &xform->cipher;
+
+		xform = xform->next;
+	} while (xform);
+
+	return NULL;
+}
+
+/** Configure the session from a crypto xform chain */
+static enum zsda_sym_chain_order
+zsda_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+	enum zsda_sym_chain_order res = ZSDA_SYM_CHAIN_NOT_SUPPORTED;
+
+	if (xform != NULL) {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+			if (xform->next == NULL)
+				res = ZSDA_SYM_CHAIN_ONLY_AUTH;
+			else if (xform->next->type ==
+					RTE_CRYPTO_SYM_XFORM_CIPHER)
+				res = ZSDA_SYM_CHAIN_AUTH_CIPHER;
+		}
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+			if (xform->next == NULL)
+				res = ZSDA_SYM_CHAIN_ONLY_CIPHER;
+			else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+				res = ZSDA_SYM_CHAIN_CIPHER_AUTH;
+		}
+	}
+
+	return res;
+}
+
+/* Set session cipher parameters */
+int
+zsda_crypto_set_session_parameters(void *sess_priv,
+			 struct rte_crypto_sym_xform *xform)
+{
+
+	struct zsda_sym_session *sess = sess_priv;
+	struct rte_crypto_cipher_xform *cipher_xform =
+			zsda_get_cipher_xform(xform);
+	struct rte_crypto_auth_xform *auth_xform =
+			zsda_get_auth_xform(xform);
+
+	int ret = 0;
+
+	sess->chain_order = zsda_crypto_get_chain_order(xform);
+	switch (sess->chain_order) {
+	case ZSDA_SYM_CHAIN_ONLY_CIPHER:
+		zsda_set_session_cipher(sess, cipher_xform);
+		break;
+	case ZSDA_SYM_CHAIN_ONLY_AUTH:
+		zsda_set_session_auth(sess, auth_xform);
+		break;
+
+	default:
+		ZSDA_LOG(ERR, "Invalid chain order");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
diff --git a/drivers/crypto/zsda/zsda_sym_session.h b/drivers/crypto/zsda/zsda_sym_session.h
new file mode 100644
index 0000000000..1797e46cb3
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_session.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_SESSION_H_
+#define _ZSDA_SYM_SESSION_H_
+
+#include "zsda_sym.h"
+
+#define ZSDA_SYM_XTS_IV_SLBA_OFF  (8)
+#define ZSDA_SYM_XTS_256_SKEY_LEN (16)
+#define ZSDA_SYM_XTS_512_SKEY_LEN (32)
+#define ZSDA_SYM_XTS_256_KEY2_OFF (16)
+#define ZSDA_SYM_XTS_256_KEY1_OFF (48)
+#define ZSDA_SYM_XTS_512_KEY1_OFF (32)
+#define ZSDA_SYM_MIN_SRC_LEN_HASH (16)
+
+#define ZSDA_AES256_ROUND_NUM	    (10)
+#define ZSDA_AES512_ROUND_NUM	    (14)
+#define ZSDA_AES_MAX_EXP_BYTE_SIZE  (240)
+#define ZSDA_AES_MAX_KEY_BYTE_LEN   (32)
+#define ZSDA_SM4_MAX_EXP_DWORD_SIZE (32)
+
+#define ZSDA_AES_LBADS_0	  (0)
+#define ZSDA_AES_LBADS_512	  (512)
+#define ZSDA_AES_LBADS_4096	  (4096)
+#define ZSDA_AES_LBADS_8192	  (8192)
+
+#define ZSDA_AES_LBADS_INDICATE_0       (0x0)
+#define ZSDA_AES_LBADS_INDICATE_512     (0x9)
+#define ZSDA_AES_LBADS_INDICATE_4096    (0xC)
+#define ZSDA_AES_LBADS_INDICATE_8192    (0xD)
+#define ZSDA_AES_LBADS_INDICATE_INVALID (0xff)
+
+enum zsda_sym_chain_order {
+	ZSDA_SYM_CHAIN_ONLY_CIPHER,
+	ZSDA_SYM_CHAIN_ONLY_AUTH,
+	ZSDA_SYM_CHAIN_CIPHER_AUTH,
+	ZSDA_SYM_CHAIN_AUTH_CIPHER,
+	ZSDA_SYM_CHAIN_NOT_SUPPORTED
+};
+
+struct __rte_cache_aligned zsda_sym_session {
+	enum zsda_sym_chain_order chain_order;
+
+	/* Cipher Parameters */
+	struct {
+		enum rte_crypto_cipher_operation op;
+		enum rte_crypto_cipher_algorithm algo;
+		struct {
+			uint8_t data[ZSDA_CIPHER_KEY_MAX_LEN];
+			size_t length;
+		} key_encry;
+		struct {
+			uint8_t data[ZSDA_CIPHER_KEY_MAX_LEN];
+			size_t length;
+		} key_decry;
+		struct {
+			uint32_t offset;
+			size_t length;
+		} iv;
+
+		uint32_t dataunit_len;
+		uint8_t lbads;
+	} cipher;
+
+	struct {
+		enum rte_crypto_auth_operation op;
+		/* Auth operation */
+		enum rte_crypto_auth_algorithm algo;
+		/* Auth algorithm */
+		uint16_t digest_length;
+	} auth;
+
+	bool cipher_first;
+};
+
+
+int zsda_crypto_set_session_parameters(void *sess_priv,
+				       struct rte_crypto_sym_xform *xform);
+
+#endif /* _ZSDA_SYM_SESSION_H_ */
-- 
2.27.0



Thread overview: 35+ messages
2024-07-01  8:27 [PATCH] zsda:introduce zsda drivers and examples lhx
2024-07-02  8:52 ` David Marchand
2024-07-02 13:11 ` [EXTERNAL] " Akhil Goyal
2024-09-09  8:08 ` [PATCH v4 1/8] zsda: Introduce zsda device drivers Hanxiao Li
2024-09-10  9:15 ` [PATCH v5 " Hanxiao Li
2024-09-10  9:18   ` [PATCH v5 2/8] zsda: add support for zsdadev operations Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 3/8] zsda: add support for queue operation Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 4/8] zsda: add zsda compressdev driver and interface Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 5/8] zsda: modify files for introducing zsda cryptodev Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 6/8] zsda: add zsda crypto-pmd Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 7/8] zsda: add zsda crypto-sym Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 8/8] zsda: add zsda crypto-session and compile file Hanxiao Li
2024-09-11  7:52   ` [PATCH v6 1/8] zsda: Introduce zsda device drivers Hanxiao Li
2024-09-11  7:54     ` [PATCH v6 2/8] zsda: add support for zsdadev operations Hanxiao Li
2024-09-11  7:54       ` [PATCH v6 3/8] zsda: add support for queue operation Hanxiao Li
2024-09-11 16:01         ` Stephen Hemminger
2024-09-11  7:54       ` [PATCH v6 4/8] zsda: add zsda compressdev driver and interface Hanxiao Li
2024-09-11  7:54       ` [PATCH v6 5/8] zsda: modify files for introducing zsda cryptodev Hanxiao Li
2024-09-17 18:22         ` [EXTERNAL] " Akhil Goyal
2024-09-11  7:54       ` [PATCH v6 6/8] zsda: add zsda crypto-pmd Hanxiao Li
2024-09-17 18:25         ` [EXTERNAL] " Akhil Goyal
2024-09-11  7:54       ` [PATCH v6 7/8] zsda: add zsda crypto-sym Hanxiao Li
2024-09-11  7:54       ` [PATCH v6 8/8] zsda: add zsda crypto-session and compile file Hanxiao Li
2024-09-17 18:33         ` [EXTERNAL] " Akhil Goyal
2024-09-17 18:13     ` [EXTERNAL] [PATCH v6 1/8] zsda: Introduce zsda device drivers Akhil Goyal
2024-09-27 12:44     ` [PATCH v7 0/8] drivers/zsda: introduce zsda drivers Hanxiao Li
2024-09-27 13:01     ` [PATCH v7 1/8] common/zsda: add common function and log macro Hanxiao Li
2024-09-27 13:09       ` [PATCH v7 0/8] drivers/zsda: introduce zsda drivers Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 2/8] common/zsda: configure device Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 3/8] common/zsda: configure queues Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 4/8] compress/zsda: configure drivers of compressdev Hanxiao Li
2024-09-27 13:09         ` Hanxiao Li [this message]
2024-09-27 13:09         ` [PATCH v7 6/8] lib/cryptodev: add sm4 xts for crypto Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 7/8] app/test: add sm4-xts test Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 8/8] doc/guides: add documents and release notes for two drivers Hanxiao Li
