From: lhx <li.hanxiao@zte.com.cn>
To: dev@dpdk.org
Cc: Hanxiao Li <li.hanxiao@zte.com.cn>
Subject: [PATCH] zsda: introduce zsda drivers and examples
Date: Fri, 28 Jun 2024 17:17:45 +0800
Message-ID: <20240628091745.3329385-1-li.hanxiao@zte.com.cn>
From: Hanxiao Li <li.hanxiao@zte.com.cn>
Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
MAINTAINERS | 14 +
config/rte_config.h | 6 +-
drivers/common/zsda/meson.build | 35 +
drivers/common/zsda/version.map | 3 +
drivers/common/zsda/zsda_common.c | 226 +++++
drivers/common/zsda/zsda_common.h | 345 ++++++++
drivers/common/zsda/zsda_device.c | 661 +++++++++++++++
drivers/common/zsda/zsda_device.h | 207 +++++
drivers/common/zsda/zsda_logs.c | 21 +
drivers/common/zsda/zsda_logs.h | 32 +
drivers/common/zsda/zsda_qp.c | 703 ++++++++++++++++
drivers/common/zsda/zsda_qp.h | 208 +++++
drivers/compress/zsda/zsda_comp.c | 273 ++++++
drivers/compress/zsda/zsda_comp.h | 34 +
drivers/compress/zsda/zsda_comp_pmd.c | 430 ++++++++++
drivers/compress/zsda/zsda_comp_pmd.h | 42 +
drivers/crypto/zsda/meson.build | 26 +
drivers/crypto/zsda/version.map | 6 +
drivers/crypto/zsda/zsda_sym.c | 734 ++++++++++++++++
drivers/crypto/zsda/zsda_sym.h | 42 +
drivers/crypto/zsda/zsda_sym_capabilities.h | 136 +++
drivers/crypto/zsda/zsda_sym_pmd.c | 431 ++++++++++
drivers/crypto/zsda/zsda_sym_pmd.h | 44 +
drivers/meson.build | 1 +
examples/meson.build | 1 +
examples/zsda/Makefile | 56 ++
examples/zsda/commands.c | 321 +++++++
examples/zsda/meson.build | 30 +
examples/zsda/test.c | 198 +++++
examples/zsda/test.h | 236 ++++++
examples/zsda/test_zsda.c | 309 +++++++
examples/zsda/test_zsda.h | 457 ++++++++++
examples/zsda/test_zsda_compressdev.c | 678 +++++++++++++++
examples/zsda/test_zsda_compressdev.h | 93 ++
examples/zsda/test_zsda_cryptodev.c | 794 ++++++++++++++++++
examples/zsda/test_zsda_cryptodev.h | 144 ++++
.../test_zsda_cryptodev_aes_test_vectors.h | 139 +++
examples/zsda/test_zsda_cryptodev_data.h | 184 ++++
.../test_zsda_cryptodev_hash_test_vectors.h | 210 +++++
lib/compressdev/rte_compressdev.h | 15 +-
lib/compressdev/rte_compressdev_pmd.h | 3 +
lib/cryptodev/rte_crypto_sym.h | 4 +
lib/cryptodev/rte_cryptodev_pmd.h | 325 +++++++
usertools/dpdk-devbind.py | 117 +--
44 files changed, 8900 insertions(+), 74 deletions(-)
create mode 100644 drivers/common/zsda/meson.build
create mode 100644 drivers/common/zsda/version.map
create mode 100644 drivers/common/zsda/zsda_common.c
create mode 100644 drivers/common/zsda/zsda_common.h
create mode 100644 drivers/common/zsda/zsda_device.c
create mode 100644 drivers/common/zsda/zsda_device.h
create mode 100644 drivers/common/zsda/zsda_logs.c
create mode 100644 drivers/common/zsda/zsda_logs.h
create mode 100644 drivers/common/zsda/zsda_qp.c
create mode 100644 drivers/common/zsda/zsda_qp.h
create mode 100644 drivers/compress/zsda/zsda_comp.c
create mode 100644 drivers/compress/zsda/zsda_comp.h
create mode 100644 drivers/compress/zsda/zsda_comp_pmd.c
create mode 100644 drivers/compress/zsda/zsda_comp_pmd.h
create mode 100644 drivers/crypto/zsda/meson.build
create mode 100644 drivers/crypto/zsda/version.map
create mode 100644 drivers/crypto/zsda/zsda_sym.c
create mode 100644 drivers/crypto/zsda/zsda_sym.h
create mode 100644 drivers/crypto/zsda/zsda_sym_capabilities.h
create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.c
create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.h
create mode 100644 examples/zsda/Makefile
create mode 100644 examples/zsda/commands.c
create mode 100644 examples/zsda/meson.build
create mode 100644 examples/zsda/test.c
create mode 100644 examples/zsda/test.h
create mode 100644 examples/zsda/test_zsda.c
create mode 100644 examples/zsda/test_zsda.h
create mode 100644 examples/zsda/test_zsda_compressdev.c
create mode 100644 examples/zsda/test_zsda_compressdev.h
create mode 100644 examples/zsda/test_zsda_cryptodev.c
create mode 100644 examples/zsda/test_zsda_cryptodev.h
create mode 100644 examples/zsda/test_zsda_cryptodev_aes_test_vectors.h
create mode 100644 examples/zsda/test_zsda_cryptodev_data.h
create mode 100644 examples/zsda/test_zsda_cryptodev_hash_test_vectors.h
create mode 100644 lib/cryptodev/rte_cryptodev_pmd.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 22ef2ea..9503cfa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1088,6 +1088,11 @@ F: drivers/common/qat/
F: doc/guides/cryptodevs/qat.rst
F: doc/guides/cryptodevs/features/qat.ini
+ZTE Storage Data Accelerator
+M: Hanxiao Li <li.hanxiao@zte.com.cn>
+F: drivers/crypto/zsda/
+F: drivers/common/zsda/
+
IPsec MB
M: Kai Ji <kai.ji@intel.com>
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
@@ -1205,6 +1210,11 @@ F: drivers/compress/zlib/
F: doc/guides/compressdevs/zlib.rst
F: doc/guides/compressdevs/features/zlib.ini
+ZTE Storage Data Accelerator
+M: Hanxiao Li <li.hanxiao@zte.com.cn>
+F: drivers/compress/zsda/
+F: drivers/common/zsda/
+
DMAdev Drivers
--------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 3c4876d..b097360 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -37,7 +37,7 @@
#define RTE_MAX_MEMZONE 2560
#define RTE_MAX_TAILQ 32
#define RTE_LOG_DP_LEVEL RTE_LOG_INFO
-#define RTE_BACKTRACE 1
+// #define RTE_BACKTRACE 1
#define RTE_MAX_VFIO_CONTAINERS 64
/* bsd module defines */
@@ -105,6 +105,10 @@
#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16
#define RTE_PMD_QAT_COMP_IM_BUFFER_SIZE 65536
+/* ZSDA device */
+/* Max. number of ZSDA devices which can be attached */
+#define RTE_PMD_ZSDA_MAX_PCI_DEVICES 256
+
/* virtio crypto defines */
#define RTE_MAX_VIRTIO_CRYPTO 32
diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
new file mode 100644
index 0000000..093965b
--- /dev/null
+++ b/drivers/common/zsda/meson.build
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 ZTE Corporation
+
+config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
+
+zsda_crypto = true
+zsda_crypto_path = 'crypto/zsda'
+zsda_crypto_relpath = '../../' + zsda_crypto_path
+zsda_compress = true
+zsda_compress_path = 'compress/zsda'
+zsda_compress_relpath = '../../' + zsda_compress_path
+
+deps += ['bus_pci', 'cryptodev', 'net', 'compressdev']
+sources += files('zsda_common.c','zsda_qp.c',
+ 'zsda_device.c',
+ 'zsda_logs.c')
+includes += include_directories(zsda_crypto_relpath,
+ zsda_compress_relpath)
+
+if zsda_compress
+ foreach f: ['zsda_comp_pmd.c','zsda_comp.c']
+ sources += files(join_paths(zsda_compress_relpath, f))
+ endforeach
+endif
+
+if zsda_crypto
+    libcrypto = dependency('libcrypto', required: false, method: 'pkg-config')
+ foreach f: ['zsda_sym_pmd.c', 'zsda_sym.c']
+ sources += files(join_paths(zsda_crypto_relpath, f))
+ endforeach
+ deps += ['security']
+ ext_deps += libcrypto
+ cflags += ['-DBUILD_ZSDA_SYM']
+endif
+
diff --git a/drivers/common/zsda/version.map b/drivers/common/zsda/version.map
new file mode 100644
index 0000000..4a76d1d
--- /dev/null
+++ b/drivers/common/zsda/version.map
@@ -0,0 +1,3 @@
+DPDK_21 {
+ local: *;
+};
diff --git a/drivers/common/zsda/zsda_common.c b/drivers/common/zsda/zsda_common.c
new file mode 100644
index 0000000..c8f34b0
--- /dev/null
+++ b/drivers/common/zsda/zsda_common.c
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "zsda_common.h"
+#include "zsda_device.h"
+
+#define MAGIC_SEND 0xab
+#define MAGIC_RECV 0xcd
+#define ADMIN_VER 1
+
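+/* Lookup table used by zsda_crc8() to checksum admin queue messages. */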
+static uint8_t crc8_table[256] = {
+ 0x00, 0x41, 0x13, 0x52, 0x26, 0x67, 0x35, 0x74, 0x4c, 0x0d, 0x5f, 0x1e,
+ 0x6a, 0x2b, 0x79, 0x38, 0x09, 0x48, 0x1a, 0x5b, 0x2f, 0x6e, 0x3c, 0x7d,
+ 0x45, 0x04, 0x56, 0x17, 0x63, 0x22, 0x70, 0x31, 0x12, 0x53, 0x01, 0x40,
+ 0x34, 0x75, 0x27, 0x66, 0x5e, 0x1f, 0x4d, 0x0c, 0x78, 0x39, 0x6b, 0x2a,
+ 0x1b, 0x5a, 0x08, 0x49, 0x3d, 0x7c, 0x2e, 0x6f, 0x57, 0x16, 0x44, 0x05,
+ 0x71, 0x30, 0x62, 0x23, 0x24, 0x65, 0x37, 0x76, 0x02, 0x43, 0x11, 0x50,
+ 0x68, 0x29, 0x7b, 0x3a, 0x4e, 0x0f, 0x5d, 0x1c, 0x2d, 0x6c, 0x3e, 0x7f,
+ 0x0b, 0x4a, 0x18, 0x59, 0x61, 0x20, 0x72, 0x33, 0x47, 0x06, 0x54, 0x15,
+ 0x36, 0x77, 0x25, 0x64, 0x10, 0x51, 0x03, 0x42, 0x7a, 0x3b, 0x69, 0x28,
+ 0x5c, 0x1d, 0x4f, 0x0e, 0x3f, 0x7e, 0x2c, 0x6d, 0x19, 0x58, 0x0a, 0x4b,
+ 0x73, 0x32, 0x60, 0x21, 0x55, 0x14, 0x46, 0x07, 0x48, 0x09, 0x5b, 0x1a,
+ 0x6e, 0x2f, 0x7d, 0x3c, 0x04, 0x45, 0x17, 0x56, 0x22, 0x63, 0x31, 0x70,
+ 0x41, 0x00, 0x52, 0x13, 0x67, 0x26, 0x74, 0x35, 0x0d, 0x4c, 0x1e, 0x5f,
+ 0x2b, 0x6a, 0x38, 0x79, 0x5a, 0x1b, 0x49, 0x08, 0x7c, 0x3d, 0x6f, 0x2e,
+ 0x16, 0x57, 0x05, 0x44, 0x30, 0x71, 0x23, 0x62, 0x53, 0x12, 0x40, 0x01,
+ 0x75, 0x34, 0x66, 0x27, 0x1f, 0x5e, 0x0c, 0x4d, 0x39, 0x78, 0x2a, 0x6b,
+ 0x6c, 0x2d, 0x7f, 0x3e, 0x4a, 0x0b, 0x59, 0x18, 0x20, 0x61, 0x33, 0x72,
+ 0x06, 0x47, 0x15, 0x54, 0x65, 0x24, 0x76, 0x37, 0x43, 0x02, 0x50, 0x11,
+ 0x29, 0x68, 0x3a, 0x7b, 0x0f, 0x4e, 0x1c, 0x5d, 0x7e, 0x3f, 0x6d, 0x2c,
+ 0x58, 0x19, 0x4b, 0x0a, 0x32, 0x73, 0x21, 0x60, 0x14, 0x55, 0x07, 0x46,
+ 0x77, 0x36, 0x64, 0x25, 0x51, 0x10, 0x42, 0x03, 0x3b, 0x7a, 0x28, 0x69,
+ 0x1d, 0x5c, 0x0e, 0x4f};
+
+static uint8_t
+zsda_crc8(uint8_t *message, int length)
+{
+ uint8_t crc = 0;
+ int i;
+
+ for (i = 0; i < length; i++)
+ crc = crc8_table[crc ^ message[i]];
+ return crc;
+}
+
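+/*
+ * Pack four bytes into a single 32-bit value and write it to the CSR
+ * at addr; the packed value is also returned to the caller.
+ */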
+uint32_t
+set_reg_8(void *addr, uint8_t val0, uint8_t val1, uint8_t val2, uint8_t val3)
+{
+ uint8_t val[4];
+ val[0] = val0;
+ val[1] = val1;
+ val[2] = val2;
+ val[3] = val3;
+ ZSDA_CSR_WRITE32(addr, *(uint32_t *)val);
+ return *(uint32_t *)val;
+}
+
+uint8_t
+get_reg_8(void *addr, uint8_t offset)
+{
+ uint32_t val = ZSDA_CSR_READ32(addr);
+
+ return *(((uint8_t *)&val) + offset);
+}
+
+int
+zsda_admin_msg_init(struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+ set_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, 0, 0, MAGIC_RECV, 0);
+ set_reg_8(mmio_base + ZSDA_ADMIN_CQ_BASE7, 0, 0, MAGIC_RECV, 0);
+ return 0;
+}
+
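+/*
+ * Send one admin request: copy the 28-byte message into the admin WQ
+ * window, publish CRC/version/magic in the last WQ register, ring the
+ * WQ tail doorbell and poll the flag byte until the device answers
+ * with MAGIC_RECV or the retry budget is exhausted.
+ */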
+int
+zsda_send_admin_msg(struct rte_pci_device *pci_dev, void *req, uint32_t len)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t wq_flag = 0;
+ uint8_t crc = 0;
+ uint16_t admin_db = 0;
+ uint32_t retry = 1000;
+ uint32_t i = 0;
+ uint16_t db = 0;
+
+ if (len > ADMIN_BUF_DATA_LEN)
+ return -EINVAL;
+
+ for (i = 0; i < 7; i++) {
+ ZSDA_CSR_WRITE32(((uint32_t *)(mmio_base + ZSDA_ADMIN_WQ) + i),
+ *((uint32_t *)req + i));
+ }
+
+ crc = zsda_crc8((uint8_t *)req, ADMIN_BUF_DATA_LEN);
+ set_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, crc, ADMIN_VER, MAGIC_SEND, 0);
+ rte_delay_us_sleep(100);
+ rte_wmb();
+
+ admin_db = ZSDA_CSR_READ32(mmio_base + ZSDA_ADMIN_WQ_TAIL);
+ db = zsda_modulo_32(admin_db, 0x1ff);
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_WQ_TAIL, db);
+
+ do {
+ rte_delay_us_sleep(100);
+ wq_flag = get_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, 2);
+ if (wq_flag == MAGIC_RECV)
+ break;
+
+ retry--;
+ if (!retry) {
+ ZSDA_LOG(ERR, "Failed! wq_flag 0x%X", wq_flag);
+ set_reg_8(mmio_base + ZSDA_ADMIN_WQ_BASE7, 0, crc,
+ ADMIN_VER, 0);
+ return -EIO;
+ }
+ } while (1);
+
+ return ZSDA_SUCCESS;
+}
+
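+/*
+ * Receive one admin response: poll the CQ flag byte for MAGIC_SEND,
+ * copy the response out of the admin CQ window, acknowledge the slot
+ * by writing MAGIC_RECV back to the flag byte and verify the CRC
+ * before handing the data to the caller.
+ */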
+int
+zsda_recv_admin_msg(struct rte_pci_device *pci_dev, void *resp, uint32_t len)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t cq_flag = 0;
+ uint32_t retry = 100;
+ uint8_t crc = 0;
+ uint8_t buf[ADMIN_BUF_TOTAL_LEN] = {0};
+ uint32_t i = 0;
+
+ if (len > ADMIN_BUF_DATA_LEN)
+ return -EINVAL;
+
+ do {
+ rte_delay_us_sleep(50);
+
+ cq_flag = get_reg_8(mmio_base + ZSDA_ADMIN_CQ_BASE7, 2);
+ if (cq_flag == MAGIC_SEND)
+ break;
+
+ retry--;
+ if (!retry)
+ return -EIO;
+ } while (1);
+
+ for (i = 0; i < len; i++)
+ buf[i] = ZSDA_CSR_READ8(
+ (uint8_t *)(mmio_base + ZSDA_ADMIN_CQ + i));
+
+ crc = ZSDA_CSR_READ8(mmio_base + ZSDA_ADMIN_CQ_CRC);
+ rte_rmb();
+ ZSDA_CSR_WRITE8(mmio_base + ZSDA_ADMIN_CQ_FLAG, MAGIC_RECV);
+ if (crc != zsda_crc8(buf, ADMIN_BUF_DATA_LEN)) {
+ ZSDA_LOG(ERR, "[%d] Failed! crc error!", __LINE__);
+ return -EIO;
+ }
+
+ memcpy(resp, buf, len);
+
+ return 0;
+}
+
+void
+zsda_stats_get(void **queue_pairs, uint32_t nb_queue_pairs,
+ struct zsda_common_stat *stats)
+{
+ enum zsda_service_type type;
+ uint32_t i = 0;
+ struct zsda_qp *qp;
+
+ if ((stats == NULL) || (queue_pairs == NULL)) {
+ ZSDA_LOG(ERR, E_NULL);
+ return;
+ }
+
+ for (i = 0; i < nb_queue_pairs; i++) {
+ qp = queue_pairs[i];
+
+ if (qp == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ break;
+ }
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (qp->srv[type].used) {
+ stats->enqueued_count +=
+ qp->srv[type].stats.enqueued_count;
+ stats->dequeued_count +=
+ qp->srv[type].stats.dequeued_count;
+ stats->enqueue_err_count +=
+ qp->srv[type].stats.enqueue_err_count;
+ stats->dequeue_err_count +=
+ qp->srv[type].stats.dequeue_err_count;
+ }
+ }
+ }
+}
+
+void
+zsda_stats_reset(void **queue_pairs, uint32_t nb_queue_pairs)
+{
+ enum zsda_service_type type;
+ uint32_t i = 0;
+ struct zsda_qp *qp;
+
+ if (queue_pairs == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return;
+ }
+
+ for (i = 0; i < nb_queue_pairs; i++) {
+ qp = queue_pairs[i];
+
+ if (qp == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ break;
+ }
+ for (type = 0; type < ZSDA_MAX_SERVICES; type++) {
+ if (qp->srv[type].used)
+ memset(&(qp->srv[type].stats), 0,
+ sizeof(struct zsda_common_stats));
+ }
+ }
+}
diff --git a/drivers/common/zsda/zsda_common.h b/drivers/common/zsda/zsda_common.h
new file mode 100644
index 0000000..4c6b983
--- /dev/null
+++ b/drivers/common/zsda/zsda_common.h
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_COMMON_H_
+#define _ZSDA_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+#include <rte_mbuf.h>
+
+#include "eal_interrupts.h"
+#include "zsda_logs.h"
+#define ZSDA_PCI_NAME zsda
+#define ZSDA_64_BTYE_ALIGN_MASK (~0x3f)
+#define ZSDA_SGL_MAX_NUMBER 512
+#define ZSDA_MAX_NUM_SEGS (ZSDA_SGL_MAX_NUMBER / 32 * 31 + 1)
+#define ZSDA_SGL_FRAGMENT_SIZE 32
+#define NB_DES 512
+
+#define ZSDA_SUCCESS EXIT_SUCCESS
+#define ZSDA_FAILED (-1)
+
+#define E_NULL "Failed! Addr is NULL"
+#define E_CREATE "Failed! Create"
+#define E_FUNC "Failed! Function"
+#define E_START_Q "Failed! START q"
+#define E_MALLOC "Failed! malloc"
+#define E_FREE "Failed! free"
+
+#define E_COMPARE "Failed! compare"
+#define E_START "Failed! start/setup"
+#define E_CLOSE "Failed! stop/close"
+#define E_CONFIG "Failed! config"
+#define E_RESULT "Failed! result wrong"
+
+#define W_MAY_EXCEPT_TEST "Wrong situation, maybe exception test"
+
+#define CHECK_ADDR_NULL(addr) \
+ do { \
+ if (NULL == addr) { \
+ ZSDA_LOG(ERR, "Failed! ADDR is NULL!"); \
+ return ZSDA_FAILED; \
+ } \
+ } while (0)
+
+enum zsda_device_gen {
+ ZSDA_GEN1 = 1,
+ ZSDA_GEN2,
+ ZSDA_GEN3,
+};
+
+enum zsda_service_type {
+ ZSDA_SERVICE_COMPRESSION = 0,
+ ZSDA_SERVICE_DECOMPRESSION,
+ ZSDA_SERVICE_SYMMETRIC_ENCRYPT,
+ ZSDA_SERVICE_SYMMETRIC_DECRYPT,
+ ZSDA_SERVICE_HASH_ENCODE = 6,
+ ZSDA_SERVICE_INVALID,
+};
+
+#define ZSDA_MAX_SERVICES (ZSDA_SERVICE_INVALID)
+
+#define ZSDA_OPC_EC_AES_XTS_256 0x0 //Encry AES-XTS-256
+#define ZSDA_OPC_EC_AES_XTS_512 0x01 //Encry AES-XTS-512
+#define ZSDA_OPC_EC_SM4_XTS_256 0x02 //Encry SM4-XTS-256
+#define ZSDA_OPC_DC_AES_XTS_256 0x08 //Decry AES-XTS-256
+#define ZSDA_OPC_DC_AES_XTS_512 0x09 //Decry AES-XTS-512
+#define ZSDA_OPC_DC_SM4_XTS_256 0x0A //Decry SM4-XTS-256
+#define ZSDA_OPC_EC_GZIP 0x10 //Compress deflate-Gzip
+#define ZSDA_OPC_EC_ZLIB 0x11 //Compress deflate-Zlib
+#define ZSDA_OPC_DC_GZIP 0x18 //Decompress inflate-Gzip
+#define ZSDA_OPC_DC_ZLIB 0x19 //Decompress inflate-Zlib
+#define ZSDA_OPC_HASH_SHA1 0x20 // Hash-SHA1
+#define ZSDA_OPC_HASH_SHA2_224 0x21 // Hash-SHA2-224
+#define ZSDA_OPC_HASH_SHA2_256 0x22 // Hash-SHA2-256
+#define ZSDA_OPC_HASH_SHA2_384 0x23 // Hash-SHA2-384
+#define ZSDA_OPC_HASH_SHA2_512 0x24 // Hash-SHA2-512
+#define ZSDA_OPC_HASH_SM3 0x25 // Hash-SM3
+#define ZSDA_OPC_INVALID 0xff
+
+#define ZSDA_DIGEST_SIZE_SHA1 (20)
+#define ZSDA_DIGEST_SIZE_SHA2_224 (28)
+#define ZSDA_DIGEST_SIZE_SHA2_256 (32)
+#define ZSDA_DIGEST_SIZE_SHA2_384 (48)
+#define ZSDA_DIGEST_SIZE_SHA2_512 (64)
+#define ZSDA_DIGEST_SIZE_SM3 (32)
+
+#define ZSDA_AES_LBADS_INDICATE_0 (0x0)
+#define ZSDA_AES_LBADS_INDICATE_512 (0x9)
+#define ZSDA_AES_LBADS_INDICATE_4096 (0xC)
+#define ZSDA_AES_LBADS_INDICATE_8192 (0xD)
+#define ZSDA_AES_LBADS_INDICATE_INVALID (0xff)
+
+#define LEN_MIN_CIPHER 16
+#define LEN_MIN_HASH 16
+#define LEN_MIN_HASH_RESULT 16
+#define LEN_MAX_COMP (0x1000000)
+#define LEN_MAX_COMP_8M (0x800000)
+#define LEN_MIN_COMP 4
+#define LBADS_MAX_REMAINDER (16 - 1)
+
+#define DATA_SIZE_1K 1024
+#define DATA_LEN_TEST_4G 0x0100000000
+#define DATA_LEN_TEST_8M (0x7ffff8 - 7)
+#define DATA_LEN_TEST_8B 7
+#define DATA_LEN_TEST_16B 15
+#define SET_CYCLE 0xff
+#define SET_HEAD_INTI 0x0
+#define ZSDA_IF_ERROR_TEST 1
+
+#define ZSDA_Q_START 0x1
+#define ZSDA_Q_STOP 0x0
+#define ZSDA_CLEAR_VALID 0x1
+#define ZSDA_CLEAR_INVALID 0x0
+#define ZSDA_RESP_VALID 0x1
+#define ZSDA_RESP_INVALID 0x0
+
+struct zsda_pci_device;
+
+enum sgl_elment_type_wqe {
+ SGL_ELM_TYPE_PHYS_ADDR = 1,
+ SGL_ELM_TYPE_LIST,
+ SGL_ELM_TYPE_LIST_ADDR,
+ SGL_ELM_TYPE_LIST_SGL32,
+};
+
+enum sgl_elment_type {
+ SGL_TYPE_PHYS_ADDR = 0,
+ SGL_TYPE_LAST_PHYS_ADDR,
+ SGL_TYPE_NEXT_LIST,
+ SGL_TYPE_EC_LEVEL1_SGL32,
+};
+
+enum zsda_admin_msg_id {
+ // Version information
+ ZSDA_ADMIN_VERSION_REQ = 0,
+ ZSDA_ADMIN_VERSION_RESP,
+ // algo type
+ ZSDA_ADMIN_QUEUE_CFG_REQ,
+ ZSDA_ADMIN_QUEUE_CFG_RESP,
+ // get cycle
+ ZSDA_ADMIN_QUEUE_CYCLE_REQ,
+ ZSDA_ADMIN_QUEUE_CYCLE_RESP,
+ // set cycle
+ ZSDA_ADMIN_SET_CYCLE_REQ,
+ ZSDA_ADMIN_SET_CYCLE_RESP,
+
+ ZSDA_MIG_STATE_WARNING,
+ ZSDA_ADMIN_RESERVE,
+ // set close flr register
+ ZSDA_FLR_SET_FUNCTION,
+ ZSDA_ADMIN_MSG_VALID,
+ ZSDA_ADMIN_INT_TEST
+};
+
+struct zsda_admin_req {
+ uint16_t msg_type;
+ uint8_t data[26];
+};
+
+struct zsda_admin_resp {
+ uint16_t msg_type;
+ uint8_t data[26];
+};
+
+struct zsda_test_msg {
+ uint32_t msg_type;
+ uint32_t data_in;
+ uint8_t data[20];
+};
+
+struct zsda_admin_req_qcfg {
+ uint16_t msg_type;
+ uint8_t qid;
+ uint8_t data[25];
+};
+
+#pragma pack(1)
+typedef struct {
+ uint16_t q_type;
+ uint16_t wq_tail;
+ uint16_t wq_head;
+ uint16_t cq_tail;
+ uint16_t cq_head;
+ uint16_t cycle;
+} qinfo;
+
+struct zsda_admin_resp_qcfg {
+ uint16_t msg_type;
+ qinfo qcfg;
+ uint8_t data[14];
+};
+#pragma pack()
+
+enum flr_clr_mask {
+ unmask = 0,
+ mask,
+};
+
+/**< Common struct for scatter-gather list operations */
+struct zsda_buf {
+ uint64_t addr;
+ uint32_t len;
+ uint8_t resrvd[3];
+ uint8_t type;
+} __rte_packed;
+
+struct zsda_sgl {
+ struct zsda_buf buffers[ZSDA_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct zsda_op_cookie {
+ bool used;
+ void *op;
+ uint8_t valid;
+ uint16_t sid;
+ struct zsda_sgl sgl_src;
+ struct zsda_sgl sgl_dst;
+ phys_addr_t sgl_src_phys_addr;
+ phys_addr_t sgl_dst_phys_addr;
+} __rte_packed;
+
+struct zsda_mul_sgl {
+ struct zsda_buf point_to_sgl[ZSDA_SGL_FRAGMENT_SIZE];
+ struct zsda_sgl sgls[ZSDA_SGL_FRAGMENT_SIZE];
+} __rte_packed __rte_cache_aligned;
+
+struct zsda_ec_op_cookie {
+ bool used;
+ void *op;
+ uint8_t valid;
+ uint16_t sid;
+ struct zsda_mul_sgl sgl_src;
+ struct zsda_mul_sgl sgl_dst;
+ phys_addr_t sgl_src_phys_addr;
+ phys_addr_t sgl_dst_phys_addr;
+} __rte_packed;
+
+struct crypto_cfg {
+ uint8_t slba[8];
+ uint8_t key[64];
+ uint8_t lbads : 4;
+ uint8_t resv1 : 4;
+ uint8_t resv2[23];
+} __rte_packed;
+
+struct compress_cfg {
+} __rte_packed;
+
+struct zsda_wqe_crpt {
+ uint8_t valid;
+ uint8_t op_code;
+ uint16_t sid;
+ uint8_t resv[3];
+ uint8_t rx_sgl_type : 4;
+ uint8_t tx_sgl_type : 4;
+ uint64_t rx_addr;
+ uint32_t rx_length;
+ uint64_t tx_addr;
+ uint32_t tx_length;
+ struct crypto_cfg cfg;
+} __rte_packed;
+
+struct zsda_wqe_comp {
+ uint8_t valid;
+ uint8_t op_code;
+ uint16_t sid;
+ uint8_t resv[3];
+ uint8_t rx_sgl_type : 4;
+ uint8_t tx_sgl_type : 4;
+ uint64_t rx_addr;
+ uint32_t rx_length;
+ uint64_t tx_addr;
+ uint32_t tx_length;
+ struct compress_cfg cfg;
+} __rte_packed;
+
+struct zsda_wqe_common {
+ uint8_t valid;
+ uint8_t op_code;
+ uint16_t sid;
+ uint8_t resv[3];
+ uint8_t rx_sgl_type : 4;
+ uint8_t tx_sgl_type : 4;
+ uint64_t rx_addr;
+ uint32_t rx_length;
+ uint64_t tx_addr;
+ uint32_t tx_length;
+} __rte_packed;
+
+struct zsda_cqe {
+ uint8_t valid; // cqe_cycle
+ uint8_t op_code;
+ uint16_t sid;
+ uint8_t state;
+ uint8_t result;
+ uint16_t zsda_wq_id;
+ uint32_t tx_real_length;
+ uint16_t err0;
+ uint16_t err1; // bit15 cqe flag
+} __rte_packed;
+
+struct zsda_common_stat {
+ /**< Count of all operations enqueued */
+ uint64_t enqueued_count;
+ /**< Count of all operations dequeued */
+ uint64_t dequeued_count;
+
+ /**< Total error count on operations enqueued */
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations dequeued */
+ uint64_t dequeue_err_count;
+};
+
+enum zsda_algo_core {
+ ZSDA_CORE_COMP,
+ ZSDA_CORE_DECOMP,
+ ZSDA_CORE_ENCRY,
+ ZSDA_CORE_DECRY,
+ ZSDA_CORE_HASH,
+ ZSDA_CORE_INVALID,
+
+};
+
+#define CQE_VALID(value) (value & 0x8000)
+#define CQE_ERR1(value) (value & 0x7fff)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+uint32_t set_reg_8(void *addr, uint8_t val0, uint8_t val1, uint8_t val2,
+ uint8_t val3);
+uint8_t get_reg_8(void *addr, uint8_t offset);
+
+int zsda_admin_msg_init(struct rte_pci_device *pci_dev);
+int zsda_send_admin_msg(struct rte_pci_device *pci_dev, void *req, uint32_t len);
+int zsda_recv_admin_msg(struct rte_pci_device *pci_dev, void *resp,
+ uint32_t len);
+
+void zsda_stats_get(void **queue_pairs, uint32_t nb_queue_pairs,
+ struct zsda_common_stat *stats);
+void zsda_stats_reset(void **queue_pairs, uint32_t nb_queue_pairs);
+
+#endif /* _ZSDA_COMMON_H_ */
diff --git a/drivers/common/zsda/zsda_device.c b/drivers/common/zsda/zsda_device.c
new file mode 100644
index 0000000..214c00d
--- /dev/null
+++ b/drivers/common/zsda/zsda_device.c
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include <rte_devargs.h>
+#include <rte_string_fns.h>
+
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+/* per-process array of device data */
+struct zsda_device_info zsda_devs[RTE_PMD_ZSDA_MAX_PCI_DEVICES];
+static int zsda_nb_pci_devices;
+static int zsda_num_used_qps;
+#define MAX_EVENT 10
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_zsda_map[] = {
+ {
+ RTE_PCI_DEVICE(0x1cf2, 0x8050),
+ },
+ {
+ RTE_PCI_DEVICE(0x1cf2, 0x8051),
+ },
+ {.device_id = 0},
+};
+
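+/* Poll a CSR until it reads back dst_value, giving up after roughly 50 ms. */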
+static int
+zsda_check_write(uint8_t *addr, uint32_t dst_value)
+{
+ int times = 500;
+ uint32_t ret = 0;
+
+ ret = ZSDA_CSR_READ32(addr);
+
+ while ((ret != dst_value) && times--) {
+ ret = ZSDA_CSR_READ32(addr);
+ rte_delay_us_sleep(100);
+ }
+ if (ret == dst_value)
+ return ZSDA_SUCCESS;
+ else
+ return ZSDA_FAILED;
+}
+
+static uint8_t
+zsda_get_num_used_qps(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t num_used_qps;
+ num_used_qps = ZSDA_CSR_READ8(mmio_base + 0);
+
+ return num_used_qps;
+}
+
+int
+zsda_admin_q_start(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ int ret = 0;
+
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, 0);
+
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+ ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+
+ return ret;
+}
+
+int
+zsda_admin_q_stop(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ int ret = 0;
+
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP_RESP, ZSDA_RESP_INVALID);
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP, ZSDA_Q_STOP);
+
+ ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_STOP_RESP,
+ ZSDA_RESP_VALID);
+
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(INFO, "Failed! zsda_admin q stop");
+ return ZSDA_FAILED;
+ }
+ return ZSDA_SUCCESS;
+}
+
+int
+zsda_admin_q_clear(const struct rte_pci_device *pci_dev)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ int ret = 0;
+
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR_RESP, ZSDA_RESP_INVALID);
+ ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR, ZSDA_RESP_VALID);
+
+ ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_CLR_RESP,
+ ZSDA_RESP_VALID);
+
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(INFO, "Failed! zsda_admin q clear");
+ return ZSDA_FAILED;
+ }
+ return ZSDA_SUCCESS;
+}
+
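+/*
+ * Stop one IO queue: invalidate its response CSR, write the stop
+ * command, then wait for the device to flag the response as valid.
+ */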
+static int
+zsda_queue_stop_single(uint8_t *mmio_base, uint8_t id)
+{
+ int ret = ZSDA_SUCCESS;
+ uint8_t *addr_stop = mmio_base + ZSDA_IO_Q_STOP + (4 * id);
+ uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_STOP_RESP + (4 * id);
+
+ ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+ ZSDA_CSR_WRITE32(addr_stop, ZSDA_Q_STOP);
+
+ ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+ ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+
+ return ret;
+}
+
+int
+zsda_queue_stop(const struct rte_pci_device *pci_dev,
+ const struct zsda_qp_hw *qp_hw __rte_unused)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t id = 0;
+ int ret = ZSDA_SUCCESS;
+
+ for (id = 0; id < zsda_num_used_qps; id++)
+ ret |= zsda_queue_stop_single(mmio_base, id);
+
+ return ret;
+}
+
+static int
+zsda_queue_start_single(uint8_t *mmio_base, uint8_t id)
+{
+ uint8_t *addr_start = mmio_base + ZSDA_IO_Q_START + (4 * id);
+
+ ZSDA_CSR_WRITE32(addr_start, ZSDA_Q_START);
+ return zsda_check_write(addr_start, ZSDA_Q_START);
+}
+
+int
+zsda_queue_start(const struct rte_pci_device *pci_dev,
+ const struct zsda_qp_hw *qp_hw __rte_unused)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t id = 0;
+ int ret = ZSDA_SUCCESS;
+
+ for (id = 0; id < zsda_num_used_qps; id++)
+ ret |= zsda_queue_start_single(mmio_base, id);
+
+ return ret;
+}
+
+static int
+zsda_queue_clear_single(uint8_t *mmio_base, uint8_t id)
+{
+ int ret = ZSDA_SUCCESS;
+ uint8_t *addr_clear = mmio_base + ZSDA_IO_Q_CLR + (4 * id);
+ uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_CLR_RESP + (4 * id);
+
+ ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+ ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_VALID);
+ ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+ ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_INVALID);
+
+ return ret;
+}
+
+int
+zsda_queue_clear(const struct rte_pci_device *pci_dev,
+ const struct zsda_qp_hw *qp_hw __rte_unused)
+{
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t id = 0;
+ int ret = ZSDA_SUCCESS;
+
+ for (id = 0; id < zsda_num_used_qps; id++)
+ ret |= zsda_queue_clear_single(mmio_base, id);
+
+ return ret;
+}
+
+static struct zsda_pci_device *
+zsda_pci_get_named_dev(const char *name)
+{
+ unsigned int i;
+
+ if (name == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return NULL;
+ }
+
+ for (i = 0; i < RTE_PMD_ZSDA_MAX_PCI_DEVICES; i++) {
+ if (zsda_devs[i].mz &&
+ (strcmp(((struct zsda_pci_device *)zsda_devs[i].mz->addr)
+ ->name,
+ name) == 0))
+ return (struct zsda_pci_device *)zsda_devs[i].mz->addr;
+ }
+
+ return NULL;
+}
+
+static uint8_t
+zsda_pci_find_free_device_index(void)
+{
+ uint32_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_PMD_ZSDA_MAX_PCI_DEVICES; dev_id++)
+ if (zsda_devs[dev_id].mz == NULL)
+ break;
+
+ return dev_id & 0xff;
+}
+
+struct zsda_pci_device *
+zsda_get_zsda_dev_from_pci_dev(struct rte_pci_device *pci_dev)
+{
+ char name[ZSDA_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return zsda_pci_get_named_dev(name);
+}
+
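+/*
+ * Allocate (or, in a secondary process, look up) the per-device state
+ * in a shared memzone named "<PCI address>_zsda" so that it can be
+ * shared across processes.
+ */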
+struct zsda_pci_device *
+zsda_pci_device_allocate(struct rte_pci_device *pci_dev)
+{
+ struct zsda_pci_device *zsda_pci_dev;
+ uint8_t zsda_dev_id = 0;
+ char name[ZSDA_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name + strlen(name), (ZSDA_DEV_NAME_MAX_LEN - strlen(name)),
+ "_zsda");
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ const struct rte_memzone *mz = rte_memzone_lookup(name);
+
+ if (mz == NULL) {
+ ZSDA_LOG(ERR, "Secondary can't find %s mz", name);
+ return NULL;
+ }
+ zsda_pci_dev = mz->addr;
+ zsda_devs[zsda_pci_dev->zsda_dev_id].mz = mz;
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev = pci_dev;
+ zsda_nb_pci_devices++;
+ return zsda_pci_dev;
+ }
+
+ if (zsda_pci_get_named_dev(name) != NULL) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ return NULL;
+ }
+
+ zsda_dev_id = zsda_pci_find_free_device_index();
+
+ if (zsda_dev_id == (RTE_PMD_ZSDA_MAX_PCI_DEVICES - 1)) {
+ ZSDA_LOG(ERR, "Reached maximum number of ZSDA devices");
+ return NULL;
+ }
+
+ unsigned int socket_id = rte_socket_id();
+
+ zsda_devs[zsda_dev_id].mz =
+ rte_memzone_reserve(name, sizeof(struct zsda_pci_device),
+ (int)(socket_id & 0xfff), 0);
+
+ if (zsda_devs[zsda_dev_id].mz == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return NULL;
+ }
+
+ zsda_pci_dev = zsda_devs[zsda_dev_id].mz->addr;
+ memset(zsda_pci_dev, 0, sizeof(*zsda_pci_dev));
+ strlcpy(zsda_pci_dev->name, name, ZSDA_DEV_NAME_MAX_LEN);
+ zsda_pci_dev->zsda_dev_id = zsda_dev_id;
+ zsda_pci_dev->pci_dev = pci_dev;
+ zsda_devs[zsda_dev_id].pci_dev = pci_dev;
+
+ switch (pci_dev->id.device_id) {
+
+ case 0x8000 ... 0x9000:
+ break;
+
+ default:
+ ZSDA_LOG(ERR, "Invalid dev_id");
+ rte_memzone_free(zsda_devs[zsda_pci_dev->zsda_dev_id].mz);
+ return NULL;
+ }
+
+ rte_spinlock_init(&zsda_pci_dev->arb_csr_lock);
+ zsda_nb_pci_devices++;
+
+ return zsda_pci_dev;
+}
+
+static int
+zsda_pci_device_release(struct rte_pci_device *pci_dev)
+{
+ struct zsda_pci_device *zsda_pci_dev;
+ struct zsda_device_info *inst;
+ char name[ZSDA_DEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ snprintf(name + strlen(name),
+ ZSDA_DEV_NAME_MAX_LEN - strlen(name), "_zsda");
+ zsda_pci_dev = zsda_pci_get_named_dev(name);
+ if (zsda_pci_dev != NULL) {
+ inst = &zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if ((zsda_pci_dev->sym_dev != NULL) ||
+ (zsda_pci_dev->comp_dev != NULL)) {
+ ZSDA_LOG(DEBUG, "ZSDA device %s is busy", name);
+ return -EBUSY;
+ }
+ rte_memzone_free(inst->mz);
+ }
+ memset(inst, 0, sizeof(struct zsda_device_info));
+ zsda_nb_pci_devices--;
+ }
+ return 0;
+}
+
+static int
+zsda_pci_dev_destroy(struct zsda_pci_device *zsda_pci_dev,
+ struct rte_pci_device *pci_dev)
+{
+ zsda_sym_dev_destroy(zsda_pci_dev);
+ zsda_comp_dev_destroy(zsda_pci_dev);
+
+ return zsda_pci_device_release(pci_dev);
+}
+
+static void
+zsda_set_queue_head_tail(struct zsda_pci_device *zsda_pci_dev, uint8_t qid)
+{
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+ ZSDA_CSR_WRITE32(mmio_base + IO_DB_INITIAL_CONFIG + (qid * 4),
+ SET_HEAD_INTI);
+}
+
+int
+zsda_set_queue_cycle(struct zsda_pci_device *zsda_pci_dev, uint8_t qid)
+{
+ struct zsda_admin_req_qcfg req = {0};
+ struct zsda_admin_resp_qcfg resp = {0};
+ int ret = 0;
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+ zsda_admin_msg_init(pci_dev);
+ req.msg_type = ZSDA_ADMIN_SET_CYCLE_REQ;
+ req.qid = qid;
+ req.data[0] = SET_CYCLE;
+ ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Send msg");
+ return ZSDA_FAILED;
+ }
+
+ ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Receive msg");
+ return ZSDA_FAILED;
+ }
+ return ZSDA_SUCCESS;
+}
+
+int
+zsda_set_cycle_head_tail(struct zsda_pci_device *zsda_pci_dev)
+{
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+ uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+ uint8_t id = 0;
+ uint8_t num_used_ioqp = ZSDA_CSR_READ8(mmio_base + 0);
+ int ret = ZSDA_SUCCESS;
+
+ for (id = 0; id < num_used_ioqp; id++) {
+ zsda_set_queue_head_tail(zsda_pci_dev, id);
+ ret |= zsda_set_queue_cycle(zsda_pci_dev, id);
+ }
+
+ return ret;
+}
+
+enum zsda_service_type
+zsda_get_queue_cfg_by_id(struct zsda_pci_device *zsda_pci_dev, uint8_t qid,
+ qinfo *qcfg)
+{
+ struct zsda_admin_req_qcfg req = {0};
+ struct zsda_admin_resp_qcfg resp = {0};
+ int ret = 0;
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+ zsda_admin_msg_init(pci_dev);
+ req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
+
+ req.qid = qid;
+
+ ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Send msg");
+ return ZSDA_SERVICE_INVALID;
+ }
+
+ ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+ if (ret)
+ ZSDA_LOG(ERR, "Failed! Receive msg");
+
+ if (resp.msg_type == ZSDA_ADMIN_QUEUE_CFG_RESP) {
+ memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
+ return resp.qcfg.q_type;
+ }
+ return ZSDA_SERVICE_INVALID;
+}
+
+int
+zsda_close_flr(const struct zsda_pci_device *zsda_pci_dev)
+{
+ struct zsda_admin_req_qcfg req = {0};
+ struct zsda_admin_resp_qcfg resp = {0};
+
+ int ret = 0;
+ struct rte_pci_device *pci_dev =
+ zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+ zsda_admin_msg_init(pci_dev);
+
+ req.msg_type = ZSDA_FLR_SET_FUNCTION;
+
+ ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Send msg");
+ return ZSDA_FAILED;
+ }
+
+ ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+ if (ret) {
+ ZSDA_LOG(ERR, "Failed! Receive msg");
+ return ZSDA_FAILED;
+ }
+
+ return ZSDA_SUCCESS;
+}
+
+static void
+zsda_interrupt_handler(__rte_unused void *param)
+{
+ return;
+}
+
+static void *
+intr_loop(void *dummy)
+{
+ uint32_t i = 0;
+ __rte_unused int n = 0;
+ struct rte_epoll_event event[MAX_EVENT];
+ uint32_t vector_total = 10;
+ struct rte_pci_device *pci_dev = (struct rte_pci_device *)dummy;
+ struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+ int ret;
+ ret = rte_intr_efd_enable(intr_handle, vector_total);
+ if (ret)
+ return NULL;
+
+ ret = rte_intr_callback_register(intr_handle, zsda_interrupt_handler,
+ (void *)0);
+ if (ret)
+ return NULL;
+
+ for (i = 0; i < vector_total; ++i) {
+ rte_intr_rx_ctl(intr_handle, RTE_EPOLL_PER_THREAD,
+ RTE_INTR_EVENT_ADD, i, (void *)&i);
+ }
+
+ ret = rte_intr_enable(intr_handle);
+ if (ret != 0)
+ ZSDA_LOG(ERR, E_FUNC);
+
+ while (1) {
+ n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, MAX_EVENT,
+ 3000);
+ }
+ return NULL;
+}
+
+static void
+zsda_hot_unplug_callback(const char *device_name,
+ __rte_unused enum rte_dev_event_type type, void *arg)
+{
+ const struct rte_pci_device *pci_dev = (struct rte_pci_device *)arg;
+ const struct zsda_qp_hw *qp_hw = NULL;
+ int ret;
+
+ ret = zsda_queue_clear(pci_dev, qp_hw);
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(ERR, "Failed! used zsda_io q clear");
+ return;
+ }
+ ret = zsda_queue_stop(pci_dev, qp_hw);
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(ERR, "Failed! used zsda_io q stop");
+ return;
+ }
+
+ zsda_admin_q_clear(pci_dev);
+ zsda_admin_q_stop(pci_dev);
+
+ ZSDA_LOG(DEBUG, "The device: %s has been removed!", device_name);
+}
+
+static int
+zsda_hot_plug_handler(struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+
+ ret = rte_dev_hotplug_handle_enable();
+ if (ret) {
+ ZSDA_LOG(ERR, "Fail to enable hotplug handling.");
+ return ZSDA_FAILED;
+ }
+
+ ret = rte_dev_event_monitor_start();
+ if (ret) {
+ ZSDA_LOG(ERR, "Fail to start device event monitoring.");
+ return ZSDA_FAILED;
+ }
+
+ ret = rte_dev_event_callback_register(NULL, zsda_hot_unplug_callback,
+ (void *)pci_dev);
+
+ if (ret) {
+ ZSDA_LOG(ERR, "Fail to register device event callback");
+ return ZSDA_FAILED;
+ }
+ return ZSDA_SUCCESS;
+}
+
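+/*
+ * PCI probe: allocate the device state, start the admin queue, clear
+ * the IO queues, issue the FLR-close admin command, register the
+ * hot-unplug callback, read the hardware queue configuration, create
+ * the crypto and compress devices and start an interrupt polling
+ * thread.
+ */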
+static int
+zsda_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int sym_ret = 0;
+ int comp_ret = 0;
+ int ret;
+ pthread_t id;
+ struct zsda_pci_device *zsda_pci_dev;
+ struct zsda_dev_cmd_param zsda_dev_cmd_param[] = {
+ {SYM_ENQ_THRESHOLD_NAME, 0},
+ {HASH_ENQ_THRESHOLD_NAME, 0},
+ {COMP_ENQ_THRESHOLD_NAME, 0},
+ {EC_ENQ_THRESHOLD_NAME, 0},
+ {NULL, 0},
+ };
+
+ zsda_pci_dev = zsda_pci_device_allocate(pci_dev);
+ if (zsda_pci_dev == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -ENODEV;
+ }
+
+ zsda_num_used_qps = zsda_get_num_used_qps(zsda_pci_dev->pci_dev);
+
+ ret = zsda_admin_q_start(zsda_pci_dev->pci_dev);
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(ERR, "Failed! admin q start");
+ return ZSDA_FAILED;
+ }
+ const struct zsda_qp_hw *qp_hw = NULL;
+ ret = zsda_queue_clear(zsda_pci_dev->pci_dev, qp_hw);
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(ERR, "Failed! used zsda_io q clear");
+ return ZSDA_FAILED;
+ }
+
+ ret = zsda_close_flr(zsda_pci_dev);
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(ERR, "Failed! flr close");
+ return ZSDA_FAILED;
+ }
+
+ ret = zsda_hot_plug_handler(zsda_pci_dev->pci_dev);
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(ERR, "Failed! zsda_hot_plug_handler");
+ return ZSDA_FAILED;
+ }
+
+ zsda_get_queue_cfg(zsda_pci_dev);
+
+ sym_ret = zsda_sym_dev_create(zsda_pci_dev, zsda_dev_cmd_param);
+ comp_ret = zsda_comp_dev_create(zsda_pci_dev, zsda_dev_cmd_param);
+
+ if (sym_ret | comp_ret) {
+ ZSDA_LOG(ERR, "Failed! dev create");
+ zsda_pci_dev_destroy(zsda_pci_dev, pci_dev);
+ return ZSDA_FAILED;
+ }
+
+ ret = pthread_create(&id, NULL, intr_loop, pci_dev);
+ if (ret)
+ ZSDA_LOG(ERR, E_CREATE);
+ return ZSDA_SUCCESS;
+}
+
+static int
+zsda_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct zsda_pci_device *zsda_pci_dev;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ zsda_pci_dev = zsda_get_zsda_dev_from_pci_dev(pci_dev);
+ if (zsda_pci_dev == NULL)
+ return 0;
+
+ if (zsda_admin_q_clear(zsda_pci_dev->pci_dev) == ZSDA_FAILED)
+ ZSDA_LOG(ERR, "Failed! q clear");
+
+ if (zsda_admin_q_stop(zsda_pci_dev->pci_dev) == ZSDA_FAILED)
+ ZSDA_LOG(ERR, "Failed! q stop");
+
+ return zsda_pci_dev_destroy(zsda_pci_dev, pci_dev);
+}
+
+/* clang-format off */
+static struct rte_pci_driver rte_zsda_pmd = {
+ .id_table = pci_id_zsda_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = zsda_pci_probe,
+ .remove = zsda_pci_remove };
+/* clang-format on */
+
+RTE_PMD_REGISTER_PCI(ZSDA_PCI_NAME, rte_zsda_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(ZSDA_PCI_NAME, pci_id_zsda_map);
+RTE_PMD_REGISTER_KMOD_DEP(ZSDA_PCI_NAME,
+ "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/common/zsda/zsda_device.h b/drivers/common/zsda/zsda_device.h
new file mode 100644
index 0000000..5558506
--- /dev/null
+++ b/drivers/common/zsda/zsda_device.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_DEVICE_H_
+#define _ZSDA_DEVICE_H_
+
+#include "bus_pci_driver.h"
+
+#include <rte_bus_pci.h>
+#include <rte_io.h>
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+#include "zsda_qp.h"
+
+#include "dev_driver.h"
+
+#define ZSDA_DETACHED (0)
+#define ZSDA_ATTACHED (1)
+
+#define MAGIC_FLR_CLR_SUCCESS 0xff
+#define MAGIC_FLR_CLR_FAIL 0xfe
+#define ZSDA_DEV_NAME_MAX_LEN 64
+#define MAX_QPS_ON_FUNCTION 8
+#define NUM_QPS 128
+
+#define SYM_ENQ_THRESHOLD_NAME "zsda_sym_enq_threshold"
+#define HASH_ENQ_THRESHOLD_NAME "zsda_hash_enq_threshold"
+#define COMP_ENQ_THRESHOLD_NAME "zsda_comp_enq_threshold"
+#define EC_ENQ_THRESHOLD_NAME "zsda_ec_enq_threshold"
+#define MAX_QP_THRESHOLD_SIZE 32
+
+#define ADMIN_WQ_BASE_ADDR_0 0x40
+#define ADMIN_WQ_BASE_ADDR_1 0x44
+#define ADMIN_WQ_BASE_ADDR_2 0x48
+#define ADMIN_WQ_BASE_ADDR_3 0x4C
+#define ADMIN_WQ_BASE_ADDR_4 0x50
+#define ADMIN_WQ_BASE_ADDR_5 0x54
+#define ADMIN_WQ_BASE_ADDR_6 0x58
+#define ADMIN_WQ_BASE_ADDR_7 0x5C
+
+#define ADMIN_CQ_BASE_ADDR_0 0x60
+#define ADMIN_CQ_BASE_ADDR_1 0x64
+#define ADMIN_CQ_BASE_ADDR_2 0x68
+#define ADMIN_CQ_BASE_ADDR_3 0x6C
+#define ADMIN_CQ_BASE_ADDR_4 0x70
+#define ADMIN_CQ_BASE_ADDR_5 0x74
+#define ADMIN_CQ_BASE_ADDR_6 0x78
+#define ADMIN_CQ_BASE_ADDR_7 0x7C
+
+#define IO_DB_INITIAL_CONFIG 0x1C00
+
+#define ADMIN_BUF_DATA_LEN 0x1C
+#define ADMIN_BUF_TOTAL_LEN 0x20
+
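+/* BAR0 offsets of the admin message window and the queue control registers. */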
+#define ZSDA_CSR_VERSION 0x0
+#define ZSDA_ADMIN_WQ 0x40
+#define ZSDA_ADMIN_WQ_BASE7 0x5C
+#define ZSDA_ADMIN_WQ_CRC 0x5C
+#define ZSDA_ADMIN_WQ_VERSION 0x5D
+#define ZSDA_ADMIN_WQ_FLAG 0x5E
+#define ZSDA_ADMIN_CQ 0x60
+#define ZSDA_ADMIN_CQ_BASE7 0x7C
+#define ZSDA_ADMIN_CQ_CRC 0x7C
+#define ZSDA_ADMIN_CQ_VERSION 0x7D
+#define ZSDA_ADMIN_CQ_FLAG 0x7E
+
+#define ZSDA_ADMIN_WQ_TAIL 0x80
+#define ZSDA_ADMIN_CQ_HEAD 0x84
+
+#define ZSDA_ADMIN_Q_START 0x100
+#define ZSDA_ADMIN_Q_STOP 0x100
+#define ZSDA_ADMIN_Q_STOP_RESP 0x104
+#define ZSDA_ADMIN_Q_CLR 0x108
+#define ZSDA_ADMIN_Q_CLR_RESP 0x10C
+
+#define ZSDA_IO_Q_START 0x200
+#define ZSDA_IO_Q_STOP 0x200
+#define ZSDA_IO_Q_STOP_RESP 0x400
+#define ZSDA_IO_Q_CLR 0x600
+#define ZSDA_IO_Q_CLR_RESP 0x800
+
+#define ZSDA_CSR_READ32(addr) rte_read32((addr))
+#define ZSDA_CSR_WRITE32(addr, value) rte_write32((value), (addr))
+#define ZSDA_CSR_READ16(addr) rte_read16((addr))
+#define ZSDA_CSR_WRITE16(addr, value) rte_write16((value), (addr))
+#define ZSDA_CSR_READ8(addr) rte_read8((addr))
+#define ZSDA_CSR_WRITE8(addr, value) rte_write8_relaxed((value), (addr))
+
+struct zsda_dev_cmd_param {
+ const char *name;
+ uint16_t val;
+};
+
+struct zsda_device_info {
+ const struct rte_memzone *mz;
+ /**< mz to store the: struct zsda_pci_device , so it can be
+ * shared across processes
+ */
+
+ struct rte_pci_device *pci_dev;
+
+ struct rte_device sym_rte_dev;
+ /**< This represents the crypto sym subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ struct rte_device comp_rte_dev;
+ /**< This represents the compression subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a compression-specific name
+ */
+};
+
+extern struct zsda_device_info zsda_devs[];
+
+struct zsda_sym_dev_private;
+struct zsda_comp_dev_private;
+
+struct zsda_qp_hw {
+ struct zsda_qp_hw_data data[MAX_QPS_ON_FUNCTION];
+};
+
+struct zsda_register_opt {
+ char op_type;
+ uint64_t addr;
+ uint32_t data;
+};
+
+/*
+ * This struct holds all the data about a ZSDA pci device
+ * including data about all services it supports.
+ * It contains
+ * - hw_data
+ * - config data
+ * - runtime data
+ * Note: as this data can be shared in a multi-process scenario,
+ * any pointers in it must also point to shared memory.
+ */
+struct zsda_pci_device {
+ /* Data used by all services */
+ char name[ZSDA_DEV_NAME_MAX_LEN];
+ /**< Name of zsda pci device */
+ uint8_t zsda_dev_id;
+ /**< Id of device instance for this zsda pci device */
+
+ rte_spinlock_t arb_csr_lock;
+ /**< lock to protect accesses to the arbiter CSR */
+
+ struct rte_pci_device *pci_dev;
+
+ /* Data relating to symmetric crypto service */
+ struct zsda_sym_dev_private *sym_dev;
+ /**< link back to cryptodev private data */
+
+ /* Data relating to compression service */
+ struct zsda_comp_dev_private *comp_dev;
+ /**< link back to compressdev private data */
+
+ struct zsda_qp_hw zsda_hw_qps[ZSDA_MAX_SERVICES];
+ uint16_t zsda_qp_hw_num[ZSDA_MAX_SERVICES];
+};
+
+struct zsda_pci_device *
+zsda_pci_device_allocate(struct rte_pci_device *pci_dev);
+
+struct zsda_pci_device *
+zsda_get_zsda_dev_from_pci_dev(struct rte_pci_device *pci_dev);
+
+__rte_weak int
+zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev __rte_unused,
+ struct zsda_dev_cmd_param *zsda_dev_cmd_param);
+
+__rte_weak int
+zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev __rte_unused);
+
+__rte_weak int
+zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev __rte_unused,
+ struct zsda_dev_cmd_param *zsda_dev_cmd_param);
+
+__rte_weak int
+zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev __rte_unused);
+
+enum zsda_service_type
+zsda_get_queue_cfg_by_id(struct zsda_pci_device *zsda_pci_dev, uint8_t qid,
+ qinfo *qcfg);
+int zsda_set_queue_cycle(struct zsda_pci_device *zsda_pci_dev, uint8_t qid);
+
+int zsda_queue_start(const struct rte_pci_device *pci_dev,
+ const struct zsda_qp_hw *qp_hw __rte_unused);
+int zsda_queue_stop(const struct rte_pci_device *pci_dev,
+ const struct zsda_qp_hw *qp_hw __rte_unused);
+int zsda_queue_clear(const struct rte_pci_device *pci_dev,
+ const struct zsda_qp_hw *qp_hw __rte_unused);
+
+int zsda_admin_q_start(const struct rte_pci_device *pci_dev);
+int zsda_admin_q_stop(const struct rte_pci_device *pci_dev);
+int zsda_admin_q_clear(const struct rte_pci_device *pci_dev);
+
+int zsda_set_cycle_head_tail(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_close_flr(const struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_DEVICE_H_ */
diff --git a/drivers/common/zsda/zsda_logs.c b/drivers/common/zsda/zsda_logs.c
new file mode 100644
index 0000000..045c168
--- /dev/null
+++ b/drivers/common/zsda/zsda_logs.c
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_log.h>
+#include <rte_hexdump.h>
+
+#include "zsda_logs.h"
+
+int
+zsda_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len)
+{
+ if (rte_log_can_log(logtype, level))
+ rte_hexdump(rte_log_get_stream(), title, buf, len);
+
+ return 0;
+}
+
+RTE_LOG_REGISTER(zsda_gen_logtype, pmd.zsda_general, NOTICE);
+RTE_LOG_REGISTER(zsda_dp_logtype, pmd.zsda_dp, NOTICE);
diff --git a/drivers/common/zsda/zsda_logs.h b/drivers/common/zsda/zsda_logs.h
new file mode 100644
index 0000000..f6b27e2
--- /dev/null
+++ b/drivers/common/zsda/zsda_logs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_LOGS_H_
+#define _ZSDA_LOGS_H_
+
+extern int zsda_gen_logtype;
+extern int zsda_dp_logtype;
+
+#define ZSDA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_##level, (zsda_gen_logtype & 0xff), \
+ "%s(): [%d] " fmt "\n", __func__, __LINE__, ##args)
+
+#define ZSDA_DP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_##level, zsda_dp_logtype, "%s(): " fmt "\n", __func__, \
+ ##args)
+
+#define ZSDA_DP_HEXDUMP_LOG(level, title, buf, len) \
+ zsda_hexdump_log(RTE_LOG_##level, zsda_dp_logtype, title, buf, len)
+
+/**
+ * zsda_hexdump_log - Dump out memory in a special hex dump format.
+ *
+ * Dump out the message buffer in a special hex dump output format with
+ * characters printed for each line of 16 hex values. The message will be sent
+ * to the stream used by the rte_log infrastructure.
+ */
+int zsda_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len);
+
+#endif /* _ZSDA_LOGS_H_ */
diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c
new file mode 100644
index 0000000..fb6c638
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <errno.h>
+#include <limits.h>
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_prefetch.h>
+
+#include "zsda_common.h"
+#include "zsda_device.h"
+#include "zsda_logs.h"
+#include "zsda_qp.h"
+#include "zsda_sym.h"
+
+#define ZSDA_MAX_DESC 512
+#define MAX_NUM_OPS 0x1FF
+
+#define RING_DIR_TX 0
+#define RING_DIR_RX 1
+
+struct ring_size {
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+
+struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+ [ZSDA_SERVICE_SYMMETRIC_ENCRYPT] = {128, 16},
+ [ZSDA_SERVICE_SYMMETRIC_DECRYPT] = {128, 16},
+ [ZSDA_SERVICE_COMPRESSION] = {32, 16},
+ [ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+ [ZSDA_SERVICE_HASH_ENCODE] = {32, 16},
+};
+
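+/*
+ * Query every hardware queue for its service type and record, per
+ * service, which rings it owns and the descriptor sizes to use.
+ */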
+void
+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
+{
+ uint8_t i = 0;
+ uint32_t index = 0;
+ enum zsda_service_type type = ZSDA_SERVICE_INVALID;
+ struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
+ qinfo qcfg;
+
+ for (i = 0; i < MAX_QPS_ON_FUNCTION; i++) {
+ type = zsda_get_queue_cfg_by_id(zsda_pci_dev, i, &qcfg);
+ if (type >= ZSDA_SERVICE_INVALID)
+ continue;
+
+ index = zsda_pci_dev->zsda_qp_hw_num[type];
+ zsda_hw_qps[type].data[index].used = 1;
+ zsda_hw_qps[type].data[index].tx_ring_num = i;
+ zsda_hw_qps[type].data[index].rx_ring_num = i;
+ zsda_hw_qps[type].data[index].tx_msg_size =
+ zsda_qp_hw_ring_size[type].tx_msg_size;
+ zsda_hw_qps[type].data[index].rx_msg_size =
+ zsda_qp_hw_ring_size[type].rx_msg_size & 0xffff;
+
+ zsda_pci_dev->zsda_qp_hw_num[type]++;
+ }
+}
+
+struct zsda_qp_hw *
+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+ enum zsda_service_type service)
+{
+ if (service < ZSDA_SERVICE_INVALID)
+ return &(zsda_pci_dev->zsda_hw_qps[service]);
+ else {
+ ZSDA_LOG(ERR, "Failed! service type is wrong");
+ return NULL;
+ }
+}
+
+uint16_t
+zsda_qps_per_service(struct zsda_pci_device *zsda_pci_dev,
+ enum zsda_service_type service)
+{
+ if (service < ZSDA_SERVICE_INVALID)
+ return zsda_pci_dev->zsda_qp_hw_num[service];
+ else
+ return 0;
+}
+
+uint16_t
+zsda_crypto_max_nb_qps(struct zsda_pci_device *zsda_pci_dev)
+{
+ uint16_t encrypt = zsda_qps_per_service(zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+ uint16_t decrypt = zsda_qps_per_service(zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+ uint16_t hash =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+ uint16_t min = 0;
+
+ if ((encrypt == MAX_QPS_ON_FUNCTION) ||
+ (decrypt == MAX_QPS_ON_FUNCTION) ||
+ (hash == MAX_QPS_ON_FUNCTION))
+ min = MAX_QPS_ON_FUNCTION;
+ else {
+ min = (encrypt < decrypt) ? encrypt : decrypt;
+ min = (min < hash) ? min : hash;
+ }
+
+ if (min == 0)
+ return MAX_QPS_ON_FUNCTION;
+ return min;
+}
+
+uint16_t
+zsda_comp_max_nb_qps(struct zsda_pci_device *zsda_pci_dev)
+{
+ uint16_t comp =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);
+ uint16_t decomp =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);
+ uint16_t min = 0;
+
+ if ((comp == MAX_QPS_ON_FUNCTION) ||
+ (decomp == MAX_QPS_ON_FUNCTION))
+ min = MAX_QPS_ON_FUNCTION;
+ else
+ min = (comp < decomp) ? comp : decomp;
+
+ if (min == 0)
+ return MAX_QPS_ON_FUNCTION;
+ return min;
+}
+
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, unsigned int queue_size,
+ unsigned int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == (SOCKET_ID_ANY & 0xffff)) ||
+ (socket_id == (mz->socket_id & 0xffff)))) {
+ ZSDA_LOG(DEBUG,
+ "re-use memzone already allocated for %s",
+ queue_name);
+ return mz;
+ }
+ ZSDA_LOG(ERR, E_MALLOC);
+ return NULL;
+ }
+
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ (int)(socket_id & 0xfff),
+ RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
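+/*
+ * Create one ring: reserve an IOVA-contiguous memzone for the
+ * descriptors, pick up the current head/tail/cycle reported by the
+ * device and program the ring base address into the corresponding
+ * WQ or CQ ring-base register.
+ */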
+static int
+zsda_queue_create(uint32_t dev_id, struct zsda_queue *queue,
+ struct zsda_qp_config *qp_conf, uint8_t dir)
+{
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ qinfo qcfg;
+ int ret = 0;
+
+ uint16_t desc_size = ((dir == RING_DIR_TX) ? qp_conf->hw->tx_msg_size
+ : qp_conf->hw->rx_msg_size);
+
+ unsigned int queue_size_bytes = qp_conf->nb_descriptors * desc_size;
+
+ queue->hw_queue_number =
+ ((dir == RING_DIR_TX) ? qp_conf->hw->tx_ring_num
+ : qp_conf->hw->rx_ring_num);
+
+ struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+ struct zsda_pci_device *zsda_dev =
+ (struct zsda_pci_device *)zsda_devs[dev_id].mz->addr;
+
+ ret = zsda_set_cycle_head_tail(zsda_dev);
+ if (ret == ZSDA_FAILED)
+ ZSDA_LOG(ERR, "Failed! set cytcle");
+
+ zsda_get_queue_cfg_by_id(zsda_dev, queue->hw_queue_number, &qcfg);
+
+ if (dir == RING_DIR_TX)
+ snprintf(queue->memz_name, sizeof(queue->memz_name),
+ "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,
+ qp_conf->service_str, "qptxmem",
+ queue->hw_queue_number);
+ else
+ snprintf(queue->memz_name, sizeof(queue->memz_name),
+ "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,
+ qp_conf->service_str, "qprxmem",
+ queue->hw_queue_number);
+
+ qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ rte_socket_id());
+ if (qp_mz == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (uint8_t *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->iova;
+ queue->modulo_mask = MAX_NUM_OPS;
+ queue->msg_size = desc_size;
+
+ queue->head = (dir == RING_DIR_TX) ? qcfg.wq_head : qcfg.cq_head;
+ queue->tail = (dir == RING_DIR_TX) ? qcfg.wq_tail : qcfg.cq_tail;
+
+ if ((queue->head == 0) && (queue->tail == 0))
+ qcfg.cycle += 1;
+
+ queue->valid = qcfg.cycle & 0xff;
+ queue->queue_size = ZSDA_MAX_DESC;
+ queue->io_addr = pci_dev->mem_resource[0].addr;
+
+ memset(queue->base_addr, 0x0, queue_size_bytes);
+
+ io_addr = pci_dev->mem_resource[0].addr;
+
+ if (dir == RING_DIR_TX)
+ ZSDA_CSR_WQ_RING_BASE(io_addr, queue->hw_queue_number,
+ queue->base_phys_addr);
+ else
+ ZSDA_CSR_CQ_RING_BASE(io_addr, queue->hw_queue_number,
+ queue->base_phys_addr);
+
+ return 0;
+}
+
+static void
+zsda_queue_delete(const struct zsda_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status = 0;
+
+ if (queue == NULL) {
+ ZSDA_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ /* Write an unused pattern to the queue memory. */
+ memset(queue->base_addr, 0x0,
+ (uint16_t)(queue->queue_size * queue->msg_size));
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ ZSDA_LOG(ERR, E_FREE);
+ } else
+ ZSDA_LOG(DEBUG, "queue %s doesn't exist", queue->memz_name);
+}
+
+static int
+cookie_init(uint32_t dev_id, struct zsda_qp **qp_addr, uint16_t queue_pair_id,
+ struct zsda_qp_config *zsda_qp_conf)
+{
+ struct zsda_qp *qp = *qp_addr;
+ struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
+ enum zsda_service_type type = zsda_qp_conf->service_type;
+
+ if (zsda_qp_conf->nb_descriptors != ZSDA_MAX_DESC)
+ ZSDA_LOG(ERR, "Can't create qp for %u descriptors",
+ zsda_qp_conf->nb_descriptors);
+
+ qp->srv[type].nb_descriptors = zsda_qp_conf->nb_descriptors;
+ qp->srv[type].enqueued = qp->srv[type].dequeued = 0;
+
+ qp->srv[type].op_cookies = rte_zmalloc_socket(
+ "zsda PMD op cookie pointer",
+ zsda_qp_conf->nb_descriptors *
+ sizeof(*qp->srv[type].op_cookies),
+ RTE_CACHE_LINE_SIZE, zsda_qp_conf->socket_id);
+
+ if (qp->srv[type].op_cookies == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return -ENOMEM;
+ }
+
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s%d_cks_%s_qp%hu",
+ pci_dev->driver->driver.name, dev_id,
+ zsda_qp_conf->service_str, queue_pair_id);
+
+ qp->srv[type].op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->srv[type].op_cookie_pool == NULL)
+ qp->srv[type].op_cookie_pool = rte_mempool_create(
+ op_cookie_pool_name, qp->srv[type].nb_descriptors,
+ zsda_qp_conf->cookie_size, 64, 0, NULL, NULL, NULL,
+ NULL, (int)(rte_socket_id() & 0xfff), 0);
+ if (!qp->srv[type].op_cookie_pool) {
+ ZSDA_LOG(ERR, E_CREATE);
+ goto create_err;
+ }
+
+ for (i = 0; i < qp->srv[type].nb_descriptors; i++) {
+ if (rte_mempool_get(qp->srv[type].op_cookie_pool,
+ &qp->srv[type].op_cookies[i])) {
+ ZSDA_LOG(ERR, "ZSDA PMD Cannot get op_cookie");
+ goto create_err;
+ }
+ memset(qp->srv[type].op_cookies[i], 0,
+ zsda_qp_conf->cookie_size);
+ }
+ return 0;
+
+create_err:
+ if (qp->srv[type].op_cookie_pool)
+ rte_mempool_free(qp->srv[type].op_cookie_pool);
+ rte_free(qp->srv[type].op_cookies);
+
+ return -EFAULT;
+}
+
+int
+zsda_queue_pair_setup(uint32_t dev_id, struct zsda_qp **qp_addr,
+ uint16_t queue_pair_id,
+ struct zsda_qp_config *zsda_qp_conf)
+{
+ struct zsda_qp *qp = *qp_addr;
+ struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+ int ret = 0;
+ enum zsda_service_type type = zsda_qp_conf->service_type;
+
+ if (type >= ZSDA_SERVICE_INVALID) {
+ ZSDA_LOG(ERR, "Failed! servie type");
+ return -EINVAL;
+ }
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -EINVAL;
+ }
+
+ if (zsda_queue_create(dev_id, &(qp->srv[type].tx_q), zsda_qp_conf,
+ RING_DIR_TX) != 0) {
+ ZSDA_LOG(ERR, E_CREATE);
+ return -EFAULT;
+ }
+
+ if (zsda_queue_create(dev_id, &(qp->srv[type].rx_q), zsda_qp_conf,
+ RING_DIR_RX) != 0) {
+ ZSDA_LOG(ERR, E_CREATE);
+ zsda_queue_delete(&(qp->srv[type].tx_q));
+ return -EFAULT;
+ }
+
+ ret = cookie_init(dev_id, qp_addr, queue_pair_id, zsda_qp_conf);
+ if (ret) {
+ zsda_queue_delete(&(qp->srv[type].tx_q));
+ zsda_queue_delete(&(qp->srv[type].rx_q));
+ qp->srv[type].used = 0;
+ return ret;
+ }
+
+ qp->srv[type].used = 1;
+ return ret;
+}
+
+int
+zsda_queue_pair_release(struct zsda_qp **qp_addr)
+{
+ struct zsda_qp *qp = *qp_addr;
+ uint32_t i;
+ enum zsda_service_type type = 0;
+
+ if (qp == NULL) {
+ ZSDA_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (qp->srv[type].enqueued != qp->srv[type].dequeued)
+ return -EAGAIN;
+ }
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (!qp->srv[type].used)
+ continue;
+
+ zsda_queue_delete(&(qp->srv[type].tx_q));
+ zsda_queue_delete(&(qp->srv[type].rx_q));
+ qp->srv[type].used = 0;
+ for (i = 0; i < qp->srv[type].nb_descriptors; i++)
+ rte_mempool_put(qp->srv[type].op_cookie_pool,
+ qp->srv[type].op_cookies[i]);
+
+ if (qp->srv[type].op_cookie_pool)
+ rte_mempool_free(qp->srv[type].op_cookie_pool);
+
+ rte_free(qp->srv[type].op_cookies);
+ }
+
+ rte_free(qp);
+ *qp_addr = NULL;
+
+ return 0;
+}
+
+int
+zsda_fill_sgl_offset(struct rte_mbuf *buf, uint32_t offset,
+ struct zsda_sgl *sgl, phys_addr_t sgl_phy_addr,
+ uint32_t op_src_dst_length, const uint32_t max_segs)
+{
+ uint32_t nr = 0;
+ uint32_t buffer_len = 0;
+
+ if (max_segs > (ZSDA_SGL_MAX_NUMBER - 1)) {
+ ZSDA_LOG(ERR, "Failed! overflow!");
+ return ZSDA_FAILED;
+ }
+
+ for (nr = 0; (buf && (nr < max_segs));) {
+ if (offset >= rte_pktmbuf_data_len(buf)) {
+ offset -= rte_pktmbuf_data_len(buf);
+ buf = buf->next;
+ continue;
+ }
+ memset(&(sgl->buffers[nr]), 0, sizeof(struct zsda_buf));
+ if ((nr > 0) && (((nr + 1) % ZSDA_SGL_FRAGMENT_SIZE) == 0) &&
+ (buf->next != NULL)) {
+ sgl->buffers[nr].len = SGL_TYPE_PHYS_ADDR;
+ sgl->buffers[nr].addr =
+ sgl_phy_addr +
+ ((nr + 1) * sizeof(struct zsda_buf));
+ sgl->buffers[nr].type = SGL_TYPE_NEXT_LIST;
+ ++nr;
+ continue;
+ }
+ sgl->buffers[nr].len =
+ rte_pktmbuf_data_len(buf) - (offset & 0xffff);
+ sgl->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+ sgl->buffers[nr].type = SGL_TYPE_PHYS_ADDR;
+ buffer_len += sgl->buffers[nr].len;
+
+ buf = buf->next;
+ offset = 0;
+ ++nr;
+ }
+
+ if (buffer_len > op_src_dst_length) {
+ ZSDA_LOG(ERR, "len wrong! 0x%x != 0x%x", buffer_len,
+ op_src_dst_length);
+ return -EINVAL;
+ }
+
+ if (nr == 0) {
+ ZSDA_LOG(ERR, E_RESULT);
+ return -EINVAL;
+ }
+ sgl->buffers[nr - 1].type = SGL_TYPE_LAST_PHYS_ADDR;
+
+ if (buf && unlikely(buf->next) && (nr == max_segs)) {
+ ZSDA_LOG(ERR, "Failed! Data spans more than max_segs (%u)",
+ max_segs);
+ return -EINVAL;
+ }
+
+ return ZSDA_SUCCESS;
+}
+
+int
+zsda_get_sgl_num(struct zsda_sgl *sgl)
+{
+ int sgl_num = 0;
+
+ while (sgl->buffers[sgl_num].type != SGL_TYPE_LAST_PHYS_ADDR) {
+ sgl_num++;
+ if (sgl_num >= ZSDA_SGL_MAX_NUMBER)
+ return ZSDA_FAILED;
+ }
+ sgl_num++;
+ return sgl_num;
+}
+
+int
+find_next_free_cookie(struct zsda_queue *queue, void **op_cookie, uint16_t *idx)
+{
+ uint16_t old_tail = queue->tail;
+ uint16_t tail = queue->tail;
+ struct zsda_op_cookie *cookie = NULL;
+
+ do {
+ cookie = (struct zsda_op_cookie *)op_cookie[tail];
+ if (!cookie->used) {
+ *idx = tail & 0xffff;
+ return 0;
+ } else if (queue->valid == cookie->valid)
+ return -EINVAL;
+ tail = zsda_modulo_16(tail + 1, queue->modulo_mask);
+
+ } while (old_tail != tail);
+
+ return -EINVAL;
+}
+
+static int
+enqueue(void *op, struct zsda_qp *qp)
+{
+ uint16_t new_tail;
+ enum zsda_service_type type;
+ uint16_t num_enqueue;
+ uint16_t num_dequeue;
+ void **op_cookie;
+ int ret = 0;
+ register struct zsda_queue *queue;
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (qp->srv[type].used) {
+ if (!qp->srv[type].match(op))
+ continue;
+
+ queue = &qp->srv[type].tx_q;
+ num_enqueue = qp->srv[type].enqueued;
+ num_dequeue = qp->srv[type].dequeued;
+
+ if (zsda_modulo_16(num_enqueue - num_dequeue,
+ queue->queue_size - (uint16_t)1) ==
+ (queue->queue_size - 1)) {
+ ret = -EBUSY;
+ break;
+ }
+ if (queue->pushed_wqe == (queue->queue_size - 1)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ op_cookie = qp->srv[type].op_cookies;
+
+ if (find_next_free_cookie(queue, op_cookie,
+ &new_tail)) {
+ ret = -EBUSY;
+ break;
+ }
+ ret = qp->srv[type].tx_cb(op, queue, op_cookie,
+ new_tail);
+ if (ret)
+ break;
+ queue->tail = zsda_modulo_16(new_tail + 1,
+ queue->queue_size - 1);
+
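+ /*
+ * The tail wrapped past the end of the ring, so advance
+ * the queue's valid (generation) tag; descriptors written
+ * after the wrap are stamped with the new value.
+ */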
+ if (new_tail > queue->tail)
+ queue->valid =
+ zsda_modulo_8(queue->valid + 1, 0xff);
+
+ qp->srv[type].enqueued++;
+ queue->pushed_wqe++;
+ break;
+ }
+ }
+
+ if (type < ZSDA_SERVICE_INVALID) {
+ if (ret)
+ qp->srv[type].stats.enqueue_err_count++;
+ else
+ qp->srv[type].stats.enqueued_count++;
+ }
+
+ return ret;
+}
+
+void
+tx_write_tail(struct zsda_queue *queue)
+{
+ if (queue->pushed_wqe)
+ WRITE_CSR_WQ_TAIL(queue->io_addr, queue->hw_queue_number,
+ queue->tail);
+
+ queue->pushed_wqe = 0;
+}
+
+uint16_t
+zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, uint16_t nb_ops)
+{
+ register int ret = -1;
+ enum zsda_service_type type = 0;
+ uint16_t i = 0;
+ uint16_t nb_send = 0;
+ void *op;
+
+ for (i = 0; i < nb_ops; i++) {
+ op = ops[i];
+ ret = enqueue(op, qp);
+ if (ret < 0)
+ break;
+ nb_send++;
+ }
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++)
+ if (qp->srv[type].used)
+ tx_write_tail(&qp->srv[type].tx_q);
+
+ return nb_send;
+}
+
+static void
+dequeue(struct qp_srv *srv, void **ops, uint32_t nb_ops, uint32_t *nb)
+{
+ uint16_t head;
+ struct zsda_cqe *cqe = NULL;
+ struct zsda_queue *queue = &srv->rx_q;
+ struct zsda_op_cookie *cookie = NULL;
+
+ head = queue->head;
+
+ while (*nb < nb_ops) {
+ cqe = (struct zsda_cqe *)((uint8_t *)queue->base_addr +
+ head * queue->msg_size);
+
+ if (!CQE_VALID(cqe->err1))
+ break;
+
+ printf("Cqe , opcode - 0x%x, sid - 0x%x, tx_real_length - 0x%x, err0 - 0x%x, err1 - 0x%x\n",
+ cqe->op_code, cqe->sid, cqe->tx_real_length, cqe->err0, cqe->err1);
+
+ if (cqe->sid >= queue->queue_size) {
+ head = zsda_modulo_16(head + 1, queue->modulo_mask);
+ break;
+ }
+ cookie = (struct zsda_op_cookie *)srv->op_cookies[cqe->sid];
+
+ if (cqe->sid == cookie->sid) {
+ ops[*nb] = cookie->op;
+ } else {
+ ZSDA_LOG(ERR, "unequal");
+ srv->stats.dequeue_err_count++;
+ break;
+ }
+
+ if (!cookie->used) {
+ ZSDA_LOG(DEBUG, "Failed! Cookie unused");
+ } else {
+ srv->rx_cb(ops[*nb], cqe);
+ cookie->used = false;
+ (*nb)++;
+ srv->dequeued++;
+ }
+
+ memset(cqe, 0x0, sizeof(struct zsda_cqe));
+
+ head = zsda_modulo_16(head + 1, queue->modulo_mask);
+ queue->head = head;
+
+ WRITE_CSR_CQ_HEAD(queue->io_addr, queue->hw_queue_number, head);
+ }
+}
+
+uint16_t
+zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, uint16_t nb_ops)
+{
+ uint32_t nb = 0;
+ uint32_t type = 0;
+ struct qp_srv *srv;
+
+ for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+ if (!qp->srv[type].used)
+ continue;
+ srv = &qp->srv[type];
+ dequeue(srv, ops, nb_ops, &nb);
+ if (nb >= nb_ops)
+ return nb_ops;
+ }
+ return nb;
+}
+
+int
+common_setup_qp(uint32_t zsda_dev_id, struct zsda_qp **qp_addr,
+ uint16_t queue_pair_id, struct zsda_qp_config *conf)
+{
+ uint32_t i;
+ int ret = 0;
+ struct zsda_qp *qp;
+
+ ret = zsda_queue_pair_setup(zsda_dev_id, qp_addr, queue_pair_id, conf);
+ if (ret != 0)
+ return ret;
+
+ qp = (struct zsda_qp *)*qp_addr;
+
+ for (i = 0; i < qp->srv[conf->service_type].nb_descriptors; i++) {
+ struct zsda_op_cookie *cookie =
+ qp->srv[conf->service_type].op_cookies[i];
+
+ cookie->sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct zsda_op_cookie, sgl_src);
+
+ cookie->sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct zsda_op_cookie, sgl_dst);
+ }
+ return 0;
+}
diff --git a/drivers/common/zsda/zsda_qp.h b/drivers/common/zsda/zsda_qp.h
new file mode 100644
index 0000000..f5eba50
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_QP_H_
+#define _ZSDA_QP_H_
+
+#include <rte_bus_pci.h>
+
+#include "zsda_common.h"
+
+#define WQ_CSR_LBASE 0x1000
+#define WQ_CSR_UBASE 0x1004
+#define CQ_CSR_LBASE 0x1400
+#define CQ_CSR_UBASE 0x1404
+#define WQ_TAIL 0x1800
+#define CQ_HEAD 0x1804
+
+/** Common, i.e. not service-specific, statistics */
+struct zsda_common_stats {
+ uint64_t enqueued_count;
+ /**< Count of all operations enqueued */
+ uint64_t dequeued_count;
+ /**< Count of all operations dequeued */
+
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations enqueued */
+ uint64_t dequeue_err_count;
+ /**< Total error count on operations dequeued */
+ uint64_t threshold_hit_count;
+ /**< Total number of times min qp threshold condition was fulfilled */
+};
+
+/**
+ * Structure associated with each queue.
+ */
+struct zsda_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ uint8_t *io_addr;
+ uint8_t *base_addr; /* Base address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
+ uint16_t head; /* Shadow copy of the head */
+ uint16_t tail; /* Shadow copy of the tail */
+ uint16_t modulo_mask;
+ uint16_t msg_size;
+ uint16_t queue_size;
+ uint16_t pushed_wqe;
+
+ uint32_t hw_queue_number;
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+
+ uint8_t valid;
+ uint16_t sid;
+ uint16_t enqueued;
+ uint16_t dequeued __rte_aligned(4);
+};
+
+typedef void (*rx_callback)(void *op_in, struct zsda_cqe *cqe);
+typedef int (*tx_callback)(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail);
+typedef int (*srv_match)(void *op_in);
+
+struct qp_srv {
+ uint32_t used;
+ struct zsda_queue tx_q;
+ struct zsda_queue rx_q;
+ rx_callback rx_cb;
+ tx_callback tx_cb;
+ srv_match match;
+ struct zsda_common_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint16_t nb_descriptors;
+ /**< zsda device this qp is on */
+ uint16_t enqueued;
+ uint16_t dequeued;
+};
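+/*
+ * Note (illustrative): with the power-of-two ring used by this driver,
+ * the number of in-flight requests for a service can be derived from
+ * the counters above, e.g. zsda_modulo_16(enqueued - dequeued,
+ * queue_size - 1); enqueue() treats the ring as full one slot early.
+ */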
+
+struct zsda_qp {
+ void *mmap_bar_addr;
+ struct qp_srv srv[ZSDA_MAX_SERVICES];
+
+ uint16_t max_inflights;
+ uint16_t min_enq_burst_threshold;
+
+} __rte_cache_aligned;
+
+struct zsda_hw_qp {
+ void *mmap_bar_addr;
+ uint32_t used;
+ uint16_t tx_q[2];
+ uint16_t rx_q[2];
+ enum zsda_service_type service_type;
+} __rte_cache_aligned;
+
+struct zsda_qp_hw_data {
+ uint32_t used;
+ uint8_t tx_ring_num;
+ uint8_t rx_ring_num;
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+
+struct zsda_qp_config {
+ enum zsda_service_type service_type;
+ const struct zsda_qp_hw_data *hw;
+ uint32_t nb_descriptors;
+ uint32_t cookie_size;
+ int socket_id;
+ const char *service_str;
+};
+
+struct zsda_qp_hw *zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+ enum zsda_service_type service);
+uint16_t zsda_qps_per_service(struct zsda_pci_device *zsda_pci_dev,
+ enum zsda_service_type service);
+
+uint16_t zsda_comp_max_nb_qps(struct zsda_pci_device *zsda_pci_dev);
+uint16_t zsda_crypto_max_nb_qps(struct zsda_pci_device *zsda_pci_dev);
+
+void zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+
+static inline uint32_t
+zsda_modulo_32(uint32_t data, uint32_t modulo_mask)
+{
+ return (data) & (modulo_mask);
+}
+static inline uint16_t
+zsda_modulo_16(uint16_t data, uint16_t modulo_mask)
+{
+ return (data) & (modulo_mask);
+}
+static inline uint8_t
+zsda_modulo_8(uint8_t data, uint8_t modulo_mask)
+{
+ return (data) & (modulo_mask);
+}
+
+/* CSR write macro */
+#define ZSDA_CSR_WR(csrAddr, csrOffset, val) \
+ rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+#define ZSDA_CSR_WC_WR(csrAddr, csrOffset, val) \
+ rte_write32_wc(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ZSDA_CSR_RD(csrAddr, csrOffset) \
+ rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ZSDA_CSR_WQ_RING_BASE(csr_base_addr, ring, value) \
+ do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_LBASE, \
+ l_base); \
+ ZSDA_LOG(INFO, "l_basg - offest:0x%x, value:0x%x", \
+ ((ring << 3) + WQ_CSR_LBASE), l_base); \
+ ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_UBASE, \
+ u_base); \
+ ZSDA_LOG(INFO, "h_base - offest:0x%x, value:0x%x", \
+ ((ring << 3) + WQ_CSR_UBASE), u_base); \
+ } while (0)
+
+#define ZSDA_CSR_CQ_RING_BASE(csr_base_addr, ring, value) \
+ do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_LBASE, \
+ l_base); \
+ ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_UBASE, \
+ u_base); \
+ } while (0)
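+/*
+ * Example (illustrative values): for ring 2 and a 64-bit base of
+ * 0x0000001234567890ULL, ZSDA_CSR_WQ_RING_BASE() writes 0x34567890 to
+ * offset 0x1010 (WQ_CSR_LBASE + (2 << 3)) and 0x00000012 to offset
+ * 0x1014 (WQ_CSR_UBASE + (2 << 3)).
+ */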
+
+#define READ_CSR_WQ_HEAD(csr_base_addr, ring) \
+ ZSDA_CSR_RD(csr_base_addr, WQ_TAIL + (ring << 3))
+#define WRITE_CSR_WQ_TAIL(csr_base_addr, ring, value) \
+ ZSDA_CSR_WC_WR(csr_base_addr, WQ_TAIL + (ring << 3), value)
+#define READ_CSR_CQ_HEAD(csr_base_addr, ring) \
+ ZSDA_CSR_RD(csr_base_addr, WQ_TAIL + (ring << 3))
+#define WRITE_CSR_CQ_HEAD(csr_base_addr, ring, value) \
+ ZSDA_CSR_WC_WR(csr_base_addr, CQ_HEAD + (ring << 3), value)
+
+#define WRITE_CSR_WQ_HEAD()
+
+uint16_t zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, uint16_t nb_ops);
+uint16_t zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, uint16_t nb_ops);
+
+void tx_write_tail(struct zsda_queue *queue);
+int zsda_queue_pair_setup(uint32_t dev_id, struct zsda_qp **qp_addr,
+ uint16_t queue_pair_id,
+ struct zsda_qp_config *zsda_qp_conf);
+
+int zsda_queue_pair_release(struct zsda_qp **qp_addr);
+int zsda_fill_sgl_offset(struct rte_mbuf *buf, uint32_t offset,
+ struct zsda_sgl *sgl, phys_addr_t sgl_phy_addr,
+ uint32_t op_src_dst_length, const uint32_t max_segs);
+
+int zsda_get_sgl_num(struct zsda_sgl *sgl);
+int zsda_sgl_opt_addr_lost(struct rte_mbuf *mbuf);
+
+int find_next_free_cookie(struct zsda_queue *queue, void **op_cookie,
+ uint16_t *idx);
+int common_setup_qp(uint32_t dev_id, struct zsda_qp **qp_addr,
+ uint16_t queue_pair_id, struct zsda_qp_config *conf);
+
+#endif /* _ZSDA_QP_H_ */
diff --git a/drivers/compress/zsda/zsda_comp.c b/drivers/compress/zsda/zsda_comp.c
new file mode 100644
index 0000000..c13378a
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp.c
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_comp.h>
+#include <rte_hexdump.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_spinlock.h>
+
+#include "zsda_comp.h"
+#include "zsda_comp_pmd.h"
+#include "zsda_logs.h"
+
+int
+comp_match(void *op_in)
+{
+ const struct rte_comp_op *op = (struct rte_comp_op *)op_in;
+ const struct zsda_comp_xform *xform =
+ (struct zsda_comp_xform *)op->private_xform;
+
+ if (op->op_type != RTE_COMP_OP_STATELESS)
+ return 0;
+
+ if (xform->type != RTE_COMP_COMPRESS)
+ return 0;
+
+ return 1;
+}
+
+static uint8_t
+get_opcode(const struct zsda_comp_xform *xform)
+{
+ if (xform->type == RTE_COMP_COMPRESS) {
+ if (xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ return ZSDA_OPC_EC_GZIP;
+ if (xform->checksum_type == RTE_COMP_CHECKSUM_ADLER32)
+ return ZSDA_OPC_EC_ZLIB;
+ }
+ if (xform->type == RTE_COMP_DECOMPRESS) {
+ if (xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ return ZSDA_OPC_DC_GZIP;
+ if (xform->checksum_type == RTE_COMP_CHECKSUM_ADLER32)
+ return ZSDA_OPC_DC_ZLIB;
+ }
+
+ return ZSDA_OPC_INVALID;
+}
+
+int
+build_comp_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail)
+{
+ uint8_t opcode = ZSDA_OPC_INVALID;
+ struct rte_comp_op *op = op_in;
+ struct zsda_comp_xform *xform =
+ (struct zsda_comp_xform *)op->private_xform;
+ struct zsda_wqe_comp *wqe =
+ (struct zsda_wqe_comp *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ int ret = 0;
+ uint32_t op_src_dst_offset = 0;
+ uint32_t op_src_dst_length = 0;
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+
+ if ((op->m_dst == NULL) || (op->m_dst == op->m_src)) {
+ ZSDA_LOG(ERR, "Failed! m_dst");
+ return -EINVAL;
+ }
+
+ opcode = get_opcode(xform);
+ if (opcode == ZSDA_OPC_INVALID) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ return -EINVAL;
+ }
+
+ op_src_dst_offset = op->src.offset;
+ op_src_dst_length = op->m_src->pkt_len - op_src_dst_offset;
+ ret = zsda_fill_sgl_offset(op->m_src, op_src_dst_offset, sgl_src,
+ cookie->sgl_src_phys_addr, op_src_dst_length,
+ ZSDA_SGL_MAX_NUMBER - 1);
+
+ op_src_dst_offset = op->dst.offset;
+ op_src_dst_length = op->m_dst->pkt_len - op_src_dst_offset;
+ ret |= zsda_fill_sgl_offset(op->m_dst, op_src_dst_offset, sgl_dst,
+ cookie->sgl_dst_phys_addr,
+ op_src_dst_length, ZSDA_SGL_MAX_NUMBER - 1);
+
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return -EINVAL;
+ }
+
+ cookie->valid = queue->valid;
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+
+ memset(wqe, 0, sizeof(struct zsda_wqe_comp));
+ wqe->rx_length = op->m_src->pkt_len - op->src.offset;
+ wqe->tx_length = op->m_dst->pkt_len - op->dst.offset;
+ wqe->valid = queue->valid;
+ wqe->op_code = opcode;
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr = cookie->sgl_dst_phys_addr;
+
+ return ZSDA_SUCCESS;
+}
+
+int
+decomp_match(void *op_in)
+{
+ const struct rte_comp_op *op = (struct rte_comp_op *)op_in;
+ const struct zsda_comp_xform *xform =
+ (struct zsda_comp_xform *)op->private_xform;
+
+ if (op->op_type != RTE_COMP_OP_STATELESS)
+ return 0;
+
+ if (xform->type != RTE_COMP_DECOMPRESS)
+ return 0;
+ return 1;
+}
+
+int
+build_decomp_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail)
+{
+ uint8_t opcode = ZSDA_OPC_INVALID;
+ struct rte_comp_op *op = op_in;
+ struct zsda_comp_xform *xform =
+ (struct zsda_comp_xform *)op->private_xform;
+
+ struct zsda_wqe_comp *wqe =
+ (struct zsda_wqe_comp *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+ int ret = 0;
+
+ uint32_t op_src_dst_offset = 0;
+ uint32_t op_src_dst_length = 0;
+
+ if ((op->m_dst == NULL) || (op->m_dst == op->m_src)) {
+ ZSDA_LOG(ERR, "Failed! m_dst");
+ return -EINVAL;
+ }
+
+ opcode = get_opcode(xform);
+ if (opcode == ZSDA_OPC_INVALID) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ return -EINVAL;
+ }
+
+ op_src_dst_offset = op->src.offset;
+ op_src_dst_length = op->m_src->pkt_len - op_src_dst_offset;
+
+ ret = zsda_fill_sgl_offset(op->m_src, op_src_dst_offset, sgl_src,
+ cookie->sgl_src_phys_addr, op_src_dst_length,
+ ZSDA_SGL_MAX_NUMBER - 1);
+
+ op_src_dst_offset = op->dst.offset;
+ op_src_dst_length = op->m_dst->pkt_len - op_src_dst_offset;
+
+ ret |= zsda_fill_sgl_offset(op->m_dst, op_src_dst_offset, sgl_dst,
+ cookie->sgl_dst_phys_addr,
+ op_src_dst_length, ZSDA_SGL_MAX_NUMBER - 1);
+
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return -EINVAL;
+ }
+
+ cookie->valid = queue->valid;
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+
+ memset(wqe, 0, sizeof(struct zsda_wqe_comp));
+
+ wqe->rx_length = op->m_src->pkt_len - op->src.offset;
+ wqe->tx_length = op->m_dst->pkt_len - op->dst.offset;
+ wqe->valid = queue->valid;
+ wqe->op_code = opcode;
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr = cookie->sgl_dst_phys_addr;
+
+ return ZSDA_SUCCESS;
+}
+
+unsigned int
+zsda_comp_xform_size(void)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct zsda_comp_xform), 8);
+}
+
+int
+zsda_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ struct zsda_comp_dev_private *zsda = dev->data->dev_private;
+
+ if (unlikely(private_xform == NULL)) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -EINVAL;
+ }
+ if (unlikely(zsda->xformpool == NULL)) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -ENOMEM;
+ }
+ if (rte_mempool_get(zsda->xformpool, private_xform)) {
+ ZSDA_LOG(ERR, E_NULL);
+ return -ENOMEM;
+ }
+
+ struct zsda_comp_xform *zsda_xform =
+ (struct zsda_comp_xform *)*private_xform;
+ zsda_xform->type = xform->type;
+
+ if (zsda_xform->type == RTE_COMP_COMPRESS) {
+ zsda_xform->checksum_type = xform->compress.chksum;
+ if (xform->compress.deflate.huffman !=
+ RTE_COMP_HUFFMAN_DYNAMIC) {
+ ZSDA_LOG(ERR, "Huffman code not supported");
+ rte_mempool_put(zsda->xformpool, *private_xform);
+ return -ENOTSUP;
+ }
+ } else {
+ zsda_xform->checksum_type = xform->decompress.chksum;
+ }
+
+ if ((zsda_xform->checksum_type != RTE_COMP_CHECKSUM_CRC32) &&
+ (zsda_xform->checksum_type != RTE_COMP_CHECKSUM_ADLER32)) {
+ ZSDA_LOG(ERR, "Checksum type not supported");
+ rte_mempool_put(zsda->xformpool, *private_xform);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+zsda_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+ void *private_xform)
+{
+ struct zsda_comp_xform *zsda_xform =
+ (struct zsda_comp_xform *)private_xform;
+
+ if (zsda_xform) {
+ memset(zsda_xform, 0, zsda_comp_xform_size());
+ struct rte_mempool *mp = rte_mempool_from_obj(zsda_xform);
+
+ rte_mempool_put(mp, zsda_xform);
+ return 0;
+ }
+ return -EINVAL;
+}
diff --git a/drivers/compress/zsda/zsda_comp.h b/drivers/compress/zsda/zsda_comp.h
new file mode 100644
index 0000000..5a91a5e
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_COMP_H_
+#define _ZSDA_COMP_H_
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "zsda_common.h"
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+struct zsda_comp_xform {
+ enum rte_comp_xform_type type;
+ enum rte_comp_checksum_type checksum_type;
+};
+
+int comp_match(void *op_in);
+int build_comp_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail);
+int decomp_match(void *op_in);
+int build_decomp_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail);
+int zsda_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform);
+
+int zsda_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+ void *private_xform);
+unsigned int zsda_comp_xform_size(void);
+
+#endif /* _ZSDA_COMP_H_ */
diff --git a/drivers/compress/zsda/zsda_comp_pmd.c b/drivers/compress/zsda/zsda_comp_pmd.c
new file mode 100644
index 0000000..5b48153
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp_pmd.c
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_malloc.h>
+
+#include "zsda_comp.h"
+#include "zsda_comp_pmd.h"
+
+static const struct rte_compressdev_capabilities zsda_comp_capabilities[] = {
+ {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_HUFFMAN_DYNAMIC |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_ADLER32_CHECKSUM,
+ .window_size = {.min = 15, .max = 15, .increment = 0},
+ },
+ {
+ RTE_COMP_ALGO_LIST_END,
+ 0,
+ {0, 0, 0},
+ },
+};
+
+static void
+zsda_comp_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ struct zsda_common_stat comm = {0};
+
+ zsda_stats_get(dev->data->queue_pairs, dev->data->nb_queue_pairs,
+ &comm);
+ stats->enqueued_count = comm.enqueued_count;
+ stats->dequeued_count = comm.dequeued_count;
+ stats->enqueue_err_count = comm.enqueue_err_count;
+ stats->dequeue_err_count = comm.dequeue_err_count;
+}
+
+static void
+zsda_comp_stats_reset(struct rte_compressdev *dev)
+{
+ zsda_stats_reset(dev->data->queue_pairs, dev->data->nb_queue_pairs);
+}
+
+static int
+zsda_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
+{
+ return zsda_queue_pair_release(
+ (struct zsda_qp **)&(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static void
+comp_callback(void *op, struct zsda_cqe *cqe)
+{
+ struct rte_comp_op *tmp_op = (struct rte_comp_op *)op;
+
+ tmp_op->produced = cqe->tx_real_length;
+
+ if (cqe->err0 || CQE_ERR1(cqe->err1))
+ tmp_op->status = RTE_COMP_OP_STATUS_ERROR;
+ else
+ tmp_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+}
+
+static int
+setup_comp_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+ struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_COMPRESSION;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw = NULL;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "comp";
+
+ ret = common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = comp_callback;
+ qp->srv[type].tx_cb = build_comp_request;
+ qp->srv[type].match = comp_match;
+
+ return ret;
+}
+
+static int
+setup_decomp_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+ struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_DECOMPRESSION;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw = NULL;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "decomp";
+
+ ret = common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = comp_callback;
+ qp->srv[type].tx_cb = build_decomp_request;
+ qp->srv[type].match = decomp_match;
+
+ return ret;
+}
+
+static int
+zsda_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ int ret = 0;
+ struct zsda_qp *qp_new;
+
+ struct zsda_qp **qp_addr =
+ (struct zsda_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct zsda_comp_dev_private *comp_priv = dev->data->dev_private;
+ struct zsda_pci_device *zsda_pci_dev = comp_priv->zsda_pci_dev;
+ uint16_t num_qps_comp =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);
+ uint16_t num_qps_decomp =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);
+ uint32_t nb_des = max_inflight_ops;
+
+ /* The driver always uses the fixed queue depth NB_DES. */
+ nb_des = NB_DES;
+
+ if (*qp_addr != NULL) {
+ ret = zsda_comp_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ qp_new = rte_zmalloc_socket("zsda PMD qp metadata", sizeof(*qp_new),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp_new == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return -ENOMEM;
+ }
+
+ if (num_qps_comp == MAX_QPS_ON_FUNCTION)
+ ret |= setup_comp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else if (num_qps_decomp == MAX_QPS_ON_FUNCTION)
+ ret |= setup_decomp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else {
+ ret |= setup_comp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ ret |= setup_decomp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ }
+
+ if (ret) {
+ rte_free(qp_new);
+ return ret;
+ }
+
+ qp_new->mmap_bar_addr =
+ comp_priv->zsda_pci_dev->pci_dev->mem_resource[0].addr;
+ *qp_addr = qp_new;
+
+ return ret;
+}
+
+static struct rte_mempool *
+zsda_comp_create_xform_pool(struct zsda_comp_dev_private *comp_dev,
+ struct rte_compressdev_config *config,
+ uint32_t num_elements)
+{
+ char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE, "%s_xforms",
+ comp_dev->zsda_pci_dev->name);
+
+ ZSDA_LOG(DEBUG, "xformpool: %s", xform_pool_name);
+ mp = rte_mempool_lookup(xform_pool_name);
+
+ if (mp != NULL) {
+ ZSDA_LOG(DEBUG, "xformpool already created");
+ if (mp->size != num_elements) {
+ ZSDA_LOG(DEBUG, "xformpool wrong size - delete it");
+ rte_mempool_free(mp);
+ mp = NULL;
+ comp_dev->xformpool = NULL;
+ }
+ }
+
+ if (mp == NULL)
+ mp = rte_mempool_create(xform_pool_name, num_elements,
+ zsda_comp_xform_size(), 0, 0, NULL,
+ NULL, NULL, NULL, config->socket_id, 0);
+ if (mp == NULL) {
+ ZSDA_LOG(ERR, E_CREATE);
+ return NULL;
+ }
+
+ return mp;
+}
+
+static int
+zsda_comp_dev_close(struct rte_compressdev *dev)
+{
+ int ret = 0;
+ uint16_t i = 0;
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = zsda_comp_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ rte_mempool_free(comp_dev->xformpool);
+ comp_dev->xformpool = NULL;
+
+ return ret;
+}
+
+static int
+zsda_comp_dev_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ if (config->max_nb_priv_xforms) {
+ comp_dev->xformpool = zsda_comp_create_xform_pool(
+ comp_dev, config, config->max_nb_priv_xforms);
+ if (comp_dev->xformpool == NULL)
+ return -ENOMEM;
+ } else
+ comp_dev->xformpool = NULL;
+
+ return 0;
+}
+
+static int
+zsda_comp_dev_start(struct rte_compressdev *dev)
+{
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+ struct zsda_qp_hw *qp_hw = NULL;
+ int ret = 0;
+
+ qp_hw = zsda_qps_hw_per_service(comp_dev->zsda_pci_dev,
+ ZSDA_SERVICE_COMPRESSION);
+ ret = zsda_queue_start(comp_dev->zsda_pci_dev->pci_dev, qp_hw);
+ qp_hw = zsda_qps_hw_per_service(comp_dev->zsda_pci_dev,
+ ZSDA_SERVICE_DECOMPRESSION);
+ ret |= zsda_queue_start(comp_dev->zsda_pci_dev->pci_dev, qp_hw);
+
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(ERR, E_START_Q);
+ return ZSDA_FAILED;
+ }
+ return ZSDA_SUCCESS;
+}
+
+static void
+zsda_comp_dev_stop(struct rte_compressdev *dev)
+{
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+ struct zsda_qp_hw *qp_hw = NULL;
+
+ qp_hw = zsda_qps_hw_per_service(comp_dev->zsda_pci_dev,
+ ZSDA_SERVICE_COMPRESSION);
+ zsda_queue_stop(comp_dev->zsda_pci_dev->pci_dev, qp_hw);
+ qp_hw = zsda_qps_hw_per_service(comp_dev->zsda_pci_dev,
+ ZSDA_SERVICE_DECOMPRESSION);
+ zsda_queue_stop(comp_dev->zsda_pci_dev->pci_dev, qp_hw);
+}
+
+static void
+zsda_comp_dev_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *info)
+{
+ struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ zsda_comp_max_nb_qps(comp_dev->zsda_pci_dev);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = comp_dev->zsda_dev_capabilities;
+ }
+}
+
+static uint16_t
+zsda_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return zsda_enqueue_op_burst((struct zsda_qp *)qp, (void **)ops,
+ nb_ops);
+}
+
+static uint16_t
+zsda_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return zsda_dequeue_op_burst((struct zsda_qp *)qp, (void **)ops,
+ nb_ops);
+}
+
+static struct rte_compressdev_ops compress_zsda_ops = {
+
+ .dev_configure = zsda_comp_dev_config,
+ .dev_start = zsda_comp_dev_start,
+ .dev_stop = zsda_comp_dev_stop,
+ .dev_close = zsda_comp_dev_close,
+ .dev_infos_get = zsda_comp_dev_info_get,
+
+ .stats_get = zsda_comp_stats_get,
+ .stats_reset = zsda_comp_stats_reset,
+ .queue_pair_setup = zsda_comp_qp_setup,
+ .queue_pair_release = zsda_comp_qp_release,
+
+ .private_xform_create = zsda_comp_private_xform_create,
+ .private_xform_free = zsda_comp_private_xform_free};
+
+/* An rte_driver is needed in the registration of the device with compressdev.
+ * The actual zsda pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the compression part of the pci device.
+ */
+static const char zsda_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_ZSDA_PMD);
+static const struct rte_driver compdev_zsda_driver = {
+ .name = zsda_comp_drv_name, .alias = zsda_comp_drv_name};
+
+int
+zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev,
+ __rte_unused struct zsda_dev_cmd_param *zsda_dev_cmd_param)
+{
+ struct zsda_device_info *dev_info =
+ &zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+ struct rte_compressdev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ };
+
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct zsda_comp_dev_private *comp_dev;
+ const struct rte_compressdev_capabilities *capabilities;
+ uint64_t capa_size = sizeof(struct rte_compressdev_capabilities);
+
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
+ zsda_pci_dev->name, "comp");
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dev_info->comp_rte_dev.driver = &compdev_zsda_driver;
+ dev_info->comp_rte_dev.numa_node = dev_info->pci_dev->device.numa_node;
+ dev_info->comp_rte_dev.devargs = NULL;
+
+ compressdev = rte_compressdev_pmd_create(
+ name, &(dev_info->comp_rte_dev),
+ sizeof(struct zsda_comp_dev_private), &init_params);
+
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ compressdev->dev_ops = &compress_zsda_ops;
+
+ compressdev->enqueue_burst = zsda_comp_pmd_enqueue_op_burst;
+ compressdev->dequeue_burst = zsda_comp_pmd_dequeue_op_burst;
+
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+ snprintf(capa_memz_name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+ "ZSDA_COMP_CAPA");
+
+ comp_dev = compressdev->data->dev_private;
+ comp_dev->zsda_pci_dev = zsda_pci_dev;
+ comp_dev->compressdev = compressdev;
+ capabilities = zsda_comp_capabilities;
+
+ comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (comp_dev->capa_mz == NULL) {
+ comp_dev->capa_mz = rte_memzone_reserve(
+ capa_memz_name, capa_size, rte_socket_id(), 0);
+ }
+ if (comp_dev->capa_mz == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ memset(&dev_info->comp_rte_dev, 0,
+ sizeof(dev_info->comp_rte_dev));
+ rte_compressdev_pmd_destroy(compressdev);
+ return -EFAULT;
+ }
+
+ memcpy(comp_dev->capa_mz->addr, capabilities, capa_size);
+ comp_dev->zsda_dev_capabilities = comp_dev->capa_mz->addr;
+
+ zsda_pci_dev->comp_dev = comp_dev;
+
+ return 0;
+}
+
+int
+zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev)
+{
+ struct zsda_comp_dev_private *comp_dev;
+
+ if (zsda_pci_dev == NULL)
+ return -ENODEV;
+
+ comp_dev = zsda_pci_dev->comp_dev;
+ if (comp_dev == NULL)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(zsda_pci_dev->comp_dev->capa_mz);
+
+ zsda_comp_dev_close(comp_dev->compressdev);
+
+ rte_compressdev_pmd_destroy(comp_dev->compressdev);
+ zsda_pci_dev->comp_dev = NULL;
+
+ return 0;
+}
diff --git a/drivers/compress/zsda/zsda_comp_pmd.h b/drivers/compress/zsda/zsda_comp_pmd.h
new file mode 100644
index 0000000..46e39f5
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp_pmd.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_COMP_PMD_H_
+#define _ZSDA_COMP_PMD_H_
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+/**< ZSDA Compression PMD driver name */
+#define COMPRESSDEV_NAME_ZSDA_PMD compress_zsda
+
+/** private data structure for a ZSDA compression device.
+ * This ZSDA device is a device offering only a compression service,
+ * there can be one of these on each zsda_pci_device (VF).
+ */
+struct zsda_comp_dev_private {
+ struct zsda_pci_device *zsda_pci_dev;
+ /**< The zsda pci device hosting the service */
+ struct rte_compressdev *compressdev;
+ /**< The pointer to this compression device structure */
+ const struct rte_compressdev_capabilities *zsda_dev_capabilities;
+ /* ZSDA device compression capabilities */
+ const struct rte_memzone *interm_buff_mz;
+ /**< The device's memory for intermediate buffers */
+ struct rte_mempool *xformpool;
+ /**< The device's pool for zsda_comp_xforms */
+ struct rte_mempool *streampool;
+ /**< The device's pool for zsda_comp_streams */
+ const struct rte_memzone *capa_mz;
+ /* Shared memzone for storing capabilities */
+ uint16_t min_enq_burst_threshold;
+};
+
+int zsda_comp_dev_create(
+ struct zsda_pci_device *zsda_pci_dev,
+ __rte_unused struct zsda_dev_cmd_param *zsda_dev_cmd_param);
+
+int zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_COMP_PMD_H_ */
diff --git a/drivers/crypto/zsda/meson.build b/drivers/crypto/zsda/meson.build
new file mode 100644
index 0000000..58a701c
--- /dev/null
+++ b/drivers/crypto/zsda/meson.build
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 ZTE Corporation
+
+dep = dependency('libcrypto', required: false, method: 'pkg-config')
+if not dep.found()
+ build = false
+ reason = 'missing dependency, "libcrypto"'
+endif
+
+headers = files(
+ 'zsda_sym_capabilities.h',
+ 'zsda_sym_pmd.h',
+ 'zsda_sym.h'
+)
+
+sources = files(
+ 'zsda_sym.c',
+ 'zsda_sym_pmd.c',
+)
diff --git a/drivers/crypto/zsda/version.map b/drivers/crypto/zsda/version.map
new file mode 100644
index 0000000..2a95c45
--- /dev/null
+++ b/drivers/crypto/zsda/version.map
@@ -0,0 +1,6 @@
+DPDK_24 {
+ local: *;
+};
diff --git a/drivers/crypto/zsda/zsda_sym.c b/drivers/crypto/zsda/zsda_sym.c
new file mode 100644
index 0000000..2cff408
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym.c
@@ -0,0 +1,734 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+#include <rte_crypto_sym.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+#include "zsda_logs.h"
+#include "zsda_sym.h"
+
+/**************** AES KEY EXPANSION ****************/
+/**
+ * AES S-box
+ * S-box table: an 8-bit input maps to an 8-bit output
+ **/
+static const unsigned char aes_sbox[256] = {
+ /* indexed directly by the 8-bit input byte (0x00..0xff) */
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
+ 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
+ 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
+ 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
+ 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
+ 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
+ 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
+ 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
+ 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
+ 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
+ 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
+ 0xb0, 0x54, 0xbb, 0x16};
+
+/**
+ * The round constant word array, Rcon[i]
+ *
+ * From Wikipedia's article on the Rijndael key schedule @
+ * https://en.wikipedia.org/wiki/Rijndael_key_schedule#Rcon "Only the first some
+ * of these constants are actually used – up to rcon[10] for AES-128 (as 11
+ * round keys are needed), up to rcon[8] for AES-192, up to rcon[7] for AES-256.
+ * rcon[0] is not used in AES algorithm."
+ */
+static const unsigned char Rcon[11] = {0x8d, 0x01, 0x02, 0x04, 0x08, 0x10,
+ 0x20, 0x40, 0x80, 0x1b, 0x36};
+
+#define GET_AES_SBOX_VAL(num) (aes_sbox[(num)])
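+/*
+ * Sanity-check values from the table above (per FIPS-197):
+ * GET_AES_SBOX_VAL(0x00) == 0x63, GET_AES_SBOX_VAL(0x53) == 0xed.
+ */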
+
+/**************** SM4 KEY EXPANSION ****************/
+/*
+ * 32-bit integer manipulation macros (big endian)
+ */
+#ifndef GET_ULONG_BE
+#define GET_ULONG_BE(n, b, i) \
+ { \
+ (n) = ((unsigned int)(b)[(i)] << 24) | \
+ ((unsigned int)(b)[(i) + 1] << 16) | \
+ ((unsigned int)(b)[(i) + 2] << 8) | \
+ ((unsigned int)(b)[(i) + 3]); \
+ }
+#endif
+
+#ifndef PUT_ULONG_BE
+#define PUT_ULONG_BE(n, b, i) \
+ { \
+ (b)[(i)] = (unsigned char)((n) >> 24); \
+ (b)[(i) + 1] = (unsigned char)((n) >> 16); \
+ (b)[(i) + 2] = (unsigned char)((n) >> 8); \
+ (b)[(i) + 3] = (unsigned char)((n)); \
+ }
+#endif
+
+/**
+ * Rotate-left (circular shift) macro definitions
+ **/
+#define SHL(x, n) (((x) & 0xFFFFFFFF) << n)
+#define ROTL(x, n) (SHL((x), n) | ((x) >> (32 - n)))
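+/* Example: with 32-bit operands, ROTL(0x12345678, 8) == 0x34567812. */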
+
+/**
+ * SM4 S-boxes
+ * S-box table: an 8-bit input maps to an 8-bit output
+ **/
+static const unsigned char sm4_sbox[16][16] = {
+ {0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2,
+ 0x28, 0xfb, 0x2c, 0x05},
+ {0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26,
+ 0x49, 0x86, 0x06, 0x99},
+ {0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43,
+ 0xed, 0xcf, 0xac, 0x62},
+ {0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa,
+ 0x75, 0x8f, 0x3f, 0xa6},
+ {0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19,
+ 0xe6, 0x85, 0x4f, 0xa8},
+ {0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b,
+ 0x70, 0x56, 0x9d, 0x35},
+ {0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b,
+ 0x01, 0x21, 0x78, 0x87},
+ {0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7,
+ 0xa0, 0xc4, 0xc8, 0x9e},
+ {0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce,
+ 0xf9, 0x61, 0x15, 0xa1},
+ {0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30,
+ 0xf5, 0x8c, 0xb1, 0xe3},
+ {0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab,
+ 0x0d, 0x53, 0x4e, 0x6f},
+ {0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72,
+ 0x6d, 0x6c, 0x5b, 0x51},
+ {0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41,
+ 0x1f, 0x10, 0x5a, 0xd8},
+ {0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12,
+ 0xb8, 0xe5, 0xb4, 0xb0},
+ {0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09,
+ 0xc5, 0x6e, 0xc6, 0x84},
+ {0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e,
+ 0xd7, 0xcb, 0x39, 0x48},
+};
+
+/* System parameter */
+static const unsigned int FK[4] = {0xa3b1bac6, 0x56aa3350, 0x677d9197,
+ 0xb27022dc};
+
+/* fixed parameter */
+static const unsigned int CK[32] = {
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1,
+ 0xa8afb6bd, 0xc4cbd2d9, 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, 0xc0c7ced5, 0xdce3eaf1,
+ 0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41,
+ 0x484f565d, 0x646b7279};
+
+/*
+ * private function:
+ * look up in SM4 S-boxes and get the related value.
+ * args: [in] inch: 0x00~0xFF (8-bit unsigned value).
+ */
+static unsigned char
+sm4Sbox(unsigned char inch)
+{
+ unsigned char *pTable = (unsigned char *)sm4_sbox;
+ unsigned char retVal = (unsigned char)(pTable[inch]);
+ return retVal;
+}
+
+/* private function:
+ * Calculating round encryption key.
+ * args: [in] ka: a 32-bit unsigned value;
+ * return: sk[i]: i{0,1,2,3,...31}.
+ */
+static unsigned int
+sm4CalciRK(unsigned int ka)
+{
+ unsigned int bb = 0;
+ unsigned int rk = 0;
+ unsigned char a[4];
+ unsigned char b[4];
+
+ PUT_ULONG_BE(ka, a, 0)
+ b[0] = sm4Sbox(a[0]);
+ b[1] = sm4Sbox(a[1]);
+ b[2] = sm4Sbox(a[2]);
+ b[3] = sm4Sbox(a[3]);
+ GET_ULONG_BE(bb, b, 0)
+ rk = bb ^ (ROTL(bb, 13)) ^ (ROTL(bb, 23));
+ return rk;
+}
+
+static void
+zsda_sm4_key_expansion(unsigned int SK[32], const uint8_t key[16])
+{
+ unsigned int MK[4];
+ unsigned int k[36];
+ unsigned int i = 0;
+
+ GET_ULONG_BE(MK[0], key, 0);
+ GET_ULONG_BE(MK[1], key, 4);
+ GET_ULONG_BE(MK[2], key, 8);
+ GET_ULONG_BE(MK[3], key, 12);
+ k[0] = MK[0] ^ FK[0];
+ k[1] = MK[1] ^ FK[1];
+ k[2] = MK[2] ^ FK[2];
+ k[3] = MK[3] ^ FK[3];
+ for (; i < 32; i++) {
+ k[i + 4] = k[i] ^
+ (sm4CalciRK(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]));
+ SK[i] = k[i + 4];
+ }
+}
+
+static void
+u32_to_u8(uint32_t *u_int32_t_data, uint8_t *u8_data)
+{
+ *(u8_data + 0) = ((*u_int32_t_data & 0xFF000000) >> 24) & (0xFF);
+ *(u8_data + 1) = ((*u_int32_t_data & 0x00FF0000) >> 16) & (0xFF);
+ *(u8_data + 2) = ((*u_int32_t_data & 0x0000FF00) >> 8) & (0xFF);
+ *(u8_data + 3) = (*u_int32_t_data & 0x000000FF);
+}
+
+static void
+zsda_aes_key_expansion(uint8_t *round_key, uint32_t round_num,
+ const uint8_t *key, uint32_t key_len)
+{
+ uint32_t i, j, k, nk, nr;
+ uint8_t tempa[4];
+
+ nk = key_len >> 2;
+ nr = round_num;
+
+ // The first round key is the key itself.
+ for (i = 0; i < nk; ++i) {
+ round_key[(i * 4) + 0] = key[(i * 4) + 0];
+
+ round_key[(i * 4) + 1] = key[(i * 4) + 1];
+
+ round_key[(i * 4) + 2] = key[(i * 4) + 2];
+ round_key[(i * 4) + 3] = key[(i * 4) + 3];
+ }
+
+ // All other round keys are found from the previous round keys.
+ for (i = nk; i < (4 * (nr + 1)); ++i) {
+ k = (i - 1) * 4;
+ tempa[0] = round_key[k + 0];
+ tempa[1] = round_key[k + 1];
+ tempa[2] = round_key[k + 2];
+ tempa[3] = round_key[k + 3];
+
+ if ((nk != 0) && ((i % nk) == 0)) {
+ // This function shifts the 4 bytes in a word to the
+ // left once. [a0,a1,a2,a3] becomes [a1,a2,a3,a0]
+ // Function RotWord()
+ {
+ const uint8_t u8tmp = tempa[0];
+
+ tempa[0] = tempa[1];
+ tempa[1] = tempa[2];
+ tempa[2] = tempa[3];
+ tempa[3] = u8tmp;
+ }
+
+ // SubWord() is a function that takes a four-byte input
+ // word and applies the S-box to each of the four bytes
+ // to produce an output word. Function Subword()
+ {
+ tempa[0] = GET_AES_SBOX_VAL(tempa[0]);
+ tempa[1] = GET_AES_SBOX_VAL(tempa[1]);
+ tempa[2] = GET_AES_SBOX_VAL(tempa[2]);
+ tempa[3] = GET_AES_SBOX_VAL(tempa[3]);
+ }
+
+ tempa[0] = tempa[0] ^ Rcon[i / nk];
+ }
+
+ if (nk == 8) {
+ if ((i % nk) == 4) {
+ // Function Subword()
+ {
+ tempa[0] = GET_AES_SBOX_VAL(tempa[0]);
+ tempa[1] = GET_AES_SBOX_VAL(tempa[1]);
+ tempa[2] = GET_AES_SBOX_VAL(tempa[2]);
+ tempa[3] = GET_AES_SBOX_VAL(tempa[3]);
+ }
+ }
+ }
+
+ j = i * 4;
+ k = (i - nk) * 4;
+ round_key[j + 0] = round_key[k + 0] ^ tempa[0];
+ round_key[j + 1] = round_key[k + 1] ^ tempa[1];
+ round_key[j + 2] = round_key[k + 2] ^ tempa[2];
+ round_key[j + 3] = round_key[k + 3] ^ tempa[3];
+ }
+}
+
+static void
+reverse_memcpy(uint8_t *restrict dst, const uint8_t *restrict src, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; ++i)
+ dst[n - 1 - i] = src[i];
+}
+
+static uint8_t
+get_opcode_hash(struct rte_crypto_op *op)
+{
+ if ((op->sym->xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+ (op->sym->xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)) {
+ switch (op->sym->xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1:
+ return ZSDA_OPC_HASH_SHA1;
+
+ case RTE_CRYPTO_AUTH_SHA224:
+ return ZSDA_OPC_HASH_SHA2_224;
+
+ case RTE_CRYPTO_AUTH_SHA256:
+ return ZSDA_OPC_HASH_SHA2_256;
+
+ case RTE_CRYPTO_AUTH_SHA384:
+ return ZSDA_OPC_HASH_SHA2_384;
+
+ case RTE_CRYPTO_AUTH_SHA512:
+ return ZSDA_OPC_HASH_SHA2_512;
+
+ case RTE_CRYPTO_AUTH_SM3:
+ return ZSDA_OPC_HASH_SM3;
+ default:
+ break;
+ }
+ }
+
+ return ZSDA_OPC_INVALID;
+}
+
+static uint8_t
+get_opcode_crypto(struct rte_crypto_op *op)
+{
+ if (op->sym->xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if ((op->sym->xform->cipher.algo ==
+ RTE_CRYPTO_CIPHER_AES_XTS) &&
+ (op->sym->xform->cipher.key.length == 32))
+ return ZSDA_OPC_EC_AES_XTS_256;
+ else if ((op->sym->xform->cipher.algo ==
+ RTE_CRYPTO_CIPHER_AES_XTS) &&
+ (op->sym->xform->cipher.key.length == 64))
+ return ZSDA_OPC_EC_AES_XTS_512;
+ else if ((op->sym->xform->cipher.algo ==
+ RTE_CRYPTO_CIPHER_SM4_XTS) &&
+ (op->sym->xform->cipher.key.length == 32))
+ return ZSDA_OPC_EC_SM4_XTS_256;
+ } else if (op->sym->xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if ((op->sym->xform->cipher.algo ==
+ RTE_CRYPTO_CIPHER_AES_XTS) &&
+ (op->sym->xform->cipher.key.length == 32))
+ return ZSDA_OPC_DC_AES_XTS_256;
+ else if ((op->sym->xform->cipher.algo ==
+ RTE_CRYPTO_CIPHER_AES_XTS) &&
+ (op->sym->xform->cipher.key.length == 64))
+ return ZSDA_OPC_DC_AES_XTS_512;
+ else if ((op->sym->xform->cipher.algo ==
+ RTE_CRYPTO_CIPHER_SM4_XTS) &&
+ (op->sym->xform->cipher.key.length == 32))
+ return ZSDA_OPC_DC_SM4_XTS_256;
+ }
+
+ return ZSDA_OPC_INVALID;
+}
+
+static uint32_t
+get_hash_dst_len(uint32_t opcode)
+{
+ switch (opcode) {
+ case ZSDA_OPC_HASH_SHA1:
+ return ZSDA_DIGEST_SIZE_SHA1;
+ case ZSDA_OPC_HASH_SHA2_224:
+ return ZSDA_DIGEST_SIZE_SHA2_224;
+ case ZSDA_OPC_HASH_SHA2_256:
+ return ZSDA_DIGEST_SIZE_SHA2_256;
+ case ZSDA_OPC_HASH_SHA2_384:
+ return ZSDA_DIGEST_SIZE_SHA2_384;
+ case ZSDA_OPC_HASH_SHA2_512:
+ return ZSDA_DIGEST_SIZE_SHA2_512;
+ case ZSDA_OPC_HASH_SM3:
+ return ZSDA_DIGEST_SIZE_SM3;
+ default:
+ return ZSDA_OPC_INVALID;
+ }
+}
+
+static void
+check_len_lbads(uint32_t data_len, uint32_t lbads_size)
+{
+ if (data_len < 16)
+ ZSDA_LOG(WARNING, W_MAY_EXCEPT_TEST);
+ if (lbads_size != 0) {
+ if (!(((data_len % lbads_size) == 0) ||
+ ((data_len % lbads_size) > LBADS_MAX_REMAINDER))) {
+ ZSDA_LOG(WARNING, W_MAY_EXCEPT_TEST);
+ }
+ }
+}
+
+static uint8_t
+zsda_sym_lbads(uint32_t dataunit_len, uint32_t data_len)
+{
+ uint8_t lbads;
+
+ switch (dataunit_len) {
+ case ZSDA_AES_LBADS_512:
+ lbads = ZSDA_AES_LBADS_INDICATE_512;
+ check_len_lbads(data_len, ZSDA_AES_LBADS_512);
+ break;
+
+ case ZSDA_AES_LBADS_4096:
+ lbads = ZSDA_AES_LBADS_INDICATE_4096;
+ check_len_lbads(data_len, ZSDA_AES_LBADS_4096);
+ break;
+
+ case ZSDA_AES_LBADS_8192:
+ lbads = ZSDA_AES_LBADS_INDICATE_8192;
+ check_len_lbads(data_len, ZSDA_AES_LBADS_8192);
+ break;
+
+ case ZSDA_AES_LBADS_0:
+ lbads = ZSDA_AES_LBADS_INDICATE_0;
+ check_len_lbads(data_len, ZSDA_AES_LBADS_0);
+ break;
+
+ default:
+ ZSDA_LOG(ERR, "dataunit_len should be 0/512/4096/8192 - %d.",
+ dataunit_len);
+ lbads = ZSDA_AES_LBADS_INDICATE_INVALID;
+ break;
+ }
+ return lbads;
+}
+
+int
+encry_match(void *op_in)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+
+ if (op->sym->xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return 0;
+ if (op->sym->xform->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ return 0;
+
+ return 1;
+}
+
+int
+build_encry_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+ struct zsda_wqe_crpt *wqe =
+ (struct zsda_wqe_crpt *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+
+ int ret = 0;
+ uint32_t op_src_dst_offset = 0;
+ uint32_t op_src_dst_length = 0;
+ uint8_t skey_len = 0;
+
+ if ((op->sym->m_dst == NULL) || (op->sym->m_dst == op->sym->m_src)) {
+ ZSDA_LOG(ERR, "Failed! m_dst");
+ return -EINVAL;
+ }
+
+ op_src_dst_offset = op->sym->cipher.data.offset;
+ op_src_dst_length = op->sym->m_src->pkt_len - op_src_dst_offset;
+ ret = zsda_fill_sgl_offset(op->sym->m_src, op_src_dst_offset, sgl_src,
+ cookie->sgl_src_phys_addr, op_src_dst_length,
+ ZSDA_SGL_MAX_NUMBER - 1);
+
+ op_src_dst_length = op->sym->m_dst->pkt_len - op_src_dst_offset;
+ ret |= zsda_fill_sgl_offset(op->sym->m_dst, op_src_dst_offset, sgl_dst,
+ cookie->sgl_dst_phys_addr,
+ op_src_dst_length, ZSDA_SGL_MAX_NUMBER - 1);
+
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return -EINVAL;
+ }
+
+ cookie->valid = queue->valid;
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+
+ memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+ wqe->rx_length = op->sym->m_src->pkt_len - op_src_dst_offset;
+ wqe->tx_length = op->sym->m_dst->pkt_len - op_src_dst_offset;
+ wqe->valid = queue->valid;
+ wqe->op_code = get_opcode_crypto(op);
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr = cookie->sgl_dst_phys_addr;
+
+ wqe->cfg.lbads = zsda_sym_lbads(op->sym->xform->cipher.dataunit_len,
+ wqe->rx_length);
+ if (wqe->cfg.lbads == ZSDA_AES_LBADS_INDICATE_INVALID) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ return ZSDA_FAILED;
+ }
+ /* clang-format off */
+ reverse_memcpy((uint8_t *restrict) wqe->cfg.slba,
+ (const uint8_t *restrict)rte_crypto_op_ctod_offset(
+ op, char *, op->sym->xform->cipher.iv.offset) +
+ ZSDA_SYM_XTS_IV_SLBA_OFF,
+ sizeof(wqe->cfg.slba));
+
+ skey_len = (op->sym->xform->cipher.key.length / 2) & 0xff;
+ if (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN) {
+ reverse_memcpy((uint8_t *restrict)(wqe->cfg.key +
+ ZSDA_SYM_XTS_256_KEY2_OFF),
+ (op->sym->xform->cipher.key.data + skey_len),
+ skey_len);
+ reverse_memcpy((uint8_t *restrict)(wqe->cfg.key +
+ ZSDA_SYM_XTS_256_KEY1_OFF),
+ op->sym->xform->cipher.key.data, skey_len);
+ } else
+ reverse_memcpy((uint8_t *restrict) wqe->cfg.key,
+ op->sym->xform->cipher.key.data,
+ op->sym->xform->cipher.key.length);
+ /* clang-format on */
+
+ return ZSDA_SUCCESS;
+}
+
+int
+decry_match(void *op_in)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+
+ if (op->sym->xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return 0;
+ if (op->sym->xform->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT)
+ return 0;
+
+ return 1;
+}
+
+static void
+decry_set_key(uint8_t key[64], const uint8_t *key1_ptr, uint8_t skey_len,
+ struct rte_crypto_op *op)
+{
+ uint8_t round_num;
+ uint8_t dec_key1[ZSDA_AES_MAX_KEY_BYTE_LEN] = {0};
+ uint8_t aes_round_key[ZSDA_AES_MAX_EXP_BYTE_SIZE] = {0};
+ uint32_t sm4_round_key[ZSDA_SM4_MAX_EXP_DWORD_SIZE] = {0};
+
+ switch (op->sym->xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ round_num = (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN)
+ ? ZSDA_AES256_ROUND_NUM
+ : ZSDA_AES512_ROUND_NUM;
+ zsda_aes_key_expansion(aes_round_key, round_num, key1_ptr,
+ skey_len);
+ rte_memcpy(dec_key1, (aes_round_key + (16 * round_num)), 16);
+ if (skey_len > ZSDA_SYM_XTS_256_SKEY_LEN)
+ rte_memcpy((dec_key1 + 16),
+ (aes_round_key + (16 * round_num) -
+ (skey_len - 16)),
+ skey_len - 16);
+ break;
+ case RTE_CRYPTO_CIPHER_SM4_XTS:
+ zsda_sm4_key_expansion(sm4_round_key, key1_ptr);
+ for (size_t i = 0; i < 4; i++)
+ u32_to_u8(sm4_round_key + ZSDA_SM4_MAX_EXP_DWORD_SIZE -
+ 1 - i,
+ dec_key1 + (4 * i));
+ break;
+ default:
+ ZSDA_LOG(ERR, "unknow cipher algo!");
+ return;
+ }
+
+ if (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN) {
+ reverse_memcpy(key + ZSDA_SYM_XTS_256_KEY2_OFF,
+ key1_ptr + skey_len, skey_len);
+ reverse_memcpy(key + ZSDA_SYM_XTS_256_KEY1_OFF, dec_key1,
+ skey_len);
+ } else {
+ reverse_memcpy(key, key1_ptr + skey_len, skey_len);
+ reverse_memcpy(key + ZSDA_SYM_XTS_512_KEY1_OFF, dec_key1,
+ skey_len);
+ }
+}
+
+int
+build_decry_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+ struct zsda_wqe_crpt *wqe =
+ (struct zsda_wqe_crpt *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+
+ uint32_t op_src_dst_offset = 0;
+ uint32_t op_src_dst_length = 0;
+ uint8_t skey_len = 0;
+ const uint8_t *key1_ptr = NULL;
+ int ret = 0;
+
+ if ((op->sym->m_dst == NULL) || (op->sym->m_dst == op->sym->m_src)) {
+ ZSDA_LOG(ERR, "Failed! m_dst");
+ return ZSDA_FAILED;
+ }
+
+ op_src_dst_offset = op->sym->cipher.data.offset;
+ op_src_dst_length = op->sym->m_src->pkt_len - op_src_dst_offset;
+ ret = zsda_fill_sgl_offset(op->sym->m_src, op_src_dst_offset, sgl_src,
+ cookie->sgl_src_phys_addr, op_src_dst_length,
+ ZSDA_SGL_MAX_NUMBER - 1);
+
+ op_src_dst_length = op->sym->m_dst->pkt_len - op_src_dst_offset;
+ ret |= zsda_fill_sgl_offset(op->sym->m_dst, op_src_dst_offset, sgl_dst,
+ cookie->sgl_dst_phys_addr,
+ op_src_dst_length, ZSDA_SGL_MAX_NUMBER - 1);
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return ZSDA_FAILED;
+ }
+
+ cookie->valid = queue->valid;
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+ memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+ wqe->rx_length = op->sym->m_src->pkt_len - op_src_dst_offset;
+ wqe->tx_length = op->sym->m_dst->pkt_len - op_src_dst_offset;
+
+ wqe->valid = queue->valid;
+ wqe->op_code = get_opcode_crypto(op);
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr = cookie->sgl_dst_phys_addr;
+
+ wqe->cfg.lbads = zsda_sym_lbads(op->sym->xform->cipher.dataunit_len,
+ wqe->rx_length);
+ if (wqe->cfg.lbads == ZSDA_AES_LBADS_INDICATE_INVALID)
+ return ZSDA_FAILED;
+
+ /* clang-format off */
+ reverse_memcpy((uint8_t *restrict)wqe->cfg.slba,
+ (uint8_t *restrict)rte_crypto_op_ctod_offset(
+ op, char *, op->sym->xform->cipher.iv.offset) +
+ ZSDA_SYM_XTS_IV_SLBA_OFF,
+ sizeof(wqe->cfg.slba));
+ /* clang-format on */
+
+ skey_len = (op->sym->xform->cipher.key.length / 2) & 0xff;
+ key1_ptr = op->sym->xform->cipher.key.data;
+
+ decry_set_key(wqe->cfg.key, key1_ptr, skey_len, op);
+ return ZSDA_SUCCESS;
+}
+
+int
+hash_match(void *op_in)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+
+ if (op->sym->xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ return 0;
+ if (op->sym->xform->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE)
+ return 0;
+
+ return 1;
+}
+
+int
+build_hash_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+ struct zsda_wqe_crpt *wqe =
+ (struct zsda_wqe_crpt *)(queue->base_addr +
+ (new_tail * queue->msg_size));
+ struct zsda_op_cookie *cookie =
+ (struct zsda_op_cookie *)op_cookies[new_tail];
+ struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+ uint8_t opcode = ZSDA_OPC_INVALID;
+ uint32_t op_src_dst_offset = 0;
+ uint32_t op_src_dst_length = 0;
+ int ret = 0;
+
+ if ((op->sym->m_dst == NULL) || (op->sym->m_dst == op->sym->m_src)) {
+ ZSDA_LOG(ERR, "Failed! m_dst");
+ return ZSDA_FAILED;
+ }
+
+ memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+ wqe->rx_length = op->sym->m_src->pkt_len - op->sym->auth.data.offset;
+ wqe->tx_length = op->sym->m_dst->pkt_len - op->sym->auth.data.offset;
+
+ opcode = get_opcode_hash(op);
+ if (opcode == ZSDA_OPC_INVALID) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return ZSDA_FAILED;
+ }
+ if (wqe->tx_length < get_hash_dst_len(opcode))
+ ZSDA_LOG(WARNING, W_MAY_EXCEPT_TEST);
+
+ op_src_dst_offset = op->sym->auth.data.offset;
+ op_src_dst_length = op->sym->auth.data.length;
+ ret = zsda_fill_sgl_offset(op->sym->m_src, op_src_dst_offset, sgl_src,
+ cookie->sgl_src_phys_addr, op_src_dst_length,
+ ZSDA_SGL_MAX_NUMBER - 1);
+
+ if (ret) {
+ ZSDA_LOG(ERR, E_FUNC);
+ return ZSDA_FAILED;
+ }
+
+ cookie->valid = queue->valid;
+ cookie->used = true;
+ cookie->sid = new_tail;
+ cookie->op = op;
+ wqe->valid = queue->valid;
+ wqe->op_code = opcode;
+ wqe->sid = cookie->sid;
+ wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+ wqe->tx_sgl_type = SGL_ELM_TYPE_PHYS_ADDR;
+ wqe->rx_addr = cookie->sgl_src_phys_addr;
+ wqe->tx_addr =
+ rte_pktmbuf_iova_offset(op->sym->m_dst, op_src_dst_offset);
+
+ return ZSDA_SUCCESS;
+}
diff --git a/drivers/crypto/zsda/zsda_sym.h b/drivers/crypto/zsda/zsda_sym.h
new file mode 100644
index 0000000..a98fa2a
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_H_
+#define _ZSDA_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+#include "zsda_sym_pmd.h"
+
+#define ZSDA_SYM_XTS_IV_SLBA_OFF (8)
+#define ZSDA_SYM_XTS_256_SKEY_LEN (16)
+#define ZSDA_SYM_XTS_256_KEY2_OFF (16)
+#define ZSDA_SYM_XTS_256_KEY1_OFF (48)
+#define ZSDA_SYM_XTS_512_KEY1_OFF (32)
+#define ZSDA_SYM_MIN_SRC_LEN_HASH (16)
+#define ZSDA_AES_LBADS_0 (0)
+#define ZSDA_AES_LBADS_512 (512)
+#define ZSDA_AES_LBADS_4096 (4096)
+#define ZSDA_AES_LBADS_8192 (8192)
+
+#define ZSDA_AES256_ROUND_NUM (10)
+#define ZSDA_AES512_ROUND_NUM (14)
+#define ZSDA_AES_MAX_EXP_BYTE_SIZE (240)
+#define ZSDA_AES_MAX_KEY_BYTE_LEN (32)
+#define ZSDA_SM4_MAX_EXP_DWORD_SIZE (32)
+
+int build_encry_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail);
+int build_decry_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail);
+int build_hash_request(void *op_in, const struct zsda_queue *queue,
+ void **op_cookies, uint16_t new_tail);
+
+int encry_match(void *op_in);
+int decry_match(void *op_in);
+int hash_match(void *op_in);
+
+#endif /* _ZSDA_SYM_H_ */
diff --git a/drivers/crypto/zsda/zsda_sym_capabilities.h b/drivers/crypto/zsda/zsda_sym_capabilities.h
new file mode 100644
index 0000000..6ed8ad6
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_capabilities.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_CAPABILITIES_H_
+#define _ZSDA_SYM_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities zsda_crypto_sym_capabilities[] = {
+ {/* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {
+ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {
+ .auth = {.algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {.min = 0,
+ .max = 0,
+ .increment = 0},
+ .digest_size = {.min = 1,
+ .max = 20,
+ .increment = 1},
+ .iv_size = {0} },
+ } },
+ } },
+ {/* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {
+ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {
+ .auth = {.algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {.min = 0,
+ .max = 0,
+ .increment = 0},
+ .digest_size = {.min = 1,
+ .max = 28,
+ .increment = 1},
+ .iv_size = {0} },
+ } },
+ } },
+ {/* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {
+ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {
+ .auth = {.algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {.min = 0,
+ .max = 0,
+ .increment = 0},
+ .digest_size = {.min = 1,
+ .max = 32,
+ .increment = 1},
+ .iv_size = {0} },
+ } },
+ } },
+ {/* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {
+ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {
+ .auth = {.algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {.min = 0,
+ .max = 0,
+ .increment = 0},
+ .digest_size = {.min = 1,
+ .max = 48,
+ .increment = 1},
+ .iv_size = {0} },
+ } },
+ } },
+ {/* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {
+ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {
+ .auth = {.algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {.min = 0,
+ .max = 0,
+ .increment = 0},
+ .digest_size = {.min = 1,
+ .max = 64,
+ .increment = 1},
+ .iv_size = {0} },
+ } },
+ } },
+ {/* SM3 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {
+ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {
+ .auth = {.algo = RTE_CRYPTO_AUTH_SM3,
+ .block_size = 64,
+ .key_size = {.min = 0,
+ .max = 0,
+ .increment = 0},
+ .digest_size = {.min = 1,
+ .max = 32,
+ .increment = 1},
+ .iv_size = {0} },
+ } },
+ } },
+ {/* AES XTS */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {
+ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {
+ .cipher = {.algo = RTE_CRYPTO_CIPHER_AES_XTS,
+ .block_size = 16,
+ .key_size = {.min = 32,
+ .max = 64,
+ .increment = 32},
+ .iv_size = {.min = 16,
+ .max = 16,
+ .increment = 0} },
+ } },
+ } },
+ {/* SM4 XTS */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {
+ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {
+ .cipher = {.algo = RTE_CRYPTO_CIPHER_SM4_XTS,
+ .block_size = 16,
+ .key_size = {.min = 32,
+ .max = 64,
+ .increment = 32},
+ .iv_size = {.min = 16,
+ .max = 16,
+ .increment = 0} },
+ } },
+ } }
+};
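+
+/*
+ * Illustrative lookup (not part of this driver): an application can check
+ * whether a bound device exposes one of the algorithms above through the
+ * standard cryptodev capability query, e.g.
+ *
+ *   struct rte_cryptodev_sym_capability_idx idx = {
+ *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *           .algo.cipher = RTE_CRYPTO_CIPHER_AES_XTS,
+ *   };
+ *   const struct rte_cryptodev_symmetric_capability *cap =
+ *           rte_cryptodev_sym_capability_get(dev_id, &idx);
+ */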
+#endif /* _ZSDA_SYM_CAPABILITIES_H_ */
\ No newline at end of file
diff --git a/drivers/crypto/zsda/zsda_sym_pmd.c b/drivers/crypto/zsda/zsda_sym_pmd.c
new file mode 100644
index 0000000..5e34851
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_pmd.c
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+
+#include "cryptodev_pmd.h"
+#include "zsda_logs.h"
+#include "zsda_qp.h"
+#include "zsda_sym.h"
+#include "zsda_sym_pmd.h"
+
+uint8_t zsda_sym_driver_id;
+
+static int
+zsda_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+static int zsda_sym_qp_release(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id);
+
+static int
+zsda_sym_dev_start(struct rte_cryptodev *dev)
+{
+ struct zsda_sym_dev_private *sym_dev = dev->data->dev_private;
+ struct zsda_qp_hw *qp_hw = NULL;
+ int ret = 0;
+
+ qp_hw = zsda_qps_hw_per_service(sym_dev->zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+ ret = zsda_queue_start(sym_dev->zsda_pci_dev->pci_dev, qp_hw);
+ qp_hw = zsda_qps_hw_per_service(sym_dev->zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+ ret |= zsda_queue_start(sym_dev->zsda_pci_dev->pci_dev, qp_hw);
+ qp_hw = zsda_qps_hw_per_service(sym_dev->zsda_pci_dev,
+ ZSDA_SERVICE_HASH_ENCODE);
+ ret |= zsda_queue_start(sym_dev->zsda_pci_dev->pci_dev, qp_hw);
+
+ if (ret == ZSDA_FAILED) {
+ ZSDA_LOG(ERR, E_START_Q);
+ return ZSDA_FAILED;
+ }
+
+ return 0;
+}
+
+static void
+zsda_sym_dev_stop(struct rte_cryptodev *dev)
+{
+ struct zsda_sym_dev_private *sym_dev = dev->data->dev_private;
+ struct zsda_qp_hw *qp_hw = NULL;
+
+ qp_hw = zsda_qps_hw_per_service(sym_dev->zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+ zsda_queue_stop(sym_dev->zsda_pci_dev->pci_dev, qp_hw);
+ qp_hw = zsda_qps_hw_per_service(sym_dev->zsda_pci_dev,
+ ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+ zsda_queue_stop(sym_dev->zsda_pci_dev->pci_dev, qp_hw);
+ qp_hw = zsda_qps_hw_per_service(sym_dev->zsda_pci_dev,
+ ZSDA_SERVICE_HASH_ENCODE);
+ zsda_queue_stop(sym_dev->zsda_pci_dev->pci_dev, qp_hw);
+}
+
+static int
+zsda_sym_dev_close(struct rte_cryptodev *dev)
+{
+ int ret = 0;
+ uint16_t i = 0;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = zsda_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+static void
+zsda_sym_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct zsda_sym_dev_private *sym_priv = dev->data->dev_private;
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ zsda_crypto_max_nb_qps(sym_priv->zsda_pci_dev);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = sym_priv->zsda_dev_capabilities;
+ info->driver_id = zsda_sym_driver_id;
+ info->sym.max_nb_sessions = 0;
+ }
+}
+
+static void
+zsda_sym_stats_get(struct rte_cryptodev *dev, struct rte_cryptodev_stats *stats)
+{
+ struct zsda_common_stat comm = {0};
+
+ zsda_stats_get(dev->data->queue_pairs, dev->data->nb_queue_pairs,
+ &comm);
+ stats->enqueued_count = comm.enqueued_count;
+ stats->dequeued_count = comm.dequeued_count;
+ stats->enqueue_err_count = comm.enqueue_err_count;
+ stats->dequeue_err_count = comm.dequeue_err_count;
+}
+
+static void
+zsda_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ zsda_stats_reset(dev->data->queue_pairs, dev->data->nb_queue_pairs);
+}
+
+static int
+zsda_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ ZSDA_LOG(DEBUG, "Release sym qp %u on device %d", queue_pair_id,
+ dev->data->dev_id);
+
+ return zsda_queue_pair_release(
+ (struct zsda_qp **)&(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static void
+crypto_callback(void *op_in, struct zsda_cqe *cqe)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+
+ if (cqe->err0 || CQE_ERR1(cqe->err1))
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
+static int
+setup_encrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+ struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_ENCRYPT;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw = NULL;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "sym_encrypt";
+
+ ret = common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = crypto_callback;
+ qp->srv[type].tx_cb = build_encry_request;
+ qp->srv[type].match = encry_match;
+
+ return ret;
+}
+
+static int
+setup_decrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+ struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_DECRYPT;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw = NULL;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "sym_decrypt";
+
+ ret = common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = crypto_callback;
+ qp->srv[type].tx_cb = build_decry_request;
+ qp->srv[type].match = decry_match;
+
+ return ret;
+}
+
+static int
+setup_hash_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+ struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+ enum zsda_service_type type = ZSDA_SERVICE_HASH_ENCODE;
+ struct zsda_qp_config conf;
+ int ret = 0;
+ struct zsda_qp_hw *qp_hw = NULL;
+
+ qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+ conf.hw = qp_hw->data + qp_id;
+ conf.service_type = type;
+ conf.cookie_size = sizeof(struct zsda_op_cookie);
+ conf.nb_descriptors = nb_des;
+ conf.socket_id = socket_id;
+ conf.service_str = "sym_hash";
+
+ ret = common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+ qp->srv[type].rx_cb = crypto_callback;
+ qp->srv[type].tx_cb = build_hash_request;
+ qp->srv[type].match = hash_match;
+
+ return ret;
+}
+
+static int
+zsda_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ int ret = 0;
+ struct zsda_qp *qp_new;
+
+ struct zsda_qp **qp_addr =
+ (struct zsda_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct zsda_sym_dev_private *sym_priv = dev->data->dev_private;
+ struct zsda_pci_device *zsda_pci_dev = sym_priv->zsda_pci_dev;
+ uint16_t num_qps_encrypt = zsda_qps_per_service(
+ zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+ uint16_t num_qps_decrypt = zsda_qps_per_service(
+ zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+ uint16_t num_qps_hash =
+ zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+
+ uint32_t nb_des = NB_DES;
+
+ if (*qp_addr != NULL) {
+ ret = zsda_sym_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ qp_new = rte_zmalloc_socket("zsda PMD qp metadata", sizeof(*qp_new),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp_new == NULL) {
+ ZSDA_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+
+ if (num_qps_encrypt == MAX_QPS_ON_FUNCTION)
+ ret |= setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else if (num_qps_decrypt == MAX_QPS_ON_FUNCTION)
+ ret |= setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else if (num_qps_hash == MAX_QPS_ON_FUNCTION)
+ ret |= setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ else {
+ ret |= setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ ret |= setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ ret |= setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+ socket_id);
+ }
+
+ if (ret) {
+ rte_free(qp_new);
+ return ret;
+ }
+
+ qp_new->mmap_bar_addr =
+ sym_priv->zsda_pci_dev->pci_dev->mem_resource[0].addr;
+ *qp_addr = qp_new;
+
+ return ret;
+}
+
+static struct rte_cryptodev_ops crypto_zsda_ops = {
+
+ .dev_configure = zsda_sym_dev_config,
+ .dev_start = zsda_sym_dev_start,
+ .dev_stop = zsda_sym_dev_stop,
+ .dev_close = zsda_sym_dev_close,
+ .dev_infos_get = zsda_sym_dev_info_get,
+
+ .stats_get = zsda_sym_stats_get,
+ .stats_reset = zsda_sym_stats_reset,
+ .queue_pair_setup = zsda_sym_qp_setup,
+ .queue_pair_release = zsda_sym_qp_release,
+
+};
+
+static uint16_t
+zsda_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return zsda_enqueue_op_burst((struct zsda_qp *)qp, (void **)ops,
+ nb_ops);
+}
+
+static uint16_t
+zsda_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return zsda_dequeue_op_burst((struct zsda_qp *)qp, (void **)ops,
+ nb_ops);
+}
+
+static const char zsda_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_ZSDA_SYM_PMD);
+static const struct rte_driver cryptodev_zsda_sym_driver = {
+ .name = zsda_sym_drv_name,
+ .alias = zsda_sym_drv_name,
+};
+
+int
+zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev,
+ struct zsda_dev_cmd_param *zsda_dev_cmd_param __rte_unused)
+{
+ int ret = 0;
+ struct zsda_device_info *dev_info =
+ &zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = (int)rte_socket_id(),
+ .private_data_size = sizeof(struct zsda_sym_dev_private)};
+
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct zsda_sym_dev_private *sym_priv;
+ const struct rte_cryptodev_capabilities *capabilities;
+ uint64_t capa_size;
+
+ init_params.max_nb_queue_pairs = zsda_crypto_max_nb_qps(zsda_pci_dev);
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", zsda_pci_dev->name,
+ "sym_encrypt");
+ ZSDA_LOG(DEBUG, "Creating ZSDA SYM device %s", name);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dev_info->sym_rte_dev.driver = &cryptodev_zsda_sym_driver;
+ dev_info->sym_rte_dev.numa_node = dev_info->pci_dev->device.numa_node;
+ dev_info->sym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name, &(dev_info->sym_rte_dev),
+ &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ dev_info->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = zsda_sym_driver_id;
+
+ cryptodev->dev_ops = &crypto_zsda_ops;
+
+ cryptodev->enqueue_burst = zsda_sym_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = zsda_sym_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ sym_priv = cryptodev->data->dev_private;
+ sym_priv->zsda_pci_dev = zsda_pci_dev;
+ capabilities = zsda_crypto_sym_capabilities;
+ capa_size = sizeof(zsda_crypto_sym_capabilities);
+
+ snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, "ZSDA_SYM_CAPA");
+
+ sym_priv->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (sym_priv->capa_mz == NULL)
+ sym_priv->capa_mz = rte_memzone_reserve(
+ capa_memz_name, capa_size, rte_socket_id(), 0);
+
+ if (sym_priv->capa_mz == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ memcpy(sym_priv->capa_mz->addr, capabilities, capa_size);
+ sym_priv->zsda_dev_capabilities = sym_priv->capa_mz->addr;
+
+ zsda_pci_dev->sym_dev = sym_priv;
+
+ return 0;
+
+error:
+
+ rte_cryptodev_pmd_destroy(cryptodev);
+ memset(&dev_info->sym_rte_dev, 0, sizeof(dev_info->sym_rte_dev));
+
+ return ret;
+}
+
+int
+zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (zsda_pci_dev == NULL)
+ return -ENODEV;
+ if (zsda_pci_dev->sym_dev == NULL)
+ return 0;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memzone_free(zsda_pci_dev->sym_dev->capa_mz);
+
+ cryptodev = rte_cryptodev_pmd_get_dev(zsda_pci_dev->zsda_dev_id);
+
+ rte_cryptodev_pmd_destroy(cryptodev);
+ zsda_devs[zsda_pci_dev->zsda_dev_id].sym_rte_dev.name = NULL;
+ zsda_pci_dev->sym_dev = NULL;
+
+ return 0;
+}
+
+static struct cryptodev_driver zsda_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zsda_crypto_drv, cryptodev_zsda_sym_driver,
+ zsda_sym_driver_id);
diff --git a/drivers/crypto/zsda/zsda_sym_pmd.h b/drivers/crypto/zsda/zsda_sym_pmd.h
new file mode 100644
index 0000000..e508794
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_pmd.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_PMD_H_
+#define _ZSDA_SYM_PMD_H_
+
+#include <rte_cryptodev.h>
+#include <rte_ether.h>
+
+#include "zsda_device.h"
+#include "zsda_sym_capabilities.h"
+
+/** ZTE ZSDA Symmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_ZSDA_SYM_PMD crypto_zsda
+
+/* Internal capabilities */
+#define ZSDA_SYM_CAP_MIXED_CRYPTO (1 << 0)
+#define ZSDA_SYM_CAP_VALID (1 << 31)
+
+extern uint8_t zsda_sym_driver_id;
+
+/** Private data structure for a ZSDA device.
+ * This ZSDA device offers only the symmetric crypto service;
+ * there can be one of these on each zsda_pci_device (VF).
+ */
+struct zsda_sym_dev_private {
+ struct zsda_pci_device *zsda_pci_dev;
+ /**< The zsda pci device hosting the service */
+
+ const struct rte_cryptodev_capabilities *zsda_dev_capabilities;
+ /* ZSDA device symmetric crypto capabilities */
+ const struct rte_memzone *capa_mz;
+ /* Shared memzone for storing capabilities */
+ uint16_t min_enq_burst_threshold;
+ uint32_t internal_capabilities; /* see flags ZSDA_SYM_CAP_xxx */
+};
+
+int zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev,
+ struct zsda_dev_cmd_param *zsda_dev_cmd_param);
+
+int zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_SYM_PMD_H_ */
diff --git a/drivers/meson.build b/drivers/meson.build
index c4ff3ff..74c4840 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -11,6 +11,7 @@ subdirs = [
'common/mlx5', # depends on bus.
'common/qat', # depends on bus.
'common/sfc_efx', # depends on bus.
+ 'common/zsda', # depends on bus.
'mempool', # depends on common and bus.
'dma', # depends on common and bus.
'net', # depends on common, bus, mempool
diff --git a/examples/meson.build b/examples/meson.build
index 6968c09..cf80855 100644
--- a/examples/meson.build
+++ b/examples/meson.build
@@ -59,6 +59,7 @@ all_examples = [
'vm_power_manager/guest_cli',
'vmdq',
'vmdq_dcb',
+ 'zsda',
]
# on install, skip copying all meson.build files
diff --git a/examples/zsda/Makefile b/examples/zsda/Makefile
new file mode 100644
index 0000000..b8971ad
--- /dev/null
+++ b/examples/zsda/Makefile
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 ZTE Corporation
+
+# binary name
+APP = zsdaapp
+
+# all sources are stored in SRCS-y
+SRCS-y := test.c commands.c test_zsda.c test_zsda_cryptodev.c test_zsda_compressdev.c
+
+# Build using pkg-config variables if possible
+ifneq ($(shell pkg-config --exists libdpdk && echo 0),0)
+$(error "no installation of DPDK found")
+endif
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+ ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+ ln -sf $(APP)-static build/$(APP)
+
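+# Typical standalone build, assuming an installed DPDK that provides a
+# libdpdk.pc pkg-config file:
+#   make            # same as "make shared"; produces build/zsdaapp
+#   make static     # statically linked binary
+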
+PKGCONF ?= pkg-config
+
+CFLAGS += -DDPDK=1
+
+# Add flag to allow use of experimental APIs required by this example
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk)
+LDFLAGS += $(shell $(PKGCONF) --libs libdpdk)
+
+LDFLAGS += -lrte_common_zsda -lrte_cryptodev -lrte_compressdev
+
+# for shared library builds, we need to explicitly link these PMDs
+LDFLAGS_SHARED += -lrte_common_zsda -lrte_cryptodev -lrte_compressdev
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+ @mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+ test -d build && rmdir -p build || true
\ No newline at end of file
diff --git a/examples/zsda/commands.c b/examples/zsda/commands.c
new file mode 100644
index 0000000..a9730f5
--- /dev/null
+++ b/examples/zsda/commands.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <termios.h>
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_devargs.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_ring.h>
+
+#include <cmdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_rdline.h>
+#include <rte_string_fns.h>
+
+#include "test.h"
+#include "test_zsda.h"
+
+static struct test_commands_list commands_list =
+ TAILQ_HEAD_INITIALIZER(commands_list);
+
+void
+add_test_command(struct test_command *t)
+{
+ TAILQ_INSERT_TAIL(&commands_list, t, next);
+}
+
+struct cmd_autotest_result {
+ cmdline_fixed_string_t autotest;
+};
+
+static void
+cmd_autotest_parsed(void *parsed_result, __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct test_command *t;
+ struct cmd_autotest_result *res = parsed_result;
+ int ret = 0;
+
+ TAILQ_FOREACH(t, &commands_list, next)
+ {
+ if (!strcmp(res->autotest, t->command))
+ ret = t->callback();
+ }
+
+ last_test_result = ret;
+ if (ret == 0)
+ printf("Test OK\n");
+ else if (ret == TEST_SKIPPED)
+ printf("Test Skipped\n");
+ else
+ printf("Test Failed\n");
+ fflush(stdout);
+}
+
+cmdline_parse_token_string_t cmd_autotest_autotest =
+ TOKEN_STRING_INITIALIZER(struct cmd_autotest_result, autotest, "");
+
+cmdline_parse_inst_t cmd_autotest = {
+ .f = cmd_autotest_parsed, /* function to call */
+ .data = NULL, /* 2nd arg of func */
+ .help_str = "launch autotest",
+ .tokens = {(void *)&cmd_autotest_autotest, NULL},
+};
+
+struct cmd_dump_result {
+ cmdline_fixed_string_t dump;
+};
+
+static void
+dump_struct_sizes(void)
+{
+#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned int)sizeof(t))
+ DUMP_SIZE(struct rte_mbuf);
+ DUMP_SIZE(struct rte_mempool);
+ DUMP_SIZE(struct rte_ring);
+#undef DUMP_SIZE
+}
+
+static void
+cmd_dump_parsed(void *parsed_result, __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_dump_result *res = parsed_result;
+
+ if (!strcmp(res->dump, "dump_physmem"))
+ rte_dump_physmem_layout(stdout);
+ else if (!strcmp(res->dump, "dump_memzone"))
+ rte_memzone_dump(stdout);
+ else if (!strcmp(res->dump, "dump_struct_sizes"))
+ dump_struct_sizes();
+ else if (!strcmp(res->dump, "dump_ring"))
+ rte_ring_list_dump(stdout);
+ else if (!strcmp(res->dump, "dump_mempool"))
+ rte_mempool_list_dump(stdout);
+ else if (!strcmp(res->dump, "dump_devargs"))
+ rte_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_log_types"))
+ rte_log_dump(stdout);
+ else if (!strcmp(res->dump, "dump_malloc_stats"))
+ rte_malloc_dump_stats(stdout, NULL);
+ else if (!strcmp(res->dump, "dump_malloc_heaps"))
+ rte_malloc_dump_heaps(stdout);
+}
+
+cmdline_parse_token_string_t cmd_dump_dump =
+ TOKEN_STRING_INITIALIZER(struct cmd_dump_result, dump,
+ "dump_physmem#"
+ "dump_memzone#"
+ "dump_struct_sizes#"
+ "dump_ring#"
+ "dump_mempool#"
+ "dump_malloc_stats#"
+ "dump_malloc_heaps#"
+ "dump_devargs#"
+ "dump_log_types");
+
+cmdline_parse_inst_t cmd_dump = {
+ .f = cmd_dump_parsed,
+ .data = NULL,
+ .help_str = "dump status",
+ .tokens = {(void *)&cmd_dump_dump, NULL},
+};
+
+struct cmd_dump_one_result {
+ cmdline_fixed_string_t dump;
+ cmdline_fixed_string_t name;
+ cmdline_fixed_string_t arg0;
+};
+
+static void
+cmd_dump_one_parsed(void *parsed_result, struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_dump_one_result *res = parsed_result;
+
+ if (!strcmp(res->dump, "dump_ring")) {
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(res->name);
+ if (r == NULL) {
+ cmdline_printf(cl, "Cannot find ring\n");
+ return;
+ }
+ rte_ring_dump(stdout, r);
+ } else if (!strcmp(res->dump, "dump_mempool")) {
+ struct rte_mempool *mp;
+
+ mp = rte_mempool_lookup(res->name);
+ if (mp == NULL) {
+ cmdline_printf(cl, "Cannot find mempool\n");
+ return;
+ }
+ rte_mempool_dump(stdout, mp);
+ }
+}
+
+cmdline_parse_token_string_t cmd_dump_one_dump = TOKEN_STRING_INITIALIZER(
+ struct cmd_dump_one_result, dump, "dump_mempool");
+
+cmdline_parse_token_string_t cmd_dump_one_name =
+ TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, name, NULL);
+
+cmdline_parse_inst_t cmd_dump_one = {
+ .f = cmd_dump_one_parsed,
+ .data = NULL,
+ .help_str = "dump one mempool: dump_mempool <name>",
+ .tokens = {(void *)&cmd_dump_one_dump, (void *)&cmd_dump_one_name,
+ NULL},
+};
+
+struct cmd_quit_result {
+ cmdline_fixed_string_t quit;
+};
+
+static void
+cmd_quit_parsed(__rte_unused void *parsed_result, struct cmdline *cl,
+ __rte_unused void *data)
+{
+ cmdline_quit(cl);
+}
+
+cmdline_parse_token_string_t cmd_quit_quit =
+ TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+cmdline_parse_inst_t cmd_quit = {
+ .f = cmd_quit_parsed,
+ .data = NULL,
+ .help_str = "exit application",
+ .tokens = {(cmdline_parse_token_hdr_t *)&cmd_quit_quit, NULL},
+};
+
+struct cmd_run_test_result {
+ cmdline_fixed_string_t func;
+ cmdline_fixed_string_t name;
+ cmdline_fixed_string_t arg0;
+ cmdline_fixed_string_t arg1;
+ cmdline_fixed_string_t arg2;
+};
+
+static void
+run_test_group_parse(void *parsed_result, __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_run_test_result *res = parsed_result;
+ enum test_type zsda_test_type = run_test_invalid;
+ uint8_t arg0 = (uint8_t)(atoi(res->arg0) & 0xff);
+ uint8_t arg1 = (uint8_t)(atoi(res->arg1) & 0xff);
+ uint8_t arg2 = (uint8_t)(atoi(res->arg2) & 0xff);
+
+ if (!strcmp(res->func, "run_test"))
+ zsda_test_type = run_test;
+
+ if (atoi(res->arg1) > atoi(res->arg2)) {
+ printf("error: arg1 is greater than arg2!\n");
+ return;
+ }
+
+ if (atoi(res->arg2) >= 128) {
+ printf("error: arg2 must be less than 128!\n");
+ return;
+ }
+
+ if (run_test_invalid == zsda_test_type) {
+ ZSDA_LOG(ERR, "[%d] Failed! zsda_test_type is invalid",
+ __LINE__);
+ return;
+ }
+ if (!strcmp(res->name, "crypto"))
+ zsda_run_test(zsda_test_type, arg0, arg1, arg2, zsda_crypto);
+ else if (!strcmp(res->name, "comp"))
+ zsda_run_test(zsda_test_type, arg0, arg1, arg2, zsda_compress);
+}
+
+cmdline_parse_token_string_t cmd_run_test =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_test_result, func, "run_test");
+
+cmdline_parse_token_string_t cmd_name =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_test_result, name, NULL);
+
+cmdline_parse_token_string_t cmd_arg0 =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_test_result, arg0, NULL);
+
+cmdline_parse_token_string_t cmd_arg1 =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_test_result, arg1, NULL);
+
+cmdline_parse_token_string_t cmd_arg2 =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_test_result, arg2, NULL);
+
+cmdline_parse_inst_t cmd_parse_run_one = {
+ .f = run_test_group_parse,
+ .data = NULL,
+ .help_str = "run one testcase: run_test <name> -- comp/crypto <dev> "
+ "<ring> <cpu>",
+ .tokens = {(cmdline_parse_token_hdr_t *)&cmd_run_test,
+ (cmdline_parse_token_hdr_t *)&cmd_name,
+ (cmdline_parse_token_hdr_t *)&cmd_arg0,
+ (cmdline_parse_token_hdr_t *)&cmd_arg1,
+ (cmdline_parse_token_hdr_t *)&cmd_arg2, NULL},
+};
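+
+/*
+ * Example (illustrative values): from the interactive "RTE>>" prompt,
+ *   run_test crypto 0 0 0
+ * selects the crypto suite; the three numeric arguments are parsed as
+ * arg0/arg1/arg2 and forwarded to zsda_run_test().
+ */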
+/****************/
+
+cmdline_parse_ctx_t main_ctx[] = {
+ (cmdline_parse_inst_t *)&cmd_autotest,
+ (cmdline_parse_inst_t *)&cmd_parse_run_one,
+ (cmdline_parse_inst_t *)&cmd_dump,
+ (cmdline_parse_inst_t *)&cmd_dump_one,
+ (cmdline_parse_inst_t *)&cmd_quit,
+
+ NULL,
+};
+
+int
+commands_init(void)
+{
+ struct test_command *t;
+ char *commands;
+ int commands_len = 0;
+
+ TAILQ_FOREACH(t, &commands_list, next)
+ {
+ commands_len += strlen(t->command) + 1;
+ }
+
+ commands = (char *)calloc(commands_len, sizeof(char));
+ if (!commands)
+ return -1;
+
+ TAILQ_FOREACH(t, &commands_list, next)
+ {
+ strlcat(commands, t->command, commands_len);
+ if (TAILQ_NEXT(t, next) != NULL)
+ strlcat(commands, "#", commands_len);
+ }
+
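+ /* Hand the aggregated "cmd1#cmd2#..." string to the autotest token;
+ * '#' is the cmdline library's separator between accepted strings.
+ */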
+ cmd_autotest_autotest.string_data.str = commands;
+ return 0;
+}
diff --git a/examples/zsda/meson.build b/examples/zsda/meson.build
new file mode 100644
index 0000000..c713330
--- /dev/null
+++ b/examples/zsda/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 ZTE Corporation
+
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+
+includes = include_directories(
+ '../../lib/eal/common',
+ '../../drivers/bus/pci',
+ '../../lib/pci',
+ '../../lib/timer',
+ '../../lib/rcu',
+ '../../lib/cryptodev',
+ '../../lib/compressdev',
+ '../../drivers/bus/vdev',
+ '../../drivers/common/zsda',
+ )
+
+allow_experimental_apis = true
+sources = files(
+ 'test.c',
+ 'commands.c',
+ 'test_zsda.c',
+ 'test_zsda_cryptodev.c',
+ 'test_zsda_compressdev.c',
+)
diff --git a/examples/zsda/test.c b/examples/zsda/test.c
new file mode 100644
index 0000000..f1826ce
--- /dev/null
+++ b/examples/zsda/test.c
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <termios.h>
+
+#ifdef RTE_LIB_CMDLINE
+#include <cmdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_rdline.h>
+#include <cmdline_socket.h>
+extern cmdline_parse_ctx_t main_ctx[];
+#endif
+
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_string_fns.h>
+#ifdef RTE_LIB_TIMER
+#include <rte_timer.h>
+#endif
+
+#include "test.h"
+#include "zsda_logs.h"
+
+#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+#define MAX_EXTRA_ARGS 32
+
+const char *prgname;
+int last_test_result;
+
+/* used in linux for MP and other tests */
+static const char *recursive_call;
+
+static int
+no_action(void)
+{
+ return 0;
+}
+
+static int
+do_recursive_call(void)
+{
+ unsigned int i;
+ struct {
+ const char *env_var;
+ int (*action_fn)(void);
+ } actions[] = {
+ {"test_missing_c_flag", no_action},
+ {"test_main_lcore_flag", no_action},
+ {"test_invalid_n_flag", no_action},
+ {"test_no_hpet_flag", no_action},
+ {"test_allow_flag", no_action},
+ {"test_invalid_b_flag", no_action},
+ {"test_invalid_vdev_flag", no_action},
+ {"test_invalid_r_flag", no_action},
+ {"test_misc_flags", no_action},
+ {"test_memory_flags", no_action},
+ {"test_file_prefix", no_action},
+ {"test_no_huge_flag", no_action},
+ };
+
+ if (recursive_call == NULL)
+ return -1;
+ for (i = 0; i < RTE_DIM(actions); i++) {
+ if (strcmp(actions[i].env_var, recursive_call) == 0)
+ return (actions[i].action_fn)();
+ }
+ printf("ERROR - missing action to take for %s\n", recursive_call);
+ return -1;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+#ifdef RTE_LIB_CMDLINE
+ struct cmdline *cl;
+#endif
+ char *extra_args;
+ int ret;
+
+ extra_args = getenv("DPDK_TEST_PARAMS");
+ if (extra_args != NULL && strlen(extra_args) > 0) {
+ char **all_argv;
+ char *eargv[MAX_EXTRA_ARGS];
+ int all_argc;
+ int eargc;
+ int i;
+
+ ZSDA_LOG(INFO, "Using additional DPDK_TEST_PARAMS: '%s'",
+ extra_args);
+ eargc = rte_strsplit(extra_args,
+ strlen(extra_args) & (0xffffffff), eargv,
+ MAX_EXTRA_ARGS, ' ');
+
+ /* merge argc/argv and the environment args */
+ all_argc = argc + eargc;
+ all_argv = malloc(sizeof(*all_argv) * (all_argc + 1));
+ if (all_argv == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < argc; i++)
+ all_argv[i] = argv[i];
+ for (i = 0; i < eargc; i++)
+ all_argv[argc + i] = eargv[i];
+ all_argv[all_argc] = NULL;
+
+ /* call eal_init with combined args */
+ ret = rte_eal_init(all_argc, all_argv);
+ free(all_argv);
+ } else
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ if (commands_init() < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ argv += ret;
+
+ prgname = argv[0];
+
+ recursive_call = getenv(RECURSIVE_ENV_VAR);
+ if (recursive_call != NULL) {
+ ret = do_recursive_call();
+ goto out;
+ }
+
+#ifdef RTE_LIBEAL_USE_HPET
+ if (rte_eal_hpet_init(1) < 0)
+#endif
+ ZSDA_LOG(INFO,
+ "HPET is not enabled, using TSC as default timer");
+
+#ifdef RTE_LIB_CMDLINE
+ char *dpdk_test = getenv("DPDK_TEST");
+
+ if (dpdk_test && strlen(dpdk_test)) {
+ char buf[1024];
+
+ cl = cmdline_new(main_ctx, "RTE>>", 0, 1);
+ if (cl == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ snprintf(buf, sizeof(buf), "%s\n", dpdk_test);
+ if (cmdline_in(cl, buf, strlen(buf) & (0xffffffff)) < 0) {
+ printf("error on cmdline input\n");
+
+ ret = -1;
+ } else {
+ ret = last_test_result;
+ }
+ cmdline_free(cl);
+ goto out;
+ } else {
+ /* if no DPDK_TEST env variable, go interactive */
+ cl = cmdline_stdin_new(main_ctx, "RTE>>");
+ if (cl == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ cmdline_interact(cl);
+ cmdline_stdin_exit(cl);
+ }
+#endif
+
+ ret = 0;
+
+out:
+#ifdef RTE_LIB_TIMER
+ rte_timer_subsystem_finalize();
+#endif
+ rte_eal_cleanup();
+ return ret;
+}
diff --git a/examples/zsda/test.h b/examples/zsda/test.h
new file mode 100644
index 0000000..98db2e5
--- /dev/null
+++ b/examples/zsda/test.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _TEST_H_
+#define _TEST_H_
+
+#include <stddef.h>
+#include <sys/queue.h>
+
+#include <rte_hexdump.h>
+#include <rte_test.h>
+#include <rte_common.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
+
+#include <zsda_common.h>
+
+#ifndef TEST_SUCCESS
+#define TEST_SUCCESS EXIT_SUCCESS
+#endif
+
+#ifndef TEST_FAILED
+#define TEST_FAILED -1
+#endif
+
+#ifndef CHECK_ADDR_NULL
+#define CHECK_ADDR_NULL(addr) \
+ do { \
+ if (addr == NULL) { \
+ ZSDA_LOG(ERR, E_NULL); \
+ return TEST_FAILED; \
+ } \
+ } while (0)
+#endif
+
+#define TEST_SKIPPED 77
+#define RECURSIVE_ENV_VAR "RTE_TEST_RECURSIVE"
+
+/** Before including the test.h file you can define the
+ * TEST_TRACE_FAILURE(_file, _line, _func) macro to better trace/debug test
+ * failures. It is mostly useful during the test development phase; see the
+ * example sketch below.
+ */
+#ifndef TEST_TRACE_FAILURE
+#define TEST_TRACE_FAILURE(_file, _line, _func)
+#endif
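+
+/*
+ * Example sketch (not used by default): log every assertion failure by
+ * defining the hook before this header is included, e.g.
+ *
+ *   #define TEST_TRACE_FAILURE(_file, _line, _func) \
+ *           printf("FAILED: %s:%d (%s)\n", _file, _line, _func)
+ *   #include "test.h"
+ */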
+
+
+#define TEST_ASSERT RTE_TEST_ASSERT
+
+#define TEST_ASSERT_EQUAL RTE_TEST_ASSERT_EQUAL
+
+/* Compare two buffers (length in bytes) */
+#define TEST_ASSERT_BUFFERS_ARE_EQUAL(a, b, len, msg, ...) \
+ do { \
+ if (memcmp(a, b, len)) { \
+ printf("TestCase %s() line %d failed: " msg "\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ TEST_TRACE_FAILURE(__FILE__, __LINE__, __func__); \
+ return TEST_FAILED; \
+ } \
+ } while (0)
+
+/* Compare two buffers with offset (length and offset in bytes) */
+#define TEST_ASSERT_BUFFERS_ARE_EQUAL_OFFSET(a, b, len, off, msg, ...) \
+ do { \
+ const uint8_t *_a_with_off = (const uint8_t *)a + off; \
+ const uint8_t *_b_with_off = (const uint8_t *)b + off; \
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(_a_with_off, _b_with_off, len, \
+ msg); \
+ } while (0)
+
+/* Compare two buffers (length in bits) */
+#define TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(a, b, len, msg, ...) \
+ do { \
+ uint8_t _last_byte_a, _last_byte_b; \
+ uint8_t _last_byte_mask, _last_byte_bits; \
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(a, b, (len >> 3), msg); \
+ if (len % 8) { \
+ _last_byte_bits = len % 8; \
+ _last_byte_mask = ~((1 << (8 - _last_byte_bits)) - 1); \
+ _last_byte_a = ((const uint8_t *)a)[len >> 3]; \
+ _last_byte_b = ((const uint8_t *)b)[len >> 3]; \
+ _last_byte_a &= _last_byte_mask; \
+ _last_byte_b &= _last_byte_mask; \
+ if (_last_byte_a != _last_byte_b) { \
+ printf("TestCase %s() line %d failed: " msg \
+ "\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ TEST_TRACE_FAILURE(__FILE__, __LINE__, \
+ __func__); \
+ return TEST_FAILED; \
+ } \
+ } \
+ } while (0)
+
+/* Compare two buffers with offset (length and offset in bits) */
+#define TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT_OFFSET(a, b, len, off, msg, ...) \
+ do { \
+ uint8_t _first_byte_a, _first_byte_b; \
+ uint8_t _first_byte_mask, _first_byte_bits; \
+ uint32_t _len_without_first_byte = \
+ (off % 8) ? len - (8 - (off % 8)) : len; \
+ uint32_t _off_in_bytes = \
+ (off % 8) ? (off >> 3) + 1 : (off >> 3); \
+ const uint8_t *_a_with_off = \
+ (const uint8_t *)a + _off_in_bytes; \
+ const uint8_t *_b_with_off = \
+ (const uint8_t *)b + _off_in_bytes; \
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(_a_with_off, _b_with_off, \
+ _len_without_first_byte, \
+ msg); \
+ if (off % 8) { \
+ _first_byte_bits = 8 - (off % 8); \
+ _first_byte_mask = (1 << _first_byte_bits) - 1; \
+ _first_byte_a = *(_a_with_off - 1); \
+ _first_byte_b = *(_b_with_off - 1); \
+ _first_byte_a &= _first_byte_mask; \
+ _first_byte_b &= _first_byte_mask; \
+ if (_first_byte_a != _first_byte_b) { \
+ printf("TestCase %s() line %d failed: " msg \
+ "\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ TEST_TRACE_FAILURE(__FILE__, __LINE__, \
+ __func__); \
+ return TEST_FAILED; \
+ } \
+ } \
+ } while (0)
+
+#define TEST_ASSERT_NOT_EQUAL RTE_TEST_ASSERT_NOT_EQUAL
+
+#define TEST_ASSERT_SUCCESS RTE_TEST_ASSERT_SUCCESS
+
+#define TEST_ASSERT_FAIL RTE_TEST_ASSERT_FAIL
+
+#define TEST_ASSERT_NULL RTE_TEST_ASSERT_NULL
+
+#define TEST_ASSERT_NOT_NULL RTE_TEST_ASSERT_NOT_NULL
+
+struct unit_test_case {
+ int (*setup)(void);
+ void (*teardown)(void);
+ int (*testcase)(void);
+ const char *name;
+ unsigned int enabled;
+};
+
+#define TEST_CASE(fn) \
+ { \
+ NULL, NULL, fn, #fn, 1 \
+ }
+
+#define TEST_CASE_NAMED(name, fn) \
+ { \
+ NULL, NULL, fn, name, 1 \
+ }
+
+#define TEST_CASE_ST(setup, teardown, testcase) \
+ { \
+ setup, teardown, testcase, #testcase, 1 \
+ }
+
+#define TEST_CASE_DISABLED(fn) \
+ { \
+ NULL, NULL, fn, #fn, 0 \
+ }
+
+#define TEST_CASE_ST_DISABLED(setup, teardown, testcase) \
+ { \
+ setup, teardown, testcase, #testcase, 0 \
+ }
+
+#define TEST_CASES_END() \
+ { \
+ NULL, NULL, NULL, NULL, 0 \
+ }
+
+static inline void debug_hexdump(FILE *file, const char *title, const void *buf,
+ size_t len)
+{
+ if (rte_log_get_global_level() == RTE_LOG_DEBUG)
+ rte_hexdump(file, title, buf, len);
+}
+
+struct unit_test_suite {
+ const char *suite_name;
+ int (*setup)(void);
+ void (*teardown)(void);
+ struct unit_test_case unit_test_cases[];
+};
+
+int unit_test_suite_runner(struct unit_test_suite *suite);
+extern int last_test_result;
+
+extern const char *prgname;
+
+int commands_init(void);
+int test_set_rxtx_conf(cmdline_fixed_string_t mode);
+int test_set_rxtx_anchor(cmdline_fixed_string_t type);
+int test_set_rxtx_sc(cmdline_fixed_string_t type);
+
+typedef int(test_callback)(void);
+TAILQ_HEAD(test_commands_list, test_command);
+struct test_command {
+ TAILQ_ENTRY(test_command) next;
+ const char *command;
+ test_callback *callback;
+};
+
+void add_test_command(struct test_command *t);
+
+struct cml_run_test {
+ char func[128];
+ char name[128];
+ int arg0;
+ int arg1;
+ int arg2;
+};
+
+/* Register a test function with its command string (usage sketch below) */
+#define REGISTER_TEST_COMMAND(cmd, func) \
+ static struct test_command test_struct_##cmd = { \
+ .command = RTE_STR(cmd), \
+ .callback = func, \
+ }; \
+ RTE_INIT(test_register_##cmd) \
+ { \
+ add_test_command(&test_struct_##cmd); \
+ }
+
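+/*
+ * Usage sketch (hypothetical command name): the callback becomes selectable
+ * from the "RTE>>" prompt and via the DPDK_TEST environment variable, e.g.
+ *
+ *   static int test_zsda_example(void) { return TEST_SUCCESS; }
+ *   REGISTER_TEST_COMMAND(zsda_example_test, test_zsda_example);
+ */
+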
+extern uint8_t *g_name_core;
+extern void zsda_test_func_cmdline(struct cml_run_test *res);
+
+#endif
diff --git a/examples/zsda/test_zsda.c b/examples/zsda/test_zsda.c
new file mode 100644
index 0000000..748dd9b
--- /dev/null
+++ b/examples/zsda/test_zsda.c
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include "rte_lcore.h"
+
+#include "test_zsda.h"
+#include "test_zsda_cryptodev.h"
+#include "zsda_device.h"
+
+#define DPU_PCI_DEVICES_DIR "/sys/bus/pci/devices"
+#define FILE_PATH_LEN (100)
+
+const char *zsda_device_name[4] = {
+ "crypto",
+ "comp",
+};
+
+struct zsda_test_dev_info zsda_info[ZSDA_MAX_QUEUE];
+struct zsda_thread zsda_data[ZSDA_MAX_QUEUE];
+uint32_t global_zsda_idx;
+
+static int
+zsda_unit_test_suite_runner(struct zsda_unit_test_suite *suite,
+ struct zsda_test_dev_info *dev_info)
+{
+ unsigned int total = 0;
+ unsigned int failed = 0;
+ unsigned int executed = 0;
+ unsigned int skipped = 0;
+ unsigned int succeeded = 0;
+ unsigned int unsupported = 0;
+ int test_success;
+ const char *status;
+
+ if (suite->suite_name) {
+ printf("\n");
+ printf(" + "
+ "---------------------------*---------------------------"
+ "- +\n");
+ printf(" + Test Suite : %s\n", suite->suite_name);
+ }
+
+ if (suite->setup) {
+ test_success = suite->setup(dev_info);
+ if (test_success != 0) {
+ while (suite->unit_test_cases[total].testcase) {
+ if (!suite->unit_test_cases[total].enabled ||
+ test_success == TEST_SKIPPED)
+ skipped++;
+ else
+ failed++;
+ total++;
+ }
+ goto suite_summary;
+ }
+ }
+
+ printf(" + ---------------------------*--------------------------- "
+ "+\n\n");
+
+ while (suite->unit_test_cases[total].testcase) {
+ if (!suite->unit_test_cases[total].enabled) {
+ skipped++;
+ total++;
+ continue;
+ } else
+ executed++;
+
+ if (suite->unit_test_cases[total].setup)
+ test_success =
+ suite->unit_test_cases[total].setup(dev_info);
+ else
+ test_success = TEST_SUCCESS;
+
+ if (test_success == TEST_SUCCESS) {
+ test_success = suite->unit_test_cases[total].testcase(
+ dev_info);
+ if (test_success == TEST_SUCCESS)
+ succeeded++;
+ else if (test_success == TEST_SKIPPED)
+ skipped++;
+ else if (test_success == -ENOTSUP)
+ unsupported++;
+ else
+ failed++;
+ } else if (test_success == -ENOTSUP)
+ unsupported++;
+ else
+ failed++;
+
+ if (suite->unit_test_cases[total].teardown)
+ suite->unit_test_cases[total].teardown(dev_info);
+
+ if (test_success == TEST_SUCCESS)
+ status = "succeeded";
+ else if (test_success == TEST_SKIPPED)
+ status = "skipped";
+ else if (test_success == -ENOTSUP)
+ status = "unsupported";
+ else
+ status = "failed";
+
+ printf(" +++++ TestCase [%2d] : %s %s\n\n", total,
+ suite->unit_test_cases[total].name, status);
+
+ total++;
+ }
+
+ if (suite->teardown)
+ suite->teardown(dev_info);
+
+suite_summary:
+ printf("\n");
+ printf(" + ------------------------------------------------------- "
+ "+\n");
+ printf(" + Test Suite Summary\n");
+ printf(" + Tests Total : %2d\n", total);
+ printf(" + Tests Skipped : %2d\n", skipped);
+ printf(" + Tests Executed : %2d\n", executed);
+ printf(" + Tests Unsupported: %2d\n", unsupported);
+ printf(" + Tests Passed : %2d\n", succeeded);
+ printf(" + Tests Failed : %2d\n", failed);
+ printf(" + ------------------------------------------------------- "
+ "+\n");
+
+ printf("test cases finished!\n");
+ last_test_result = (int)failed;
+
+ if (failed)
+ return TEST_FAILED;
+ if (total == skipped)
+ return TEST_SKIPPED;
+ return TEST_SUCCESS;
+}
+
+static int
+zsda_launch_one_lcore(__rte_unused void *dummy)
+{
+ uint32_t id = rte_lcore_id();
+ uint32_t index = id - rte_get_main_lcore();
+
+ if (false == zsda_info[index].used)
+ return TEST_FAILED;
+ if (zsda_info[index].testing)
+ return TEST_FAILED;
+
+ zsda_info[index].testing = true;
+
+ switch (zsda_info[index].dev_type) {
+ case zsda_crypto:
+ zsda_unit_test_suite_runner(&cryptodev_zsda_testsuite_private,
+ &zsda_info[index]);
+ break;
+ case zsda_compress:
+ zsda_unit_test_suite_runner(&compressdev_zsda_testsuite_private,
+ &zsda_info[index]);
+ break;
+ default:
+ break;
+ }
+
+ zsda_info[id - rte_get_main_lcore()].testing = false;
+ return TEST_SUCCESS;
+}
+
+static void
+rebuild_device_info(void)
+{
+ uint8_t nb_devs = 0;
+ uint8_t i = 0;
+ uint16_t j = 0;
+ struct rte_cryptodev_info cryp_info;
+ struct rte_compressdev_info comp_info;
+
+ global_zsda_idx = 0;
+ memset(zsda_info, 0, sizeof(zsda_info));
+
+ nb_devs = rte_cryptodev_count();
+ if (nb_devs)
+ for (i = 0; i < nb_devs; i++) {
+ rte_cryptodev_info_get(i, &cryp_info);
+ for (j = 0; j < cryp_info.max_nb_queue_pairs; j++) {
+ zsda_info[global_zsda_idx].used = true;
+ zsda_info[global_zsda_idx].dev_type =
+ zsda_crypto;
+ zsda_info[global_zsda_idx].dev_id = i;
+ zsda_info[global_zsda_idx].ring_id = (uint8_t)j;
+ global_zsda_idx++;
+ }
+ }
+
+ nb_devs = rte_compressdev_count();
+ if (nb_devs)
+ for (i = 0; i < nb_devs; i++) {
+ rte_compressdev_info_get(i, &comp_info);
+ for (j = 0; j < comp_info.max_nb_queue_pairs; j++) {
+ zsda_info[global_zsda_idx].used = true;
+ zsda_info[global_zsda_idx].dev_type =
+ zsda_compress;
+ zsda_info[global_zsda_idx].dev_id = i;
+ zsda_info[global_zsda_idx].ring_id = (uint8_t)j;
+ global_zsda_idx++;
+ }
+ }
+}
+
+static int
+test_zsda_bind_cpu_test(void)
+{
+ rte_eal_mp_remote_launch(zsda_launch_one_lcore, NULL, CALL_MAIN);
+ rte_eal_mp_wait_lcore();
+ return 0;
+}
+
+static void *
+thread_normal_group_test(void *p)
+{
+ struct zsda_test_info data;
+ struct zsda_test_dev_info info = {0};
+ rte_cpuset_t cpuset;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
+
+ CPU_ZERO(&cpuset);
+ memcpy(&data, p, sizeof(data));
+ info.dev_id = 0;
+ info.ring_id = data.arg1;
+ info.ring_id_start = 0;
+ info.ring_id_end = (info.ring_id == 0) ? 1 : info.ring_id;
+ CPU_SET(data.cpu_id, &cpuset);
+ snprintf(thread_name, sizeof(thread_name), "test-d%d-r%d-c0x%x",
+ data.arg0, data.arg1, data.cpu_id);
+ rte_thread_setname(pthread_self(), thread_name);
+ rte_thread_set_affinity(&cpuset);
+
+ if (data.zsda_test_type == run_test) {
+ switch (data.type) {
+ case zsda_crypto:
+ zsda_unit_test_suite_runner(
+ &cryptodev_zsda_testsuite_private, &info);
+ break;
+ case zsda_compress:
+ zsda_unit_test_suite_runner(
+ &compressdev_zsda_testsuite_private, &info);
+ break;
+ default:
+ break;
+ }
+ }
+ return NULL;
+}
+
+static int
+test_zsda_mul_thread_test(void)
+{
+ uint32_t i = 0;
+
+ for (i = 1; i < 10; i++) {
+ if (pthread_create(&zsda_data[i].id, NULL,
+ thread_normal_group_test, &i) != 0)
+ break;
+ }
+
+ return 0;
+}
+
+static int
+test_rebuild_zsda_deviceinfo(void)
+{
+ rebuild_device_info();
+ return 0;
+}
+
+void
+zsda_run_test(enum test_type type, uint8_t arg0, uint8_t arg1, uint8_t arg2,
+ enum zsda_group_test_type zsda_test_group_type)
+{
+ /* static: the worker thread copies this struct asynchronously, so it
+ * must stay valid after this function returns.
+ */
+ static struct zsda_test_info data;
+ pthread_t id;
+
+ memset(&data, 0, sizeof(data));
+
+ data.arg0 = arg0;
+ data.arg1 = arg1;
+ data.arg2 = arg2;
+ data.type = zsda_test_group_type;
+ data.zsda_test_type = type;
+ data.cpu_id = RTE_PER_LCORE(_lcore_id);
+
+ switch (type) {
+ case run_test:
+ pthread_create(&id, NULL, thread_normal_group_test, &data);
+ break;
+ case run_test_invalid:
+ ZSDA_LOG(ERR, "Wrong command");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+REGISTER_TEST_COMMAND(zsda_mul_thread_test, test_zsda_mul_thread_test);
+REGISTER_TEST_COMMAND(zsda_bind_cpu_test, test_zsda_bind_cpu_test);
+REGISTER_TEST_COMMAND(rebuild_zsda_deviceinfo, test_rebuild_zsda_deviceinfo);
diff --git a/examples/zsda/test_zsda.h b/examples/zsda/test_zsda.h
new file mode 100644
index 0000000..66417f4
--- /dev/null
+++ b/examples/zsda/test_zsda.h
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef TEST_ZSDA_H_
+#define TEST_ZSDA_H_
+
+#include <math.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_compressdev.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_ether.h>
+#include <rte_hexdump.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_pause.h>
+#include <rte_string_fns.h>
+
+#include "rte_lcore.h"
+#include "test.h"
+#include "zsda_common.h"
+
+#define NO_COMPILE_CI 0
+#define MAX_NUM_OPS_INFLIGHT (4096)
+#define MIN_NUM_OPS_INFLIGHT (128)
+#define DEFAULT_NUM_OPS_INFLIGHT (128)
+
+#define NUM_MBUFS (8191)
+#define MBUF_CACHE_SIZE (256)
+
+#define MBUF_DATAPAYLOAD_SIZE (8192 * 2)
+#define MBUF_SIZE \
+ (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
+
+#define MAX_MBUF_SEGMENT_SIZE 65535
+#define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
+#define ZSDA_MAX_QUEUE 2048
+
+#define RTE_COMP_LEVEL_ZSDA_DEFAULT (9)
+#define TIMES_DEQUEUE 3
+#define MAX_OP_NUM_ONE_CASE 511
+#define NUM_BIG_MBUFS (512 + 1)
+
+#define DESCR_CRYPTO_CYCLE "Crypto Cycle"
+
+#define MAX_NUM_CYCLE 10
+#define MAX_NUM_WQE 512
+#define ZSDA_SGL_MAX_NUMBER 512
+#define MAX_NUM_SEGS (ZSDA_SGL_MAX_NUMBER / 32 * 31 + 1)
+#define SET_NUM_WQE_100 100
+
+#define EC_BLOCK_LEN_1K (1024)
+
+#define DATA_LEN_512 (512)
+#define DATA_LEN_4096 (4096)
+#define DATA_LEN_8192 (8192)
+
+#define SRC_PATTERN 0xa5
+#define DST_PATTERN 0xb6
+
+enum zsda_group_test_type {
+ zsda_crypto = 0,
+ zsda_compress,
+};
+
+struct zsda_thread {
+ pthread_t id;
+};
+
+enum test_type {
+ run_test = 0,
+ run_test_invalid,
+};
+
+struct zsda_test_info {
+ enum zsda_group_test_type type;
+ enum zsda_algo_core zsda_core;
+ enum test_type zsda_test_type;
+ int cpu_id;
+ uint8_t arg0;
+ uint8_t arg1;
+ uint8_t arg2;
+};
+
+struct crypto_testsuite_params {
+ struct rte_mempool *mbuf_pool;
+ struct rte_mempool *large_mbuf_pool;
+ struct rte_mempool *op_mpool;
+ struct rte_mempool *session_mpool;
+ struct rte_mempool *session_priv_mpool;
+ struct rte_cryptodev_config conf;
+ struct rte_cryptodev_qp_conf qp_conf;
+
+ uint8_t valid_devid;
+ uint8_t valid_ringid;
+};
+
+struct crypto_unittest_params {
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
+ struct rte_crypto_sym_xform aead_xform;
+
+ union {
+ struct rte_cryptodev_sym_session *sess;
+ };
+
+ struct rte_crypto_op *op;
+ struct rte_mbuf *obuf, *ibuf;
+ uint8_t *digest;
+};
+
+struct comp_testsuite_params {
+ struct rte_mempool *mbuf_pool;
+ struct rte_mempool *op_mpool;
+ struct rte_comp_xform *def_comp_xform;
+ struct rte_comp_xform *def_decomp_xform;
+
+ struct rte_compressdev_config conf;
+ struct rte_compressdev_qp_conf qp_conf;
+
+ uint8_t valid_devid;
+ uint8_t valid_ringid;
+};
+
+struct comp_unittest_params {
+ struct rte_comp_op *op;
+ struct rte_mbuf *obuf, *ibuf;
+};
+
+struct zsda_test_dev_info {
+ bool used;
+ bool testing;
+ enum zsda_group_test_type dev_type;
+ uint8_t dev_id;
+ uint8_t ring_id;
+
+ uint8_t dev_id_start;
+ uint8_t dev_id_end;
+ uint8_t ring_id_start;
+ uint8_t ring_id_end;
+ enum zsda_algo_core algo_core_id;
+
+ struct crypto_testsuite_params ts_crypto_params;
+ struct crypto_unittest_params ut_crypto_params;
+ struct comp_testsuite_params ts_comp_params;
+ struct comp_unittest_params ut_comp_params;
+};
+
+struct zsda_unit_test_case {
+ int (*setup)(struct zsda_test_dev_info *dev_info);
+ void (*teardown)(struct zsda_test_dev_info *dev_info);
+ int (*testcase)(struct zsda_test_dev_info *dev_info);
+ const char *name;
+ unsigned int enabled;
+};
+
+struct zsda_unit_test_suite {
+ const char *suite_name;
+ int (*setup)(struct zsda_test_dev_info *dev_info);
+ void (*teardown)(struct zsda_test_dev_info *dev_info);
+ struct zsda_unit_test_case unit_test_cases[];
+};
+
+extern struct zsda_unit_test_suite compressdev_zsda_testsuite_private;
+extern struct zsda_unit_test_suite cryptodev_zsda_testsuite_private;
+
+enum zsda_algo_crypto {
+ AES_XTS_256,
+ AES_XTS_512,
+ SM4,
+};
+
+enum zsda_algo_hash {
+ HASH_SHA1,
+ HASH_SHA224,
+ HASH_SHA256,
+ HASH_SHA384,
+ HASH_SHA512,
+ HASH_SM3,
+};
+
+struct data_text {
+ uint8_t *data;
+ uint32_t len;
+};
+
+struct crypto_data_config {
+ struct data_text plaintext;
+
+ struct data_text aes256_ct;
+ struct data_text aes512_ct;
+ struct data_text sm4_ct;
+
+ struct {
+ uint8_t data[64];
+ uint16_t len;
+ } key;
+
+ struct {
+ uint8_t data[16];
+ uint16_t len;
+ } iv;
+ uint32_t lbads;
+};
+
+/**
+ * Write (spread) data from buffer to mbuf data
+ *
+ * @param mbuf
+ * Destination mbuf
+ * @param offset
+ * Start offset in mbuf
+ * @param len
+ * Number of bytes to copy
+ * @param buffer
+ * Continuous source buffer
+ */
+static inline void
+pktmbuf_write(struct rte_mbuf *mbuf, uint32_t offset, uint32_t len,
+ const uint8_t *buffer)
+{
+ uint32_t n = len;
+ uint32_t l;
+ struct rte_mbuf *m;
+ char *dst;
+
+ for (m = mbuf; (m != NULL) && (offset > m->data_len); m = m->next)
+ offset -= m->data_len;
+
+ if (m == NULL) {
+ ZSDA_LOG(ERR, E_NULL);
+ return;
+ }
+
+ l = m->data_len - offset;
+
+ dst = rte_pktmbuf_mtod_offset(m, char *, offset);
+ if (len <= l) {
+ rte_memcpy(dst, buffer, len);
+ return;
+ }
+
+ rte_memcpy(dst, buffer, l);
+ buffer += l;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ dst = rte_pktmbuf_mtod(m, char *);
+ l = m->data_len;
+ if (n < l) {
+ rte_memcpy(dst, buffer, n);
+ return;
+ }
+ rte_memcpy(dst, buffer, l);
+ buffer += l;
+ n -= l;
+ }
+}
+
+static inline struct rte_mbuf *
+create_segmented_mbuf(struct rte_mempool *mbuf_pool, uint32_t pkt_len,
+ uint32_t nb_segs, uint8_t pattern)
+{
+ struct rte_mbuf *m = NULL, *mbuf = NULL;
+ uint8_t *dst;
+ uint16_t data_len = 0;
+ uint32_t i;
+ uint32_t size;
+ uint16_t t_len;
+
+	if (pkt_len < 1) {
+		ZSDA_LOG(ERR, "Invalid pkt_len");
+		return NULL;
+	}
+
+	if (nb_segs < 1) {
+		ZSDA_LOG(ERR, "Invalid nb_segs");
+		return NULL;
+	}
+ t_len = (pkt_len >= nb_segs) ? pkt_len / nb_segs : 1;
+ size = pkt_len;
+
+	/* Create a chained mbuf and fill it with the generated pattern */
+ for (i = 0; size > 0; i++) {
+ m = rte_pktmbuf_alloc(mbuf_pool);
+ if (i == 0)
+ mbuf = m;
+
+ if (m == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ goto fail;
+ }
+
+		/* Fill the whole buffer so the tailroom content is deterministic */
+ memset(m->buf_addr, pattern, m->buf_len);
+
+ data_len = (size > t_len) ? t_len : size & (0xffff);
+ if ((i == (nb_segs - 1)) && (size < MAX_DATA_MBUF_SIZE))
+ data_len = size & (0xffff);
+ dst = (uint8_t *)rte_pktmbuf_append(m, data_len);
+ if (dst == NULL) {
+ ZSDA_LOG(ERR, E_FUNC);
+ goto fail;
+ }
+
+ if (mbuf != m)
+ rte_pktmbuf_chain(mbuf, m);
+
+ size -= data_len;
+ }
+ return mbuf;
+
+fail:
+ if (mbuf)
+ rte_pktmbuf_free(mbuf);
+ return NULL;
+}
+
+static inline int
+compare_buffers(uint8_t *buffer1, uint32_t buffer1_len, const uint8_t *buffer2,
+ uint32_t buffer2_len)
+{
+ if (buffer1_len != buffer2_len) {
+ ZSDA_LOG(ERR, "Len unequal");
+ return TEST_FAILED;
+ }
+
+ if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
+ ZSDA_LOG(ERR, "Buffers are different");
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+}
+
+struct Enqueue_dequeue_config {
+ uint8_t dev_id;
+ uint8_t queue_id;
+ uint16_t op_num;
+ void **op_array;
+ void **cq_array;
+ enum zsda_algo_core zsda_core;
+};
+
+struct Op_config {
+ void *int_data;
+ struct rte_mempool *mbuf_pool;
+ struct rte_mempool *op_mpool;
+ void **op_array;
+ enum zsda_algo_core zsda_core;
+};
+
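+/*
+ * zsda_buf_config: mbuf_pool, data, data_len and nb_segs are inputs; buf is
+ * filled in by buf_create_process()/buf_create() with the resulting
+ * (possibly chained) mbuf.
+ */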
+struct zsda_buf_config {
+ struct rte_mbuf *buf;
+ struct rte_mempool *mbuf_pool;
+ uint8_t *data;
+ uint32_t data_len;
+ uint32_t nb_segs;
+};
+
+struct Sgl_offset_config {
+ uint32_t *sgls;
+ uint32_t *offset;
+ uint32_t num_sgls;
+ uint32_t num_offset;
+ uint32_t *mul_sgls;
+ uint32_t *mul_offsets;
+};
+
+static int __rte_unused
+buf_create_process(struct zsda_buf_config *buf_config)
+{
+ struct rte_mbuf *buf = NULL;
+ struct rte_mempool *mbuf_pool = buf_config->mbuf_pool;
+ uint8_t *data = buf_config->data;
+ uint32_t data_len = buf_config->data_len;
+ uint32_t nb_segs = buf_config->nb_segs;
+
+ buf = create_segmented_mbuf(mbuf_pool, data_len, nb_segs,
+ SRC_PATTERN);
+ if (buf == NULL) {
+ ZSDA_LOG(ERR, "Cannot create mbuf!");
+ return TEST_FAILED;
+ }
+
+ pktmbuf_write(buf, 0, data_len, data);
+
+ buf_config->buf = buf;
+
+ return TEST_SUCCESS;
+}
+
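+/*
+ * When need_malloc_room is true, the mbuf is pre-filled from a temporary
+ * heap buffer set to DST_PATTERN so its contents are deterministic before
+ * the device writes into it; the caller's data pointer and lengths are
+ * restored before returning. This describes the intent of the helper as
+ * written here, not a hardware requirement.
+ */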
+static int __rte_unused
+buf_create(struct zsda_buf_config *buf_config, bool need_malloc_room)
+{
+ uint32_t data_len = buf_config->data_len;
+ uint32_t nb_segs = buf_config->nb_segs;
+ uint8_t *data = buf_config->data;
+	uint8_t *str = NULL;
+	int ret;
+
+	if (need_malloc_room) {
+		str = (uint8_t *)malloc(buf_config->data_len);
+		if (str == NULL) {
+			ZSDA_LOG(ERR, E_MALLOC);
+			return TEST_FAILED;
+		}
+		memset(str, DST_PATTERN, buf_config->data_len);
+		buf_config->data = str;
+	}
+
+	ret = buf_create_process(buf_config);
+
+	if (need_malloc_room) {
+		free(str);
+		str = NULL;
+		buf_config->data = data;
+		buf_config->data_len = data_len;
+		buf_config->nb_segs = nb_segs;
+	}
+
+	return ret;
+}
+
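+/*
+ * Pick a segment count for a test mbuf chain: a caller value of 0 means
+ * "derive from the data length", i.e. ceil(data_len / MAX_DATA_MBUF_SIZE)
+ * segments of at most MAX_DATA_MBUF_SIZE bytes each.
+ */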
+static inline uint32_t
+set_nb_segs(uint32_t int_data_nb_segs, uint32_t data_len)
+{
+ uint32_t ret_nb_segs;
+
+ if (int_data_nb_segs == 0)
+ ret_nb_segs = ((data_len % MAX_DATA_MBUF_SIZE) == 0)
+ ? (data_len / MAX_DATA_MBUF_SIZE)
+ : ((data_len / MAX_DATA_MBUF_SIZE) + 1);
+ else
+ ret_nb_segs = int_data_nb_segs;
+	if (ret_nb_segs > (ZSDA_SGL_MAX_NUMBER / 32 * 31)) {
+		ZSDA_LOG(DEBUG, "nb_segs exceeds the SGL capacity");
+		return 0;
+	}
+ return ret_nb_segs;
+}
+
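+/*
+ * Map data segments to hardware SGL entries. The 31/32 constants here and
+ * in set_nb_segs() suggest each SGL holds 32 entries with the last one
+ * chaining to the next SGL, so nb_segs segments consume
+ * (nb_segs / 31) * 32 + nb_segs % 31 entries, e.g. 62 segments -> 64
+ * entries. This reading is inferred from the arithmetic, not from a
+ * hardware datasheet.
+ */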
+static inline uint32_t
+cal_num_sgl(uint32_t nb_segs)
+{
+ return (nb_segs % 31) + (nb_segs / 31 * 32);
+}
+
+void zsda_run_test(enum test_type type, uint8_t arg0, uint8_t arg1,
+ uint8_t arg2,
+ enum zsda_group_test_type zsda_test_group_type);
+
+#endif /* TEST_ZSDA_H_ */
diff --git a/examples/zsda/test_zsda_compressdev.c b/examples/zsda/test_zsda_compressdev.c
new file mode 100644
index 0000000..8be7227
--- /dev/null
+++ b/examples/zsda/test_zsda_compressdev.c
@@ -0,0 +1,678 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <rte_build_config.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_comp.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_ether.h>
+#include <rte_hexdump.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_pause.h>
+#include <rte_string_fns.h>
+#include <zlib.h>
+
+#include "test.h"
+#include "test_zsda.h"
+#include "test_zsda_compressdev.h"
+
+#define DEFAULT_WINDOW_SIZE 15
+#define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
+#define NUM_OPS (512 * 2)
+#define NUM_MAX_XFORMS (512 * 2)
+#define NUM_MAX_INFLIGHT_OPS 128
+#define CACHE_SIZE 0
+
+char test_msg[COMPRESSDEV_TEST_MSG_LEN + 1];
+
+static void
+testsuite_teardown(struct zsda_test_dev_info *dev_info)
+{
+ struct comp_testsuite_params *ts_params = &(dev_info->ts_comp_params);
+
+ rte_mempool_free(ts_params->mbuf_pool);
+ rte_mempool_free(ts_params->op_mpool);
+ rte_free(ts_params->def_comp_xform);
+ rte_free(ts_params->def_decomp_xform);
+}
+
+static int
+testsuite_setup(struct zsda_test_dev_info *dev_info)
+{
+ struct comp_testsuite_params *ts_params = &(dev_info->ts_comp_params);
+ uint8_t nb_devs;
+ char name[256] = {0};
+
+ memset(ts_params, 0, sizeof(*ts_params));
+ ts_params->valid_devid = dev_info->dev_id;
+ ts_params->valid_ringid = dev_info->ring_id;
+ ts_params->conf.socket_id = (int)(rte_socket_id() & 0xffff);
+ nb_devs = rte_compressdev_count();
+
+ if (nb_devs < 1) {
+ ZSDA_LOG(ERR, "No compress devices found.\n");
+ return TEST_SKIPPED;
+ }
+
+ snprintf(name, sizeof(name), "COMP_POOL_D%d", ts_params->valid_devid);
+
+	ts_params->mbuf_pool = rte_mempool_lookup(name);
+	if (ts_params->mbuf_pool == NULL)
+		ts_params->mbuf_pool = rte_pktmbuf_pool_create(
+			name, (NUM_BIG_MBUFS * 30) + 1, CACHE_SIZE, 0,
+			MAX_MBUF_SEGMENT_SIZE, (int)rte_socket_id());
+	if (ts_params->mbuf_pool == NULL) {
+		ZSDA_LOG(ERR, E_CREATE);
+		return TEST_FAILED;
+	}
+
+ snprintf(name, sizeof(name), "COMP_OPPOOL_D%d", ts_params->valid_devid);
+ ts_params->op_mpool = rte_comp_op_pool_create(
+ name, NUM_OPS * 2, 0, sizeof(uint16_t), rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
+ ZSDA_LOG(ERR, E_CREATE);
+ return TEST_FAILED;
+ }
+
+ ts_params->def_comp_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+ if (ts_params->def_comp_xform == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ goto exit_func;
+ }
+
+ ts_params->def_decomp_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+ if (ts_params->def_decomp_xform == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ goto exit_func;
+ }
+
+ ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
+ ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
+ ts_params->def_comp_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DEFAULT;
+ ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_ZSDA_DEFAULT;
+ ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
+ ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
+
+ ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
+ ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
+ ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
+ ts_params->def_decomp_xform->decompress.window_size =
+ DEFAULT_WINDOW_SIZE;
+
+ return TEST_SUCCESS;
+
+exit_func:
+ testsuite_teardown(dev_info);
+ return TEST_FAILED;
+}
+
+static int
+ut_setup(struct zsda_test_dev_info *dev_info)
+{
+ struct comp_testsuite_params *ts_params = &(dev_info->ts_comp_params);
+ struct rte_compressdev_info info;
+ uint8_t ring_id_start = dev_info->ring_id_start;
+ uint8_t ring_id_end = dev_info->ring_id_end;
+ uint16_t qp_id;
+ int value = 0;
+
+ rte_compressdev_info_get(dev_info->dev_id, &info);
+ ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
+
+	ts_params->conf.socket_id = SOCKET_ID_ANY;
+	ts_params->conf.max_nb_priv_xforms = NUM_MAX_XFORMS;
+	ts_params->conf.max_nb_streams = 1;
+ ts_params->qp_conf.nb_descriptors = MAX_NUM_OPS_INFLIGHT;
+
+ value = rte_compressdev_configure(ts_params->valid_devid,
+ &(ts_params->conf));
+ if (value < 0) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ return -1;
+ }
+ if (dev_info->ring_id_end == 0)
+ ring_id_end = 1;
+
+ for (qp_id = ring_id_start; qp_id < ring_id_end; qp_id++) {
+ value = rte_compressdev_queue_pair_setup(
+ ts_params->valid_devid, qp_id, NUM_MAX_INFLIGHT_OPS,
+ rte_socket_id());
+ if (value < 0) {
+ ZSDA_LOG(ERR, E_START);
+ return TEST_FAILED;
+ }
+ }
+
+ if (rte_compressdev_start(ts_params->valid_devid) < 0) {
+ ZSDA_LOG(ERR, E_START);
+ return TEST_FAILED;
+ }
+
+ return 0;
+}
+
+static void
+ut_teardown(struct zsda_test_dev_info *dev_info)
+{
+ struct comp_testsuite_params *ts_params = &(dev_info->ts_comp_params);
+
+ rte_compressdev_stop(ts_params->valid_devid);
+ if (rte_compressdev_close(ts_params->valid_devid) < 0)
+ ZSDA_LOG(ERR, E_CLOSE);
+}
+
+static int
+prepare_test_data(struct Interim_data_params *int_data)
+{
+ uint32_t i = 0;
+
+ uint8_t *data_pt = NULL;
+ uint8_t *data_ct = NULL;
+
+ uint32_t len_pt = 0;
+ uint32_t len_ct = 0;
+ uint8_t data_set;
+ uint32_t data_len;
+
+ enum zsda_comp_test_cases_type test_type = int_data->test_type;
+
+ switch (test_type) {
+ case ZSDA_COMP_SINGLE_CASE:
+
+ data_len = 2048;
+ len_pt = data_len;
+ len_ct = data_len;
+
+ data_set = SRC_PATTERN;
+ data_pt = (uint8_t *)malloc(len_pt);
+ CHECK_ADDR_NULL(data_pt);
+ data_ct = (uint8_t *)malloc(len_ct);
+ CHECK_ADDR_NULL(data_ct);
+ memset(data_pt, data_set, len_pt);
+ memset(data_ct, data_set, len_ct);
+ int_data->op_num = 1;
+ int_data->num_repeat = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ int_data->flag_comp_then_decomp = true;
+ for (i = 0; i < int_data->op_num; i++)
+ int_data->xforms[i] = int_data->compress_xforms;
+
+ for (i = 0; i < int_data->op_num; i++) {
+ int_data->data_pts[i] = data_pt;
+ int_data->data_cts[i] = data_ct;
+ int_data->len_pts[i] = len_pt;
+ int_data->len_cts[i] = len_ct;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_run_enqueue_dequeue(struct rte_comp_op **ops,
+ struct rte_comp_op **cq_array,
+ struct Interim_data_params *int_data)
+{
+ uint16_t num_enqd = 0;
+ uint16_t num_deqd = 0;
+ uint16_t op_num = int_data->op_num & 0xffff;
+ uint8_t dev_id = int_data->dev_id;
+ uint8_t queue_id = int_data->queue_id;
+
+ num_enqd = rte_compressdev_enqueue_burst(dev_id, queue_id, ops, op_num);
+ if (num_enqd < op_num) {
+ ZSDA_LOG(ERR, "Some operations could not be enqueued");
+ return TEST_FAILED;
+ }
+
+ while (num_deqd != op_num) {
+ num_deqd += rte_compressdev_dequeue_burst(dev_id, queue_id,
+ &cq_array[num_deqd],
+ (op_num - num_deqd));
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_results_validation(const struct Interim_data_params *int_data,
+ const struct test_private_arrays *test_priv_data)
+{
+ unsigned int loop;
+
+ uint8_t *buf1;
+ const uint8_t *buf2;
+ uint8_t *contig_buf;
+ int ret = 0;
+
+ uint16_t op_num = int_data->op_num;
+ struct rte_comp_op **cq_array = test_priv_data->cq_array;
+ uint32_t offset = int_data->offset;
+ struct rte_comp_xform **xform = int_data->xforms;
+
+ for (loop = 0; loop < op_num; loop++) {
+ buf1 = int_data->data_pts[loop];
+
+ if (xform[loop]->type == RTE_COMP_COMPRESS)
+ buf1 = int_data->data_cts[loop];
+ else if (xform[loop]->type == RTE_COMP_DECOMPRESS)
+ buf1 = int_data->data_pts[loop];
+
+ contig_buf = rte_malloc(NULL, cq_array[loop]->produced, 0);
+ if (contig_buf == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ return TEST_FAILED;
+ }
+ buf2 = rte_pktmbuf_read(cq_array[loop]->m_dst, offset,
+ cq_array[loop]->produced, contig_buf);
+ CHECK_ADDR_NULL(buf2);
+
+ ret |= compare_buffers(buf1, cq_array[loop]->produced, buf2,
+ cq_array[loop]->produced);
+
+		if (ret != TEST_SUCCESS) {
+			ZSDA_LOG(ERR, E_COMPARE);
+			rte_free(contig_buf);
+			return TEST_FAILED;
+		}
+		rte_free(contig_buf);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_setup_com_bufs(const struct Interim_data_params *int_data,
+ struct test_private_arrays *test_priv_data,
+ struct rte_mempool *mbuf_pool)
+{
+ uint16_t loop;
+ uint16_t op_num = int_data->op_num;
+ uint8_t *data_srcs = NULL;
+ uint8_t *data_dsts = NULL;
+ uint32_t len_srcs = 0;
+ uint32_t len_dsts = 0;
+ uint32_t nb_segs_ibuf = 0;
+ uint32_t nb_segs_obuf = 0;
+ struct rte_comp_xform **xforms = int_data->xforms;
+ bool is_need_new_mem = false;
+ int ret = 0;
+
+ struct zsda_buf_config buf_config = {
+ .mbuf_pool = mbuf_pool,
+ };
+
+ for (loop = 0; loop < op_num; loop++) {
+ if (xforms[loop]->type == RTE_COMP_COMPRESS) {
+ data_srcs = int_data->data_pts[loop];
+ data_dsts = int_data->data_cts[loop];
+ len_srcs = int_data->len_pts[loop];
+ len_dsts = int_data->len_cts[loop];
+ nb_segs_ibuf = set_nb_segs(int_data->nb_segs_plaintext,
+ len_srcs);
+ nb_segs_obuf = set_nb_segs(int_data->nb_segs_ciphertext,
+ len_dsts);
+ test_priv_data->zsda_core = ZSDA_CORE_COMP;
+
+ } else if (xforms[loop]->type == RTE_COMP_DECOMPRESS) {
+ data_srcs = int_data->data_cts[loop];
+ data_dsts = int_data->data_pts[loop];
+ len_srcs = int_data->len_cts[loop];
+ len_dsts = int_data->len_pts[loop];
+ nb_segs_ibuf = set_nb_segs(int_data->nb_segs_ciphertext,
+ len_srcs);
+ nb_segs_obuf = set_nb_segs(int_data->nb_segs_plaintext,
+ len_dsts);
+ test_priv_data->zsda_core = ZSDA_CORE_DECOMP;
+ }
+
+ buf_config.data = data_srcs;
+ buf_config.data_len = len_srcs;
+ buf_config.nb_segs = nb_segs_ibuf;
+ is_need_new_mem = false;
+ ret |= buf_create(&buf_config, is_need_new_mem);
+ int_data->ibuf[loop] = buf_config.buf;
+
+ buf_config.data = data_dsts;
+ buf_config.data_len = len_dsts;
+ is_need_new_mem = true;
+ buf_config.nb_segs = nb_segs_obuf;
+ ret |= buf_create(&buf_config, is_need_new_mem);
+ int_data->obuf[loop] = buf_config.buf;
+ }
+
+ if (ret != TEST_SUCCESS)
+ ret = TEST_FAILED;
+
+ return ret;
+}
+
+static int
+test_comp_copy_to_dst(const struct Interim_data_params *int_data,
+ const struct test_private_arrays *test_priv_data)
+
+{
+ struct rte_comp_op **ops = test_priv_data->ops;
+ struct rte_comp_op **cq_array = test_priv_data->cq_array;
+ uint8_t *contig_buf = NULL;
+ const uint8_t *buf2 = NULL;
+ uint16_t loop;
+ uint16_t op_num = int_data->op_num;
+ uint32_t offset = int_data->offset;
+
+ for (loop = 0; loop < op_num; loop++) {
+ contig_buf = rte_malloc(NULL, cq_array[loop]->produced, 0);
+ CHECK_ADDR_NULL(contig_buf);
+ buf2 = rte_pktmbuf_read(cq_array[loop]->m_dst, offset,
+ cq_array[loop]->produced, contig_buf);
+ CHECK_ADDR_NULL(buf2);
+ if (int_data->len_cts[loop] >= cq_array[loop]->produced) {
+ memcpy(int_data->data_cts[loop], buf2,
+ cq_array[loop]->produced);
+ int_data->len_cts[loop] = cq_array[loop]->produced;
+ } else
+ ZSDA_LOG(ERR, E_COMPARE);
+
+ rte_free(contig_buf);
+ }
+
+ for (loop = 0; loop < op_num; loop++) {
+ rte_pktmbuf_free(int_data->ibuf[loop]);
+ rte_pktmbuf_free(int_data->obuf[loop]);
+ int_data->ibuf[loop] = NULL;
+ int_data->obuf[loop] = NULL;
+ }
+	rte_comp_op_bulk_free(ops, op_num);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_deflate_comp_decomp_run(struct Interim_data_params *int_data,
+ const struct test_private_arrays *test_priv_data,
+ struct rte_mempool *op_mpool)
+{
+ uint16_t loop = 0;
+ int ret = 0;
+ uint16_t op_num = int_data->op_num;
+ struct rte_comp_op **ops = test_priv_data->ops;
+ struct rte_comp_op **cq_array = test_priv_data->cq_array;
+ void **priv_xforms = test_priv_data->priv_xforms;
+ uint8_t dev_id = int_data->dev_id;
+ struct rte_comp_xform **xforms = int_data->xforms;
+ struct rte_mbuf **ibuf = int_data->ibuf;
+ struct rte_mbuf **obuf = int_data->obuf;
+
+	ret = rte_comp_op_bulk_alloc(op_mpool, ops, op_num);
+	if (ret != op_num) {
+		ZSDA_LOG(ERR, E_MALLOC);
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+
+ for (loop = 0; loop < op_num; loop++) {
+ ops[loop]->src.offset = int_data->offset;
+ ops[loop]->src.length =
+ rte_pktmbuf_pkt_len(ibuf[loop]) - ops[loop]->src.offset;
+ ops[loop]->dst.offset = int_data->offset;
+
+ ops[loop]->m_src = ibuf[loop];
+ ops[loop]->m_dst = obuf[loop];
+ ops[loop]->input_chksum = 0;
+ ops[loop]->flush_flag = RTE_COMP_FLUSH_FINAL;
+ ops[loop]->output_chksum = 0;
+
+ ret = rte_compressdev_private_xform_create(dev_id, xforms[loop],
+ &priv_xforms[loop]);
+ if (ret < 0) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+ ops[loop]->private_xform = priv_xforms[loop];
+ }
+
+ ret = test_run_enqueue_dequeue(ops, cq_array, int_data);
+	if (ret < 0) {
+		ZSDA_LOG(ERR, "Compression: enqueue/dequeue operation failed");
+		ret = TEST_FAILED;
+	}
+
+exit_func:
+
+	for (loop = 0; loop < op_num; loop++) {
+		if (priv_xforms[loop] != NULL) {
+			rte_compressdev_private_xform_free(dev_id,
+							   priv_xforms[loop]);
+			priv_xforms[loop] = NULL;
+		}
+		if (ops[loop] != NULL)
+			ops[loop]->private_xform = NULL;
+	}
+
+ return ret;
+}
+
+static int
+test_case_comp_decomp(struct Interim_data_params *int_data,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mempool *op_mpool)
+{
+ int ret = 0;
+ unsigned int i;
+ uint16_t op_num = int_data->op_num;
+ struct test_private_arrays test_priv_data;
+	struct rte_comp_op *ops[MAX_NUM_WQE] = {NULL};
+	struct rte_comp_op *cq_array[MAX_NUM_WQE] = {NULL};
+	void *priv_xforms[MAX_NUM_WQE] = {NULL};
+ bool flag_comp_then_decomp = int_data->flag_comp_then_decomp;
+ uint16_t loop;
+
+ test_priv_data.ops = ops;
+ test_priv_data.cq_array = cq_array;
+ test_priv_data.priv_xforms = priv_xforms;
+
+ ret = test_setup_com_bufs(int_data, &test_priv_data, mbuf_pool);
+ if (ret < 0) {
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+
+ /* Run compression */
+ ret = test_deflate_comp_decomp_run(int_data, &test_priv_data, op_mpool);
+ if (ret < 0) {
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+
+	/* Decompress the data that was just compressed (comp-then-decomp flow) */
+ if (flag_comp_then_decomp) {
+ test_comp_copy_to_dst(int_data, &test_priv_data);
+
+ for (i = 0; i < int_data->op_num; i++)
+ int_data->xforms[i]->type = RTE_COMP_DECOMPRESS;
+
+ ret = test_setup_com_bufs(int_data, &test_priv_data, mbuf_pool);
+ if (ret < 0) {
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+
+ /* Run decompression */
+ ret = test_deflate_comp_decomp_run(int_data, &test_priv_data,
+ op_mpool);
+ if (ret < 0) {
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+ }
+
+ ret = test_results_validation(int_data, &test_priv_data);
+ if (ret < 0) {
+ ZSDA_LOG(ERR, E_COMPARE);
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+
+exit_func:
+	for (loop = 0; loop < op_num; loop++) {
+		rte_pktmbuf_free(int_data->ibuf[loop]);
+		rte_pktmbuf_free(int_data->obuf[loop]);
+		int_data->ibuf[loop] = NULL;
+		int_data->obuf[loop] = NULL;
+	}
+	rte_comp_op_bulk_free(ops, op_num);
+
+ return ret;
+}
+
+static int
+test_zsda_one_case_Comp(struct Interim_data_params *int_data,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mempool *op_mpool, char *test_msg)
+{
+ uint32_t repeat_one_case;
+ uint32_t i = 0;
+ int ret = 0;
+
+ repeat_one_case = int_data->num_repeat == 0 ? 1 : int_data->num_repeat;
+
+ for (i = 0; i < repeat_one_case; i++) {
+ ret = test_case_comp_decomp(int_data, mbuf_pool, op_mpool);
+
+ if (ret == TEST_SUCCESS)
+ snprintf(test_msg, COMPRESSDEV_TEST_MSG_LEN, "PASS");
+ else
+ snprintf(test_msg, COMPRESSDEV_TEST_MSG_LEN, "FAIL");
+ }
+ return ret;
+}
+
+static int
+test_compressdev(struct zsda_test_dev_info *dev_info,
+ enum rte_comp_checksum_type checksum_type,
+ enum zsda_comp_test_cases_type test_type)
+{
+ int test_index = 0;
+ uint16_t i;
+ int ret = 0;
+ struct comp_testsuite_params *ts_params = &(dev_info->ts_comp_params);
+ const struct rte_compressdev_capabilities *capab;
+
+ struct rte_mempool *mbuf_pool = ts_params->mbuf_pool;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+
+	capab = rte_compressdev_capability_get(ts_params->valid_devid,
+					       RTE_COMP_ALGO_DEFLATE);
+ TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+ struct rte_comp_xform *compress_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+ CHECK_ADDR_NULL(compress_xform);
+ memcpy(compress_xform, ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+ compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
+
+ struct rte_comp_xform *decompress_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+ CHECK_ADDR_NULL(decompress_xform);
+ memcpy(decompress_xform, ts_params->def_decomp_xform,
+ sizeof(struct rte_comp_xform));
+
+ uint8_t *data_plaintext_mul[512];
+ uint8_t *data_ciphertext_mul[512];
+ uint32_t len_plaintext_mul[512];
+ uint32_t len_ciphertext_mul[512];
+
+ struct rte_mbuf *ibuf[512];
+ struct rte_mbuf *obuf[512];
+
+ struct rte_comp_xform *xforms[512] = {NULL};
+
+ compress_xform->compress.chksum = checksum_type;
+ decompress_xform->decompress.chksum = checksum_type;
+
+ struct Interim_data_params int_data = {
+ .data_pts = data_plaintext_mul,
+ .data_cts = data_ciphertext_mul,
+ .len_pts = len_plaintext_mul,
+ .len_cts = len_ciphertext_mul,
+ .ibuf = ibuf,
+ .obuf = obuf,
+ .dev_id = ts_params->valid_devid,
+ .queue_id = ts_params->valid_ringid,
+ .test_type = test_type,
+ .num_repeat = 1,
+ .zlib_dir = ZLIB_NONE,
+ .xforms = xforms,
+ .compress_xforms = compress_xform,
+ .decompress_xforms = decompress_xform,
+ };
+
+ prepare_test_data(&int_data);
+
+	ret = test_zsda_one_case_Comp(&int_data, mbuf_pool, op_mpool,
+				      test_msg);
+	ZSDA_LOG(INFO, " %u) TestCase %s\n", test_index++, test_msg);
+
+ for (i = 0; i < int_data.op_num; i++) {
+ free(int_data.data_pts[i]);
+ free(int_data.data_cts[i]);
+ int_data.data_pts[i] = NULL;
+ int_data.data_cts[i] = NULL;
+ }
+
+ rte_free(compress_xform);
+ rte_free(decompress_xform);
+ compress_xform = NULL;
+ decompress_xform = NULL;
+
+ return ret;
+}
+
+static int __rte_unused
+test_zsda_compress_single_case_ZLIB(struct zsda_test_dev_info *dev_info)
+{
+ return test_compressdev(dev_info, RTE_COMP_CHECKSUM_ADLER32,
+ ZSDA_COMP_SINGLE_CASE);
+}
+static int __rte_unused
+test_zsda_compress_single_case_GZIP(struct zsda_test_dev_info *dev_info)
+{
+ return test_compressdev(dev_info, RTE_COMP_CHECKSUM_CRC32,
+ ZSDA_COMP_SINGLE_CASE);
+}
+/* clang-format off */
+struct zsda_unit_test_suite compressdev_zsda_testsuite_private = {
+ .suite_name = "Compressdev Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zsda_compress_single_case_ZLIB),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zsda_compress_single_case_GZIP),
+
+ TEST_CASES_END(), /**< NULL terminate unit test array */
+ },
+};
+/* clang-format on */
diff --git a/examples/zsda/test_zsda_compressdev.h b/examples/zsda/test_zsda_compressdev.h
new file mode 100644
index 0000000..e8f80d1
--- /dev/null
+++ b/examples/zsda/test_zsda_compressdev.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef TEST_ZSDA_COMPRESSDEV_H_
+#define TEST_ZSDA_COMPRESSDEV_H_
+
+#ifndef COMPRESSDEV_TEST_MSG_LEN
+#define COMPRESSDEV_TEST_MSG_LEN 256
+#endif
+
+enum zsda_comp_test_cases_type {
+ ZSDA_COMP_ONLY_COMP,
+ ZSDA_COMP_ONLY_DECOMP,
+ ZSDA_COMP_SINGLE_CASE,
+ ZSDA_COMP_ONLY_DECOMP_FLOW_4k,
+ ZSDA_COMP_ONLY_COMP_FLOW_4k_SAME,
+};
+
+enum zlib_direction {
+ ZLIB_NONE,
+ ZLIB_COMPRESS,
+ ZLIB_DECOMPRESS,
+ ZLIB_ALL,
+};
+
+enum Operate_type {
+ OPERATE_COMPRESSION,
+ OPERATE_DECOMPRESSION,
+ OPERATE_MIX,
+};
+
+struct Interim_data_params {
+ uint8_t **data_pts;
+ uint8_t **data_cts;
+ uint32_t *len_pts;
+ uint32_t *len_cts;
+
+ struct rte_mbuf **ibuf;
+ struct rte_mbuf **obuf;
+
+ uint32_t nb_segs_plaintext;
+ uint32_t nb_segs_ciphertext;
+ uint8_t dev_id;
+ uint8_t queue_id;
+
+ bool flag_comp_then_decomp;
+
+ uint16_t op_num;
+ uint32_t num_repeat;
+ uint32_t offset;
+
+ enum zsda_algo_core core;
+ double rate;
+
+ enum zsda_comp_test_cases_type test_type;
+ enum zlib_direction zlib_dir;
+
+ struct rte_comp_xform *compress_xforms;
+ struct rte_comp_xform *decompress_xforms;
+ struct rte_comp_xform **xforms;
+};
+
+struct test_private_arrays {
+ struct rte_comp_op **ops;
+ struct rte_comp_op **cq_array;
+ void **priv_xforms;
+ uint64_t *compress_checksum;
+ enum zsda_algo_core zsda_core;
+};
+
+struct compdev_test_data {
+ uint8_t *plaintext_data;
+ uint32_t plaintext_len;
+
+ uint8_t *ciphertext_data;
+ uint32_t ciphertext_len;
+};
+
+enum zsda_comp_checksum_type {
+ ZSDA_COMP_CHECKSUM_NONE,
+ ZSDA_COMP_CHECKSUM_CRC32_GZIP,
+ ZSDA_COMP_CHECKSUM_ADLER32_ZLIB,
+ ZSDA_COMP_CHECKSUM_CRC32_ADLER32,
+};
+
+struct compdev_test_case {
+ const char *test_descr;
+ struct compdev_test_data *test_data;
+};
+
+#endif /* TEST_ZSDA_COMPRESSDEV_H_ */
diff --git a/examples/zsda/test_zsda_cryptodev.c b/examples/zsda/test_zsda_cryptodev.c
new file mode 100644
index 0000000..9b9c357
--- /dev/null
+++ b/examples/zsda/test_zsda_cryptodev.c
@@ -0,0 +1,794 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_ether.h>
+#include <rte_hexdump.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_pause.h>
+#include <rte_string_fns.h>
+
+#include "test.h"
+#include "test_zsda.h"
+#include "test_zsda_cryptodev.h"
+#include "test_zsda_cryptodev_aes_test_vectors.h"
+#include "test_zsda_cryptodev_hash_test_vectors.h"
+
+static int
+testsuite_setup(struct zsda_test_dev_info *dev_info)
+{
+ struct crypto_testsuite_params *ts_params =
+ &(dev_info->ts_crypto_params);
+ struct rte_cryptodev_info info;
+ uint32_t nb_devs;
+ uint8_t dev_id;
+ char name[256] = {0};
+
+ memset(ts_params, 0, sizeof(*ts_params));
+ ts_params->valid_devid = dev_info->dev_id;
+ ts_params->valid_ringid = dev_info->ring_id;
+ dev_id = dev_info->dev_id;
+
+ snprintf(name, sizeof(name), "CRY_POOL_D%d", ts_params->valid_devid);
+ ts_params->mbuf_pool = rte_mempool_lookup(name);
+ if (ts_params->mbuf_pool == NULL) {
+ ts_params->mbuf_pool = rte_pktmbuf_pool_create(
+ name, NUM_BIG_MBUFS * 30, MBUF_CACHE_SIZE, 0,
+ MAX_MBUF_SEGMENT_SIZE, (int)(rte_socket_id() & 0x0fff));
+ if (ts_params->mbuf_pool == NULL) {
+ ZSDA_LOG(ERR, E_CREATE);
+ return TEST_FAILED;
+ }
+ }
+ snprintf(name, sizeof(name), "CRY_OPPOOL_D%d", ts_params->valid_devid);
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ name, RTE_CRYPTO_OP_TYPE_SYMMETRIC, NUM_MBUFS * QP_NUMS,
+ MBUF_CACHE_SIZE,
+ (DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) +
+ MAXIMUM_IV_LENGTH,
+ (int)(rte_socket_id() & 0xffff));
+ if (ts_params->op_mpool == NULL) {
+ ZSDA_LOG(ERR, E_CREATE);
+ return TEST_FAILED;
+ }
+
+ nb_devs = rte_cryptodev_count();
+ if (nb_devs < 1) {
+		ZSDA_LOG(WARNING, "No crypto devices found");
+ return TEST_SKIPPED;
+ }
+
+ rte_cryptodev_info_get(dev_id, &info);
+
+ ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
+ ts_params->conf.socket_id = SOCKET_ID_ANY;
+ ts_params->conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;
+
+ unsigned int session_size =
+ rte_cryptodev_sym_get_private_session_size(dev_id);
+
+ snprintf(name, sizeof(name), "CRY_SESS_MPOOL_D%d",
+ ts_params->valid_devid);
+ ts_params->session_mpool = rte_cryptodev_sym_session_pool_create(
+ name, MAX_NB_SESSIONS, 0, 0, 0, SOCKET_ID_ANY);
+ TEST_ASSERT_NOT_NULL(ts_params->session_mpool,
+ "session mempool allocation failed");
+
+ snprintf(name, sizeof(name), "CRY_SESS_MPOOL_PRIV_D%d",
+ ts_params->valid_devid);
+ ts_params->session_priv_mpool =
+ rte_mempool_create(name, MAX_NB_SESSIONS, session_size, 0, 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ TEST_ASSERT_NOT_NULL(ts_params->session_priv_mpool,
+ "session mempool allocation failed");
+
+ TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id, &ts_params->conf),
+ "Failed to configure cryptodev %u with %u qps",
+ dev_id, ts_params->conf.nb_queue_pairs);
+
+ ts_params->qp_conf.nb_descriptors = MAX_NUM_OPS_INFLIGHT;
+ ts_params->qp_conf.mp_session = ts_params->session_mpool;
+
+ return TEST_SUCCESS;
+}
+
+static void
+testsuite_teardown(struct zsda_test_dev_info *dev_info)
+{
+ struct crypto_testsuite_params *ts_params =
+ &(dev_info->ts_crypto_params);
+
+ rte_mempool_free(ts_params->mbuf_pool);
+ rte_mempool_free(ts_params->op_mpool);
+ ts_params->mbuf_pool = NULL;
+ ts_params->op_mpool = NULL;
+
+ if (ts_params->session_priv_mpool != NULL) {
+ rte_mempool_free(ts_params->session_priv_mpool);
+ ts_params->session_priv_mpool = NULL;
+ }
+
+ if (ts_params->session_mpool != NULL) {
+ rte_mempool_free(ts_params->session_mpool);
+ ts_params->session_mpool = NULL;
+ }
+}
+
+static int
+dev_configure_and_start(struct zsda_test_dev_info *dev_info,
+ uint64_t ff_disable)
+{
+ struct crypto_testsuite_params *ts_params =
+ &(dev_info->ts_crypto_params);
+ struct crypto_unittest_params *ut_params =
+ &(dev_info->ut_crypto_params);
+ uint16_t qp_id;
+
+ memset(ut_params, 0, sizeof(*ut_params));
+
+ ts_params->conf.socket_id = SOCKET_ID_ANY;
+ ts_params->conf.ff_disable = ff_disable;
+ ts_params->qp_conf.nb_descriptors = MAX_NUM_OPS_INFLIGHT;
+ ts_params->qp_conf.mp_session = ts_params->session_mpool;
+
+ TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devid,
+ &ts_params->conf),
+ "Failed to configure cryptodev %u",
+ ts_params->valid_devid);
+
+ if (dev_info->ring_id_end == 0)
+ dev_info->ring_id_end = 1;
+
+ for (qp_id = dev_info->ring_id_start; qp_id < dev_info->ring_id_end;
+ qp_id++) {
+ TEST_ASSERT_SUCCESS(
+ rte_cryptodev_queue_pair_setup(
+ ts_params->valid_devid, qp_id,
+ &ts_params->qp_conf,
+ (int)(rte_socket_id() & 0x0fff)),
+ "Failed to setup queue pair %u on cryptodev %u", qp_id,
+ ts_params->valid_devid);
+ }
+
+ rte_cryptodev_stats_reset(ts_params->valid_devid);
+
+ TEST_ASSERT_SUCCESS(rte_cryptodev_start(ts_params->valid_devid),
+ "Failed to start cryptodev %u",
+ ts_params->valid_devid);
+
+ return TEST_SUCCESS;
+}
+
+static int
+ut_setup(struct zsda_test_dev_info *dev_info)
+{
+ return dev_configure_and_start(dev_info, RTE_CRYPTODEV_FF_SECURITY);
+}
+
+static void
+ut_teardown(struct zsda_test_dev_info *dev_info)
+{
+ struct crypto_testsuite_params *ts_params =
+ &(dev_info->ts_crypto_params);
+ struct rte_cryptodev_stats stats;
+
+ rte_cryptodev_stats_get(ts_params->valid_devid, &stats);
+ rte_cryptodev_stop(ts_params->valid_devid);
+ if (rte_cryptodev_close(ts_params->valid_devid) < 0)
+ ZSDA_LOG(ERR, E_CLOSE);
+}
+
+static int
+crypto_check_result(struct Interim_data_params *int_data,
+ struct rte_crypto_op **cq_array)
+{
+ struct rte_crypto_sym_op *sym_op;
+ int ret = 0;
+ uint16_t loop = 0;
+ const struct blockcipher_test_case *tc = int_data->tc;
+ uint16_t op_num = int_data->op_num;
+
+ uint8_t *buf1 = NULL;
+ const uint8_t *buf2 = NULL;
+ uint32_t len_buf1 = 0;
+ uint32_t len_buf2 = 0;
+ uint8_t *contig_buf;
+
+ if (cq_array[0]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+ return TEST_FAILED;
+
+ for (loop = 0; loop < op_num; loop++) {
+ sym_op = cq_array[loop]->sym;
+		if (tc->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT) {
+ buf1 = int_data->data_cts[loop];
+ len_buf1 = int_data->len_cts[loop];
+ len_buf2 = int_data->len_pts[loop];
+ } else if (tc->op_mask & BLOCKCIPHER_TEST_OP_DECRYPT) {
+ buf1 = int_data->data_pts[loop];
+ len_buf1 = int_data->len_pts[loop];
+ len_buf2 = int_data->len_cts[loop];
+ }
+ contig_buf = rte_malloc(NULL, len_buf2, 0);
+ CHECK_ADDR_NULL(contig_buf);
+ buf2 = rte_pktmbuf_read(sym_op->m_dst, 0, len_buf2, contig_buf);
+ CHECK_ADDR_NULL(buf2);
+ ret = compare_buffers(buf1, len_buf1, buf2, len_buf2);
+
+		if (ret != 0) {
+			ZSDA_LOG(ERR, E_COMPARE);
+			rte_free(contig_buf);
+			return TEST_FAILED;
+		}
+		rte_free(contig_buf);
+ contig_buf = NULL;
+ }
+ return ret;
+}
+
+static int
+hash_check_result(struct Interim_data_params *int_data,
+ struct rte_crypto_op **cq_array)
+{
+ struct rte_crypto_sym_op *sym_op;
+ int ret = 0;
+ uint16_t loop = 0;
+ uint16_t op_num = int_data->op_num;
+
+ uint8_t *buf1;
+ const uint8_t *buf2;
+ uint32_t len_buf1;
+ uint32_t len_buf2;
+ uint8_t *contig_buf;
+
+ for (loop = 0; loop < op_num; loop++) {
+		if (cq_array[loop]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+			return TEST_FAILED;
+
+ sym_op = cq_array[loop]->sym;
+ buf1 = int_data->data_cts[loop];
+ len_buf1 = int_data->len_cts[loop];
+ len_buf2 = int_data->len_cts[loop];
+
+ contig_buf = rte_malloc(NULL, len_buf2, 0);
+ CHECK_ADDR_NULL(contig_buf);
+
+ buf2 = rte_pktmbuf_read(sym_op->m_dst, 0, len_buf2,
+ contig_buf);
+ CHECK_ADDR_NULL(buf2);
+
+ ret = compare_buffers(buf1, len_buf1, buf2, len_buf2);
+		if (ret != TEST_SUCCESS) {
+			ZSDA_LOG(ERR, E_COMPARE);
+			rte_free(contig_buf);
+			return TEST_FAILED;
+		}
+		rte_free(contig_buf);
+ contig_buf = NULL;
+ }
+
+ return ret;
+}
+
+static void
+prepare_test_data(struct Interim_data_params *int_data)
+{
+ uint32_t n_test_cases = 0;
+ enum zsda_blockcipher_test_type test_type = int_data->test_type;
+
+ switch (test_type) {
+
+ case ZSDA_ONLY_ENCRY:
+ case ZSDA_ONLY_DECRY:
+
+ if (test_type == ZSDA_ONLY_ENCRY) {
+ n_test_cases = RTE_DIM(zsda_test_cases_encry);
+ int_data->tcs = zsda_test_cases_encry;
+ } else {
+ n_test_cases = RTE_DIM(zsda_test_cases_decry);
+ int_data->tcs = zsda_test_cases_decry;
+ }
+ int_data->op_num = 1;
+ int_data->num_repeat = 1;
+ int_data->num_test_cases = n_test_cases;
+ break;
+
+ case ZSDA_HASH:
+ n_test_cases = RTE_DIM(zsda_test_cases_hash);
+ int_data->tcs = zsda_test_cases_hash;
+
+ int_data->op_num = 1;
+ int_data->num_repeat = 1;
+ int_data->num_test_cases = n_test_cases;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int
+cipheronly_op_config(struct Op_config *op_config)
+{
+ struct Interim_data_params *int_data = op_config->int_data;
+ struct rte_crypto_op **op_array =
+ (struct rte_crypto_op **)op_config->op_array;
+ struct blockcipher_test_case *tc = int_data->tc;
+ struct blockcipher_test_data *tdata = int_data->tc->test_data;
+ uint32_t nb_segs = int_data->nb_segs;
+ uint16_t op_num = int_data->op_num;
+ struct rte_crypto_op *op = NULL;
+ uint16_t loop = 0;
+
+ int ret = 0;
+ struct rte_mbuf *ibuf = NULL;
+ struct rte_mbuf *obuf = NULL;
+ uint8_t *data_pt = NULL;
+ uint8_t *data_ct = NULL;
+ uint32_t len_pt;
+ uint32_t len_ct;
+ struct rte_crypto_sym_op *sym_op = NULL;
+ struct rte_crypto_sym_xform *cipher_xform = NULL;
+ bool is_need_new_mem = false;
+
+ struct zsda_buf_config buf_config = {
+ .mbuf_pool = op_config->mbuf_pool,
+ };
+
+ for (loop = 0; loop < op_num; loop++) {
+ data_pt = int_data->data_pts[loop];
+ data_ct = int_data->data_cts[loop];
+ len_pt = int_data->len_pts[loop];
+ len_ct = int_data->len_cts[loop];
+ op = op_array[loop];
+
+ if (tc->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT) {
+ op_config->zsda_core = ZSDA_CORE_ENCRY;
+ op->sym->xform->cipher.op =
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ buf_config.data = data_pt;
+ buf_config.data_len = len_pt;
+ buf_config.nb_segs = set_nb_segs(nb_segs, len_pt);
+ is_need_new_mem = false;
+ ret = buf_create(&buf_config, is_need_new_mem);
+ ibuf = buf_config.buf;
+
+ buf_config.data_len = len_ct;
+ buf_config.nb_segs = set_nb_segs(nb_segs, len_ct);
+ is_need_new_mem = true;
+ ret |= buf_create(&buf_config, is_need_new_mem);
+ obuf = buf_config.buf;
+
+ } else if (tc->op_mask & BLOCKCIPHER_TEST_OP_DECRYPT) {
+ op_config->zsda_core = ZSDA_CORE_DECRY;
+ op->sym->xform->cipher.op =
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ buf_config.data = data_ct;
+ buf_config.data_len = len_ct;
+ buf_config.nb_segs = set_nb_segs(nb_segs, len_ct);
+ is_need_new_mem = false;
+ ret = buf_create(&buf_config, is_need_new_mem);
+ ibuf = buf_config.buf;
+
+ buf_config.data_len = len_pt;
+ buf_config.nb_segs = set_nb_segs(nb_segs, len_pt);
+ is_need_new_mem = true;
+ ret |= buf_create(&buf_config, is_need_new_mem);
+ obuf = buf_config.buf;
+ }
+
+ sym_op = op->sym;
+ sym_op->m_src = ibuf;
+ sym_op->m_dst = obuf;
+
+ cipher_xform = op->sym->xform;
+ cipher_xform->next = NULL;
+
+ cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform->cipher.algo = tdata->crypto_algo;
+
+ cipher_xform->cipher.key.data = tdata->cipher_key.data;
+ cipher_xform->cipher.key.length = tdata->cipher_key.len;
+ cipher_xform->cipher.iv.offset = IV_OFFSET;
+
+ cipher_xform->cipher.dataunit_len = tdata->xts_dataunit_len;
+ cipher_xform->cipher.iv.length = tdata->iv.len;
+
+ rte_memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
+ tdata->iv.data, tdata->iv.len);
+
+ sym_op->cipher.data.length = int_data->len_pts[loop];
+ }
+
+ return ret;
+}
+
+static void
+cipheronly_enqueue_and_dequeue(struct Enqueue_dequeue_config *endequeue_config)
+{
+ uint8_t dev_id = endequeue_config->dev_id;
+ uint8_t queue_id = endequeue_config->queue_id;
+ uint16_t op_num = endequeue_config->op_num;
+ struct rte_crypto_op **op_array =
+ (struct rte_crypto_op **)endequeue_config->op_array;
+ struct rte_crypto_op **cq_array =
+ (struct rte_crypto_op **)endequeue_config->cq_array;
+	uint16_t num_dequeue = 0;
+	uint16_t num_enqueue;
+
+	num_enqueue = rte_cryptodev_enqueue_burst(dev_id, queue_id, op_array,
+						  op_num);
+	if (num_enqueue < op_num)
+		ZSDA_LOG(ERR, "Some operations could not be enqueued");
+
+	while (num_dequeue < num_enqueue) {
+		num_dequeue += rte_cryptodev_dequeue_burst(
+			dev_id, queue_id, &cq_array[num_dequeue],
+			num_enqueue - num_dequeue);
+	}
+}
+
+static int
+zsda_cipheronly_config_run(struct Interim_data_params *int_data,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mempool *op_mpool)
+{
+ struct rte_crypto_op *op_array[512] = {NULL};
+ struct rte_crypto_op *cq_array[512] = {NULL};
+
+ uint8_t dev_id = int_data->dev_id;
+ uint8_t queue_id = int_data->queue_id;
+ uint16_t op_num = int_data->op_num;
+ int ret = TEST_SUCCESS;
+ uint16_t loop = 0;
+
+ for (loop = 0; loop < op_num; loop++) {
+ op_array[loop] = rte_crypto_op_alloc(
+ op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (!op_array[loop]) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+
+ if (rte_crypto_op_sym_xforms_alloc(op_array[loop], 1) ==
+ NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+ }
+
+ struct Op_config op_config = {
+ .int_data = int_data,
+ .mbuf_pool = mbuf_pool,
+ .op_mpool = op_mpool,
+ .op_array = (void **)op_array,
+ };
+
+ struct Enqueue_dequeue_config endequeue_config = {
+ .dev_id = dev_id,
+ .queue_id = queue_id,
+ .op_num = op_num,
+ .op_array = (void **)op_array,
+ .cq_array = (void **)cq_array,
+ };
+
+ ret = cipheronly_op_config(&op_config);
+ if (ret) {
+ ZSDA_LOG(ERR, E_CONFIG);
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+
+ int_data->core = op_config.zsda_core;
+
+ endequeue_config.zsda_core = op_config.zsda_core;
+ cipheronly_enqueue_and_dequeue(&endequeue_config);
+
+ ret = crypto_check_result(int_data, cq_array);
+
+exit_func:
+ for (loop = 0; loop < op_num; loop++) {
+ if (op_array[loop]) {
+ rte_pktmbuf_free(op_array[loop]->sym->m_src);
+ rte_pktmbuf_free(op_array[loop]->sym->m_dst);
+ rte_crypto_op_free(op_array[loop]);
+
+ op_array[loop]->sym->m_src = NULL;
+ op_array[loop]->sym->m_dst = NULL;
+ op_array[loop] = NULL;
+ }
+ }
+
+ return ret;
+}
+
+static void
+hash_enqueue_and_dequeue(struct Enqueue_dequeue_config *endequeue_config)
+{
+ uint8_t dev_id = endequeue_config->dev_id;
+ uint8_t queue_id = endequeue_config->queue_id;
+ uint32_t op_num = endequeue_config->op_num;
+ struct rte_crypto_op **op_array =
+ (struct rte_crypto_op **)endequeue_config->op_array;
+ struct rte_crypto_op **cq_array =
+ (struct rte_crypto_op **)endequeue_config->cq_array;
+	uint16_t sum_dequeue = 0;
+	uint16_t num_enqueue;
+
+	num_enqueue = rte_cryptodev_enqueue_burst(dev_id, queue_id, op_array,
+						  op_num);
+	if (num_enqueue < op_num)
+		ZSDA_LOG(ERR, "Some operations could not be enqueued");
+
+	while (sum_dequeue < num_enqueue) {
+		sum_dequeue += rte_cryptodev_dequeue_burst(
+			dev_id, queue_id, &cq_array[sum_dequeue],
+			num_enqueue - sum_dequeue);
+	}
+}
+
+
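+/*
+ * For hash requests this test path keeps m_src as the (possibly segmented)
+ * plaintext and uses a single-segment m_dst sized to the expected digest
+ * length (len_ct), which hash_check_result() later reads back and compares.
+ * This reflects how the buffers are used in this example, not a general
+ * cryptodev requirement.
+ */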
+static int
+hash_op_config(struct Op_config *op_config)
+{
+ struct Interim_data_params *int_data = op_config->int_data;
+ struct rte_mempool *mbuf_pool = op_config->mbuf_pool;
+ struct rte_crypto_op **op_array = (struct rte_crypto_op **)op_config->op_array;
+ struct blockcipher_test_data *tdata = int_data->tc->test_data;
+ uint32_t nb_segs = int_data->nb_segs;
+ uint32_t op_num = int_data->op_num;
+
+ struct rte_crypto_op *op = NULL;
+ uint32_t loop = 0;
+ int ret = 0;
+ struct rte_mbuf *ibuf = NULL;
+ struct rte_mbuf *obuf = NULL;
+ bool is_need_new_mem = false;
+
+ uint8_t *data_pt = NULL;
+ uint8_t *data_ct = NULL;
+ uint32_t len_pt;
+ uint32_t len_ct;
+ struct rte_crypto_sym_op *sym_op = NULL;
+ struct rte_crypto_sym_xform *auth_xform = NULL;
+
+ struct zsda_buf_config buf_config = {
+ .mbuf_pool = mbuf_pool,
+ };
+
+ for (loop = 0; loop < op_num; loop++) {
+ op = op_array[loop];
+ data_pt = int_data->data_pts[loop];
+ data_ct = int_data->data_cts[loop];
+ len_pt = int_data->len_pts[loop];
+ len_ct = int_data->len_cts[loop];
+ op_config->zsda_core = ZSDA_CORE_HASH;
+
+ buf_config.data = data_pt;
+ buf_config.data_len = len_pt;
+ buf_config.nb_segs = set_nb_segs(nb_segs, len_pt);
+ is_need_new_mem = false;
+ ret = buf_create(&buf_config, is_need_new_mem);
+ ibuf = buf_config.buf;
+
+ buf_config.data = data_ct;
+ buf_config.data_len = len_ct;
+ buf_config.nb_segs = 1;
+ ret |= buf_create_process(&buf_config);
+ obuf = buf_config.buf;
+
+ sym_op = op->sym;
+ sym_op->m_src = ibuf;
+ sym_op->m_dst = obuf;
+
+ if (ret) {
+ ZSDA_LOG(ERR, "Hash cannot create ibuf/obuf");
+ return TEST_FAILED;
+ }
+
+ sym_op->auth.data.length = len_pt;
+ auth_xform = op->sym->xform;
+ auth_xform->next = NULL;
+ auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform->auth.algo = tdata->auth_algo;
+ auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ }
+
+ return TEST_SUCCESS;
+}
+
+
+static int
+zsda_hash_config_run(struct Interim_data_params *int_data,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mempool *op_mpool)
+{
+ uint8_t dev_id = int_data->dev_id;
+ uint8_t queue_id = int_data->queue_id;
+ uint32_t op_num = int_data->op_num;
+ struct rte_crypto_op *op_array[512] = {NULL};
+ struct rte_crypto_op *cq_array[512] = {NULL};
+ int ret = TEST_SUCCESS;
+ uint32_t loop = 0;
+
+ struct Op_config op_config = {
+ .int_data = int_data,
+ .mbuf_pool = mbuf_pool,
+ .op_mpool = op_mpool,
+ .op_array = (void **)op_array,
+ };
+
+ struct Enqueue_dequeue_config endequeue_config = {
+ .dev_id = dev_id,
+ .queue_id = queue_id,
+ .op_num = op_num,
+ .op_array = (void **)op_array,
+ .cq_array = (void **)cq_array,
+ };
+
+ for (loop = 0; loop < op_num; loop++) {
+ op_array[loop] = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (!op_array[loop]) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+
+ if (rte_crypto_op_sym_xforms_alloc(op_array[loop], 1) == NULL) {
+ ZSDA_LOG(ERR, E_MALLOC);
+ ret = TEST_FAILED;
+ goto exit_func;
+ }
+ }
+
+	ret = hash_op_config(&op_config);
+	if (ret != TEST_SUCCESS) {
+		ZSDA_LOG(ERR, E_CONFIG);
+		goto exit_func;
+	}
+
+	int_data->core = op_config.zsda_core;
+
+ endequeue_config.zsda_core = op_config.zsda_core;
+
+ hash_enqueue_and_dequeue(&endequeue_config);
+
+ ret = hash_check_result(int_data, cq_array);
+
+exit_func:
+ for (loop = 0; loop < op_num; loop++) {
+ if (op_array[loop]) {
+ rte_pktmbuf_free(op_array[loop]->sym->m_src);
+ rte_pktmbuf_free(op_array[loop]->sym->m_dst);
+
+ op_array[loop]->sym->m_src = NULL;
+ op_array[loop]->sym->m_dst = NULL;
+
+ rte_crypto_op_free(op_array[loop]);
+ op_array[loop] = NULL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+test_zsda_one_case_cipher(struct Interim_data_params *int_data,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mempool *op_mpool, char *test_msg)
+{
+ const struct blockcipher_test_case *tc = int_data->tc;
+ uint16_t repeat_one_case;
+ uint16_t i = 0;
+ int ret = 0;
+
+ repeat_one_case = (int_data->num_repeat == 0) ? 1 : int_data->num_repeat;
+
+ if ((tc->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT) ||
+ (tc->op_mask & BLOCKCIPHER_TEST_OP_DECRYPT)) {
+ for (i = 0; i < int_data->op_num; i++) {
+ int_data->data_pts[i] = tc->test_data->plaintext.data;
+ int_data->data_cts[i] = tc->test_data->ciphertext.data;
+ int_data->len_pts[i] = tc->test_data->plaintext.len;
+ int_data->len_cts[i] = tc->test_data->ciphertext.len;
+ }
+
+ } else if (tc->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN) {
+ for (i = 0; i < int_data->op_num; i++) {
+ int_data->data_pts[i] = tc->test_data->plaintext.data;
+ int_data->data_cts[i] = tc->test_data->digest.data;
+ int_data->len_pts[i] = tc->test_data->plaintext.len;
+ int_data->len_cts[i] = tc->test_data->digest.len;
+ }
+ }
+
+ for (i = 0; i < repeat_one_case; i++) {
+
+ if ((tc->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT) ||
+ (tc->op_mask & BLOCKCIPHER_TEST_OP_DECRYPT)) {
+ ret |= zsda_cipheronly_config_run(int_data, mbuf_pool,
+ op_mpool);
+ } else if (tc->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN) {
+
+ ret |= zsda_hash_config_run(int_data, mbuf_pool,
+ op_mpool);
+ }
+ }
+
+ if (ret == TEST_SUCCESS)
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "PASS");
+ else
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "FAIL");
+ return ret;
+}
+
+static int
+test_blockcipher(struct zsda_test_dev_info *dev_info,
+ enum zsda_blockcipher_test_type test_type)
+{
+ struct crypto_testsuite_params *ts_params =
+ &(dev_info->ts_crypto_params);
+
+ struct rte_mempool *mbuf_pool = ts_params->mbuf_pool;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+
+ int ret = 0;
+ uint32_t i = 0, test_index = 0;
+ char test_msg[BLOCKCIPHER_TEST_MSG_LEN + 1];
+
+ uint8_t *data_plaintext_mul[MAX_NUM_WQE];
+ uint8_t *data_ciphertext_mul[MAX_NUM_WQE];
+ uint32_t len_plaintext_mul[MAX_NUM_WQE];
+ uint32_t len_ciphertext_mul[MAX_NUM_WQE];
+
+ struct Interim_data_params int_data = {
+ .dev_id = ts_params->valid_devid,
+ .queue_id = ts_params->valid_ringid,
+ .test_type = test_type,
+ .data_pts = data_plaintext_mul,
+ .data_cts = data_ciphertext_mul,
+ .len_pts = len_plaintext_mul,
+ .len_cts = len_ciphertext_mul,
+ };
+
+ prepare_test_data(&int_data);
+
+ for (i = 0; i < int_data.num_test_cases; i++) {
+ int_data.tc = &int_data.tcs[i % int_data.num_test_cases];
+ ret |= test_zsda_one_case_cipher(&int_data, mbuf_pool, op_mpool,
+ test_msg);
+ ZSDA_LOG(INFO, " %u) TestCase <%s> %s\n", test_index++,
+ int_data.tc->test_descr, test_msg);
+ }
+
+ return ret;
+}
+
+static int __rte_unused
+test_zsda_Only_Encry(struct zsda_test_dev_info *dev_info)
+{
+ return test_blockcipher(dev_info, ZSDA_ONLY_ENCRY);
+}
+static int __rte_unused
+test_zsda_Only_Decry(struct zsda_test_dev_info *dev_info)
+{
+ return test_blockcipher(dev_info, ZSDA_ONLY_DECRY);
+}
+static int __rte_unused
+test_zsda_Hash(struct zsda_test_dev_info *dev_info)
+{
+ return test_blockcipher(dev_info, ZSDA_HASH);
+}
+
+struct zsda_unit_test_suite cryptodev_zsda_testsuite_private = {
+ .suite_name = "Crypto Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown, test_zsda_Only_Encry),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_zsda_Only_Decry),
+		TEST_CASE_ST(ut_setup, ut_teardown, test_zsda_Hash),
+
+		TEST_CASES_END(), /**< NULL terminate unit test array */
+	},
+};
diff --git a/examples/zsda/test_zsda_cryptodev.h b/examples/zsda/test_zsda_cryptodev.h
new file mode 100644
index 0000000..e86d3e2
--- /dev/null
+++ b/examples/zsda/test_zsda_cryptodev.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef TEST_ZSDA_CRYPTODEV_H_
+#define TEST_ZSDA_CRYPTODEV_H_
+
+#include "test_zsda.h"
+
+#ifndef BLOCKCIPHER_TEST_MSG_LEN
+#define BLOCKCIPHER_TEST_MSG_LEN 256
+#endif
+
+#define BLOCKCIPHER_TEST_OP_ENCRYPT 0x01
+#define BLOCKCIPHER_TEST_OP_DECRYPT 0x02
+#define BLOCKCIPHER_TEST_OP_AUTH_GEN 0x04
+#define BLOCKCIPHER_TEST_OP_AUTH_VERIFY 0x08
+
+#define BLOCKCIPHER_TEST_FEATURE_OOP 0x01
+#define BLOCKCIPHER_TEST_FEATURE_SESSIONLESS 0x02
+#define BLOCKCIPHER_TEST_FEATURE_STOPPER 0x04 /* stop upon failing */
+#define BLOCKCIPHER_TEST_FEATURE_SG 0x08 /* Scatter Gather */
+#define BLOCKCIPHER_TEST_FEATURE_MULTI_SG 0x10 /* MULTI-LEVEL Scatter Gather */
+#define BLOCKCIPHER_TEST_FEATURE_LARGE_PKT_SG 0x20 /* Large Packet MULTI-LEVEL Scatter Gather */
+#define BLOCKCIPHER_TEST_FEATURE_WINDING 0x40
+
+#define BLOCKCIPHER_TEST_OP_CIPHER \
+ (BLOCKCIPHER_TEST_OP_ENCRYPT | BLOCKCIPHER_TEST_OP_DECRYPT)
+
+#define BLOCKCIPHER_TEST_OP_AUTH \
+ (BLOCKCIPHER_TEST_OP_AUTH_GEN | BLOCKCIPHER_TEST_OP_AUTH_VERIFY)
+
+#define BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN \
+ (BLOCKCIPHER_TEST_OP_ENCRYPT | BLOCKCIPHER_TEST_OP_AUTH_GEN)
+
+#define BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC \
+ (BLOCKCIPHER_TEST_OP_DECRYPT | BLOCKCIPHER_TEST_OP_AUTH_VERIFY)
+
+#define DEFAULT_NUM_XFORMS (2)
+#define MAXIMUM_IV_LENGTH (16)
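+/*
+ * The IV is carried in the crypto op's private area: IV_OFFSET points just
+ * past the op, the symmetric op and the two xforms, matching the per-op
+ * private data size requested in rte_crypto_op_pool_create() in
+ * testsuite_setup().
+ */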
+#define IV_OFFSET \
+ (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) + \
+ (DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)))
+
+#define QP_NUMS 16
+#define VDEV_ARGS_SIZE 100
+#define MAX_NB_SESSIONS 4
+
+enum blockcipher_test_type {
+ BLKCIPHER_AES_CHAIN_TYPE, /* use aes_chain_test_cases[] */
+ BLKCIPHER_AES_TYPE, /* use aes_cipheronly_test_cases[] */
+ BLKCIPHER_AES_DOCSIS_TYPE, /* use aes_docsis_test_cases[] */
+ BLKCIPHER_3DES_CHAIN_TYPE, /* use triple_des_chain_test_cases[] */
+ BLKCIPHER_3DES_TYPE, /* triple_des_cipheronly_test_cases[] */
+ BLKCIPHER_AUTHONLY_TYPE, /* use hash_test_cases[] */
+ BLKCIPHER_DES_TYPE, /* use des_cipheronly_test_cases[] */
+ BLKCIPHER_DES_DOCSIS_TYPE /* use des_docsis_test_cases[] */
+};
+
+struct blockcipher_test_case {
+ const char *test_descr;
+ struct blockcipher_test_data *test_data;
+ uint8_t op_mask;
+};
+
+struct blockcipher_test_data {
+ enum rte_crypto_cipher_algorithm crypto_algo;
+
+ struct {
+ uint8_t data[64];
+ uint16_t len;
+ } cipher_key;
+
+ struct {
+ uint8_t data[16];
+ uint16_t len;
+ } iv;
+
+ struct {
+ uint8_t *data;
+ uint32_t len;
+ } plaintext;
+
+ struct {
+ uint8_t *data;
+ uint32_t len;
+ } ciphertext;
+
+ enum rte_crypto_auth_algorithm auth_algo;
+
+ struct {
+ uint8_t data[128];
+ uint16_t len;
+ } auth_key;
+
+ struct {
+ uint8_t data[128];
+ uint16_t len;
+ uint16_t truncated_len;
+ } digest;
+
+ unsigned int cipher_offset;
+ uint32_t xts_dataunit_len;
+ bool wrapped_key;
+
+};
+
+enum zsda_blockcipher_test_type {
+ ZSDA_ONLY_ENCRY,
+ ZSDA_ONLY_DECRY,
+ ZSDA_HASH,
+};
+
+struct hash_mul_results_check {
+ uint32_t num;
+ uint8_t **mul_results;
+ uint32_t *digest_lens;
+};
+
+struct Interim_data_params {
+ struct blockcipher_test_case *tcs;
+ struct blockcipher_test_case *tc;
+ uint8_t dev_id;
+ uint8_t queue_id;
+ uint32_t nb_segs;
+ uint16_t op_num;
+ uint16_t num_repeat;
+ uint32_t num_test_cases;
+ uint32_t len_plaintext;
+ uint32_t len_ciphertext;
+ uint32_t xts_dataunit_len;
+
+ enum zsda_algo_core core;
+
+ uint8_t **data_pts;
+ uint8_t **data_cts;
+ uint32_t *len_pts;
+ uint32_t *len_cts;
+
+ uint8_t *data_pt;
+ uint8_t *data_ct;
+ enum zsda_blockcipher_test_type test_type;
+};
+#endif /* TEST_ZSDA_CRYPTODEV_H_ */
diff --git a/examples/zsda/test_zsda_cryptodev_aes_test_vectors.h b/examples/zsda/test_zsda_cryptodev_aes_test_vectors.h
new file mode 100644
index 0000000..e92a149
--- /dev/null
+++ b/examples/zsda/test_zsda_cryptodev_aes_test_vectors.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef TEST_ZSDA_CRYPTODEV_AES_TEST_VECTORS_H_
+#define TEST_ZSDA_CRYPTODEV_AES_TEST_VECTORS_H_
+
+#include "test_zsda_cryptodev_data.h"
+
+static struct
+blockcipher_test_data zsda_test_data_xts_key_32_pt_512 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_XTS,
+ .cipher_key = {
+ .data = { /* key1 | key2 */
+ 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
+ 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
+ },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x03, 0x02, 0x01,
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = plaintext_zsda_0x44_512B,
+ .len = DATA_LEN_512
+ },
+ .ciphertext = {
+ .data = ciphertext_zsda_aes256xts_512bytes,
+ .len = DATA_LEN_512
+ },
+ .xts_dataunit_len = DATA_LEN_512,
+ .wrapped_key = false
+};
+
+static struct
+blockcipher_test_data zsda_test_data_xts_key_64_pt_512 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_XTS,
+ .cipher_key = {
+ .data = { /* key1 | key2 */
+ 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ .len = 64
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x03, 0x02, 0x01},
+ .len = 16
+ },
+ .plaintext = {
+ .data = plaintext_zsda_0x44_512B,
+ .len = DATA_LEN_512
+ },
+ .ciphertext = {
+ .data = ciphertext_zsda_aes512xts_512bytes,
+ .len = DATA_LEN_512
+ },
+ .xts_dataunit_len = DATA_LEN_512,
+ .wrapped_key = false
+};
+
+static struct
+blockcipher_test_data sm4_zsda_test_data_xts_key_32_pt_512 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_SM4_XTS,
+ .cipher_key = {
+ .data = { /* key1 | key2 */
+ 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
+ 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20,
+ },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x03, 0x02, 0x01},
+ .len = 16
+ },
+ .plaintext = {
+ .data = plaintext_zsda_0x44_512B,
+ .len = DATA_LEN_512
+ },
+ .ciphertext = {
+ .data = ciphertext_zsda_sm4xts256_512bytes,
+ .len = DATA_LEN_512
+ },
+ .xts_dataunit_len = DATA_LEN_512,
+ .wrapped_key = false
+};
+
+
+static struct blockcipher_test_case __rte_unused
+ zsda_test_cases_encry[] = {
+ {
+ .test_descr =
+ "AES-256-XTS Encryption (512-byte plaintext)",
+ .test_data = &zsda_test_data_xts_key_32_pt_512,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ },
+ {
+ .test_descr =
+ "AES-512-XTS Encryption (512-byte plaintext)",
+ .test_data = &zsda_test_data_xts_key_64_pt_512,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ },
+ {
+ .test_descr =
+ "SM4-XTS-256 Encryption (512-byte plaintext)",
+ .test_data = &sm4_zsda_test_data_xts_key_32_pt_512,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ },
+ };
+
+static struct blockcipher_test_case __rte_unused
+ zsda_test_cases_decry[] = {
+ {
+ .test_descr =
+ "AES-256-XTS Decryption (512-byte plaintext)",
+ .test_data = &zsda_test_data_xts_key_32_pt_512,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ },
+ {
+ .test_descr =
+ "AES-512-XTS Decryption (512-byte plaintext)",
+ .test_data = &zsda_test_data_xts_key_64_pt_512,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ },
+ {
+ .test_descr =
+ "SM4-XTS-256 Decryption (512-byte plaintext)",
+ .test_data = &sm4_zsda_test_data_xts_key_32_pt_512,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ },
+ };
+
+#endif /* TEST_ZSDA_CRYPTODEV_AES_TEST_VECTORS_H_ */
diff --git a/examples/zsda/test_zsda_cryptodev_data.h b/examples/zsda/test_zsda_cryptodev_data.h
new file mode 100644
index 0000000..526a329
--- /dev/null
+++ b/examples/zsda/test_zsda_cryptodev_data.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+static uint8_t plaintext_zsda_0x44_512B[] = {
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+ 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
+};
+static uint8_t ciphertext_zsda_aes256xts_512bytes[] = {
+ 0x57, 0x99, 0xd9, 0x04, 0x1b, 0x6b, 0x18, 0xcb, 0x34, 0x14, 0x2f, 0xec,
+ 0x4d, 0x35, 0xe6, 0x44, 0x84, 0x79, 0x48, 0x3d, 0xec, 0x25, 0x07, 0x29,
+ 0xc9, 0xd9, 0x84, 0x1e, 0x66, 0x4d, 0x5f, 0x78, 0x1b, 0x02, 0x65, 0x01,
+ 0xf5, 0x5e, 0xf0, 0xd8, 0x0a, 0x0c, 0x40, 0xae, 0x40, 0x88, 0x3b, 0x05,
+ 0x07, 0x0e, 0x12, 0xad, 0xfb, 0xca, 0x1a, 0xb6, 0x9c, 0xda, 0x8f, 0xf1,
+ 0x18, 0x66, 0xb1, 0xdf, 0x76, 0x84, 0xca, 0xcb, 0xd3, 0x08, 0xe8, 0xb6,
+ 0x4e, 0x31, 0xee, 0x1e, 0x6e, 0xab, 0x94, 0x98, 0xcf, 0x1c, 0xe3, 0xd7,
+ 0x91, 0xdd, 0x70, 0x5b, 0xd3, 0x52, 0xfd, 0xd5, 0x98, 0x07, 0xb7, 0x80,
+ 0x40, 0xe3, 0x63, 0x5f, 0xe1, 0x3d, 0x12, 0x5f, 0x30, 0x80, 0x16, 0x9e,
+ 0x9e, 0x60, 0xb0, 0x43, 0xf6, 0x2b, 0x51, 0x7c, 0xa5, 0x7f, 0x47, 0x3a,
+ 0x39, 0x7a, 0xd5, 0x21, 0x70, 0x44, 0x63, 0xb4, 0xe2, 0xc9, 0xce, 0xb0,
+ 0xb3, 0xce, 0x0a, 0xe8, 0x99, 0x3d, 0x13, 0xa6, 0x74, 0x67, 0xe5, 0x66,
+ 0x5f, 0x24, 0xe3, 0x08, 0xbf, 0x40, 0xdb, 0x7c, 0xff, 0x03, 0xf1, 0x9e,
+ 0x3f, 0x66, 0x2d, 0xc1, 0xe4, 0x2a, 0xd9, 0x8f, 0xcf, 0xea, 0xb0, 0x2f,
+ 0xb8, 0x03, 0xbd, 0xa7, 0x33, 0x40, 0x88, 0x11, 0x71, 0xd5, 0x3e, 0xe3,
+ 0x05, 0x80, 0x7c, 0xe7, 0xb7, 0x68, 0x29, 0xd1, 0x12, 0x74, 0x02, 0xac,
+ 0xa0, 0x25, 0x0a, 0x25, 0x66, 0xe8, 0x4f, 0xae, 0x3d, 0x8c, 0xb1, 0xba,
+ 0xc5, 0xdc, 0xc2, 0x68, 0x93, 0xb5, 0x8e, 0x98, 0x7a, 0x5a, 0x68, 0x61,
+ 0x45, 0xd0, 0x7d, 0x6a, 0x5a, 0xe2, 0x21, 0x80, 0x44, 0x97, 0xf2, 0x52,
+ 0xc3, 0x05, 0xea, 0x52, 0xd6, 0x56, 0x95, 0x54, 0xf6, 0xc7, 0x1b, 0xe9,
+ 0xd8, 0x5a, 0x8c, 0xb7, 0x66, 0x60, 0x13, 0x66, 0x74, 0xcf, 0xba, 0xfd,
+ 0x72, 0xd2, 0xee, 0xaf, 0xfa, 0x48, 0x4b, 0x2a, 0x45, 0xac, 0x8e, 0x23,
+ 0x96, 0x68, 0x7f, 0xb9, 0xd9, 0x53, 0x9a, 0x08, 0xe8, 0x3b, 0xeb, 0x01,
+ 0xe6, 0x2a, 0xe2, 0x8d, 0x3d, 0x9a, 0x2b, 0xd7, 0x8d, 0x29, 0xd1, 0xa1,
+ 0x71, 0xb7, 0x74, 0x14, 0x6a, 0x60, 0xd6, 0xbe, 0xbe, 0xa7, 0x22, 0x24,
+ 0x88, 0x49, 0x50, 0x5a, 0x6e, 0xb0, 0xc0, 0xcf, 0x35, 0x7a, 0x5b, 0xa9,
+ 0xd0, 0x92, 0x0d, 0x30, 0x90, 0x7c, 0x56, 0xc6, 0xa6, 0x18, 0xd2, 0x45,
+ 0xff, 0x23, 0xfa, 0x88, 0xb2, 0x1a, 0x49, 0x09, 0x33, 0x54, 0x39, 0x89,
+ 0x81, 0x32, 0x91, 0x8a, 0x5f, 0x7d, 0xa2, 0x27, 0x6a, 0xf1, 0xc2, 0x3c,
+ 0x2c, 0x11, 0x42, 0xe5, 0x88, 0x36, 0xdc, 0x08, 0xa9, 0x15, 0x37, 0x87,
+ 0x5a, 0x20, 0x4f, 0x9e, 0xca, 0x61, 0xeb, 0x30, 0x90, 0x9e, 0x9e, 0x21,
+ 0xc3, 0x52, 0x3a, 0xb0, 0x83, 0x04, 0x01, 0x82, 0xbc, 0x4c, 0x82, 0x36,
+ 0x7c, 0xba, 0xaf, 0x1a, 0x94, 0xf7, 0xa1, 0x25, 0x8f, 0x38, 0x7d, 0x79,
+ 0x47, 0x27, 0x37, 0xcc, 0xa8, 0xee, 0x68, 0x12, 0x91, 0x24, 0x78, 0xf6,
+ 0xba, 0x91, 0x6c, 0x92, 0x79, 0x00, 0xaa, 0xdd, 0x5b, 0x2d, 0x98, 0x5b,
+ 0x6d, 0x5c, 0xc3, 0x9f, 0x86, 0xb5, 0x4d, 0x4e, 0x71, 0xa9, 0xc1, 0x0c,
+ 0xfd, 0x4f, 0x3b, 0xda, 0xc8, 0x30, 0x3b, 0xc7, 0xff, 0x04, 0xdb, 0x29,
+ 0x70, 0xff, 0x13, 0x5f, 0xe4, 0x4b, 0xfb, 0x2a, 0x06, 0x2e, 0xa5, 0x99,
+ 0x9f, 0x1e, 0xb8, 0xc7, 0x83, 0x60, 0xc3, 0xd5, 0xee, 0x75, 0xb6, 0xe1,
+ 0xe4, 0xac, 0x50, 0x2b, 0x2b, 0xe3, 0xa8, 0x84, 0x52, 0x24, 0xa8, 0x95,
+ 0xc3, 0xb0, 0x81, 0x2d, 0xce, 0x9a, 0x28, 0xdf, 0x15, 0xbe, 0x80, 0x6a,
+ 0xf5, 0x0c, 0x9c, 0x0b, 0xe9, 0x72, 0xb6, 0x1b, 0x18, 0x3a, 0x86, 0xfb,
+ 0x90, 0xf5, 0x03, 0xa4, 0xf8, 0xbb, 0x47, 0x58,
+};
+
+static uint8_t ciphertext_zsda_sm4xts256_512bytes[] = {
+ 0x5c, 0x34, 0xf1, 0x1b, 0x07, 0x11, 0x11, 0x0f, 0x5d, 0xa8, 0x6e, 0xcb,
+ 0xd9, 0x23, 0x12, 0x76, 0xca, 0x40, 0xb3, 0xe6, 0x89, 0x01, 0x33, 0x80,
+ 0x9e, 0x63, 0x34, 0x4a, 0x21, 0x73, 0x4d, 0xe6, 0x4d, 0x7b, 0x5a, 0x24,
+ 0xb4, 0xbc, 0x9b, 0xdf, 0xb6, 0x12, 0x37, 0xb2, 0x89, 0xbc, 0x9a, 0xe3,
+ 0xd5, 0xbd, 0x9c, 0x17, 0x69, 0x65, 0x71, 0x27, 0x13, 0xf6, 0x78, 0x1e,
+ 0xfe, 0x31, 0x35, 0xbb, 0x17, 0xe0, 0x69, 0xfc, 0x33, 0x79, 0xfe, 0x1e,
+ 0x8c, 0x97, 0x3f, 0x16, 0xbf, 0xfd, 0x82, 0x38, 0x31, 0x35, 0x76, 0x76,
+ 0x76, 0xf2, 0x2b, 0x9d, 0xa4, 0x3c, 0x80, 0xdc, 0xca, 0x97, 0x02, 0x9e,
+ 0x0e, 0x57, 0x48, 0xed, 0x8b, 0x7e, 0x00, 0x77, 0xef, 0x98, 0x44, 0x86,
+ 0x08, 0x11, 0x56, 0x6a, 0x8c, 0x47, 0x72, 0x1d, 0xcc, 0xe6, 0x23, 0xd3,
+ 0x29, 0xa2, 0xf5, 0xb3, 0x11, 0x00, 0x87, 0x26, 0x29, 0xe3, 0x25, 0x4b,
+ 0x8f, 0x13, 0xfe, 0xc6, 0xfb, 0xa4, 0x71, 0x38, 0xea, 0x06, 0x34, 0xae,
+ 0xbd, 0xae, 0x8a, 0x84, 0xab, 0x42, 0x7c, 0x7d, 0x85, 0x3f, 0x00, 0xc0,
+ 0x8d, 0x63, 0x72, 0xbb, 0x4e, 0xbf, 0x21, 0x68, 0x1e, 0x6a, 0xc9, 0x32,
+ 0xfc, 0x02, 0x2d, 0xbd, 0x44, 0xc3, 0x9e, 0xf5, 0x41, 0x5d, 0x9d, 0xc7,
+ 0x98, 0x8e, 0xc3, 0x69, 0xb5, 0x44, 0x6a, 0xfc, 0xe2, 0x08, 0x3d, 0xe5,
+ 0x86, 0x89, 0x35, 0x34, 0x34, 0x27, 0xc9, 0x2e, 0x19, 0x69, 0xeb, 0x8a,
+ 0x32, 0x75, 0x2e, 0xbe, 0x2b, 0x09, 0x53, 0x24, 0x58, 0x17, 0x11, 0xd0,
+ 0xa6, 0x21, 0xee, 0x79, 0x0c, 0x25, 0xb7, 0xe6, 0x1c, 0x4e, 0x42, 0x6b,
+ 0x6f, 0x5f, 0xee, 0xbc, 0xea, 0x39, 0xed, 0x44, 0x54, 0xe0, 0x66, 0x33,
+ 0xf8, 0x6f, 0x5b, 0xdd, 0x85, 0x66, 0xf2, 0xd7, 0x6a, 0x82, 0xc3, 0xe9,
+ 0xb1, 0x47, 0xcd, 0x0d, 0xae, 0x3c, 0xa5, 0xf6, 0x48, 0x00, 0xec, 0x74,
+ 0xa8, 0xf8, 0xa8, 0xf8, 0x93, 0x27, 0xe1, 0x01, 0xb0, 0xfd, 0x3c, 0x53,
+ 0xe9, 0x24, 0x0b, 0x71, 0x2f, 0x4b, 0x91, 0x92, 0x88, 0x2a, 0x8a, 0xb6,
+ 0xad, 0xac, 0x0f, 0xcc, 0xe2, 0xc6, 0xa1, 0x7a, 0x23, 0xe5, 0x14, 0x67,
+ 0x4f, 0xe1, 0x11, 0x8a, 0xa5, 0x1a, 0xe5, 0xf6, 0x03, 0xf1, 0x8e, 0x8b,
+ 0x79, 0x1b, 0xc3, 0x6c, 0x5e, 0x70, 0xfd, 0xc7, 0xc4, 0x7c, 0xf3, 0x97,
+ 0x8a, 0x58, 0x08, 0xae, 0x4a, 0xd3, 0x0e, 0xdf, 0xc8, 0xcc, 0xef, 0x4e,
+ 0x0c, 0xa4, 0xd8, 0x7d, 0x9e, 0xeb, 0xc4, 0x6d, 0xac, 0x17, 0xd9, 0x56,
+ 0x5a, 0x11, 0x91, 0xcb, 0x10, 0x1c, 0x63, 0x84, 0xaa, 0x37, 0xef, 0xb0,
+ 0x66, 0xff, 0xae, 0x6f, 0x0c, 0x21, 0x18, 0xf4, 0xe5, 0xc3, 0x76, 0x15,
+ 0xfd, 0x76, 0xf5, 0xb7, 0x06, 0xe7, 0x4e, 0x22, 0x05, 0x53, 0x49, 0x04,
+ 0xa7, 0x64, 0x19, 0xa5, 0x93, 0xc3, 0xff, 0xff, 0xf0, 0x13, 0x9a, 0xca,
+ 0x61, 0xb0, 0xf5, 0x55, 0x9b, 0x1b, 0x2f, 0xb9, 0xb3, 0x10, 0x47, 0x4e,
+ 0x0e, 0xd9, 0xc2, 0xd7, 0x82, 0xe3, 0xa8, 0xbc, 0xd7, 0xa6, 0x87, 0x88,
+ 0xe7, 0x74, 0x27, 0xb9, 0xde, 0xf6, 0x58, 0x06, 0x10, 0x89, 0xd2, 0x38,
+ 0x4b, 0x7b, 0xf9, 0xd3, 0xa3, 0x86, 0xb1, 0xcc, 0x55, 0x79, 0xd9, 0xa0,
+ 0x97, 0xe6, 0x7c, 0xc0, 0x5e, 0x78, 0x05, 0x16, 0xcd, 0xa8, 0x2f, 0x33,
+ 0xe5, 0x5e, 0xb8, 0x52, 0x6b, 0x0d, 0x5f, 0xd4, 0xb5, 0x67, 0xd6, 0x3b,
+ 0x75, 0x43, 0xce, 0x2f, 0x8c, 0x1f, 0xba, 0x12, 0x20, 0xae, 0xa3, 0xeb,
+ 0xcf, 0x31, 0x7a, 0x65, 0xd2, 0xba, 0x47, 0x8f, 0xee, 0x2e, 0x39, 0xe9,
+ 0x2b, 0xa3, 0x2d, 0x86, 0x57, 0x3f, 0x87, 0xe7, 0xff, 0x0d, 0x8d, 0x17,
+ 0x17, 0x98, 0x28, 0xae, 0x55, 0xcf, 0xcf, 0x2d,
+};
+
+static uint8_t ciphertext_zsda_aes512xts_512bytes[] = {
+ 0xc5, 0x47, 0x28, 0x8d, 0x26, 0x64, 0x8f, 0xe4, 0x83, 0x45, 0xe5, 0xe2,
+ 0x0d, 0xb9, 0xa7, 0xe3, 0x72, 0x1d, 0x10, 0x5d, 0x5f, 0x5d, 0x12, 0x1e,
+ 0x26, 0xa5, 0x15, 0x55, 0x3b, 0xfd, 0xbb, 0xde, 0x80, 0x89, 0x37, 0x6e,
+ 0x5d, 0x2d, 0xfa, 0xcc, 0x64, 0x33, 0xca, 0x4a, 0xf1, 0xb2, 0x81, 0xb0,
+ 0x39, 0xb4, 0x24, 0x4d, 0x04, 0x39, 0x35, 0x04, 0x28, 0x58, 0xea, 0x6a,
+ 0x51, 0xfa, 0x1b, 0xcf, 0x97, 0xd4, 0xae, 0xc2, 0x84, 0xdc, 0xf9, 0x89,
+ 0x9d, 0xa0, 0x27, 0x6f, 0x9f, 0xf8, 0xc6, 0xdc, 0xe1, 0x3b, 0xc5, 0xe1,
+ 0x01, 0x75, 0x53, 0x37, 0x91, 0x84, 0x32, 0x3b, 0x8c, 0x19, 0xd6, 0x03,
+ 0x5d, 0xb8, 0x8c, 0x31, 0x1f, 0x88, 0xae, 0x3c, 0x62, 0x0a, 0xf4, 0x0d,
+ 0xf6, 0x4a, 0x01, 0x5f, 0x76, 0xbb, 0xaf, 0x35, 0x46, 0x7c, 0xde, 0xd1,
+ 0xe7, 0xf7, 0x89, 0xe8, 0x80, 0x55, 0x39, 0x0c, 0x40, 0x68, 0x82, 0x27,
+ 0x6c, 0xbf, 0x84, 0xf8, 0x9d, 0x46, 0xeb, 0x85, 0x46, 0x88, 0xd1, 0xe3,
+ 0xc0, 0xf5, 0x9c, 0x08, 0x62, 0xd6, 0x80, 0x01, 0xf5, 0xa8, 0x3e, 0x96,
+ 0xee, 0x4f, 0x7b, 0xf5, 0x71, 0x09, 0xd9, 0xcd, 0x47, 0xba, 0x6f, 0xf6,
+ 0x63, 0xaf, 0x9d, 0x04, 0xcd, 0xc5, 0x78, 0xee, 0x40, 0x4c, 0x51, 0xef,
+ 0xa8, 0xf1, 0x68, 0x00, 0xc7, 0x45, 0x5a, 0x18, 0x95, 0x9c, 0x06, 0x35,
+ 0x32, 0x90, 0x32, 0x1b, 0xf9, 0x0d, 0x9a, 0xe9, 0x85, 0x25, 0x2b, 0x3d,
+ 0x28, 0x00, 0x67, 0x57, 0xe2, 0x17, 0xd1, 0x15, 0xd8, 0xbd, 0xa5, 0xc1,
+ 0xbc, 0xba, 0x97, 0x49, 0x55, 0xc4, 0x1f, 0xd2, 0x0d, 0xb4, 0x04, 0x5d,
+ 0xb4, 0xc7, 0xfd, 0x38, 0xc0, 0x3b, 0x8a, 0xfa, 0xce, 0x98, 0x07, 0x09,
+ 0x9d, 0xb0, 0xe8, 0x7f, 0xc6, 0x35, 0x32, 0x9e, 0x67, 0x6f, 0x3d, 0xd1,
+ 0x4e, 0x59, 0x3d, 0x56, 0xe6, 0x47, 0x1f, 0xcc, 0x4a, 0xcf, 0x78, 0x7c,
+ 0x41, 0xcd, 0xd7, 0xc0, 0x21, 0xe9, 0xaf, 0xac, 0xa9, 0x12, 0x50, 0x19,
+ 0xc8, 0xf7, 0x97, 0xe8, 0x61, 0x35, 0x65, 0xf2, 0x81, 0xb8, 0xed, 0x17,
+ 0xcf, 0xc8, 0xbf, 0x55, 0x0a, 0xcb, 0xa1, 0x8f, 0x24, 0x99, 0x12, 0x76,
+ 0xb5, 0x77, 0x56, 0xb9, 0x23, 0xfd, 0x77, 0x34, 0xf0, 0x21, 0x32, 0x73,
+ 0x2c, 0x85, 0x42, 0x52, 0x26, 0x65, 0x30, 0xe0, 0xd0, 0x8a, 0x07, 0xf3,
+ 0xe0, 0x7d, 0xc7, 0xc6, 0x2b, 0x56, 0x27, 0xcb, 0x9c, 0xca, 0x22, 0xca,
+ 0xb7, 0xc1, 0x1f, 0x45, 0xb7, 0xad, 0x48, 0x14, 0x1d, 0xba, 0x2c, 0xcd,
+ 0x31, 0x7d, 0x1e, 0x2f, 0x1e, 0x04, 0x8d, 0x0b, 0x75, 0x93, 0x42, 0x86,
+ 0x3c, 0xb3, 0xa4, 0x38, 0x6d, 0xc0, 0x1e, 0xe7, 0x65, 0x9e, 0x5a, 0x3b,
+ 0x98, 0x69, 0xc9, 0x7d, 0x64, 0x25, 0x9c, 0xe1, 0x52, 0x7b, 0xfd, 0x68,
+ 0xc0, 0x8e, 0xe3, 0x62, 0xb6, 0x1f, 0x74, 0x58, 0x10, 0xb7, 0x9e, 0x51,
+ 0x08, 0xb2, 0x5d, 0x57, 0xfd, 0x3d, 0xb1, 0xbc, 0x84, 0xc8, 0x38, 0x92,
+ 0x3a, 0x10, 0x98, 0x2c, 0xb2, 0x10, 0x6d, 0xe2, 0xb4, 0x88, 0x8c, 0x23,
+ 0x4e, 0x80, 0x9b, 0x02, 0x40, 0x34, 0xfb, 0x86, 0x88, 0x6a, 0x5e, 0x7c,
+ 0x62, 0x2f, 0xa0, 0x3d, 0x51, 0xe4, 0x04, 0x35, 0xde, 0x80, 0x98, 0x2f,
+ 0xce, 0xa2, 0x0a, 0x94, 0x2d, 0x7f, 0x57, 0x5f, 0x34, 0x55, 0x55, 0x9d,
+ 0x82, 0xc6, 0xbe, 0x18, 0x90, 0xc0, 0x46, 0x9d, 0xda, 0x72, 0xd4, 0x24,
+ 0x5f, 0xce, 0xa8, 0x1e, 0x34, 0xfe, 0x99, 0x26, 0x69, 0xcd, 0x70, 0x96,
+ 0x69, 0xc4, 0x1a, 0x82, 0x50, 0x8c, 0x78, 0xef, 0xee, 0xf3, 0x92, 0x6e,
+ 0xe9, 0x57, 0xa6, 0xe2, 0x66, 0x11, 0x1c, 0xf7, 0x4e, 0x9b, 0xd7, 0xa9,
+ 0xd6, 0xeb, 0xcf, 0xc8, 0x51, 0x7e, 0x02, 0x42};
diff --git a/examples/zsda/test_zsda_cryptodev_hash_test_vectors.h b/examples/zsda/test_zsda_cryptodev_hash_test_vectors.h
new file mode 100644
index 0000000..4236d90
--- /dev/null
+++ b/examples/zsda/test_zsda_cryptodev_hash_test_vectors.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef TEST_ZSDA_CRYPTODEV_HASH_TEST_VECTORS_H_
+#define TEST_ZSDA_CRYPTODEV_HASH_TEST_VECTORS_H_
+
+#define LEN_HASH_PLAINTEXT 512
+
+static uint8_t plaintext_hash[] = {
+ 0x63, 0x67, 0x08, 0x2d, 0x68, 0x75, 0x6e, 0xf4, 0x68, 0x9f, 0xef, 0xc9,
+ 0x4c, 0xa2, 0x2e, 0xbb, 0xa6, 0x00, 0x72, 0xa6, 0x59, 0x1f, 0x39, 0x4f,
+ 0x4c, 0xa6, 0xe0, 0x10, 0x38, 0x5e, 0x5e, 0x87, 0x44, 0xb3, 0xc6, 0x60,
+ 0xfe, 0x76, 0xba, 0xe8, 0x13, 0x70, 0xc5, 0xba, 0xce, 0xb1, 0x7e, 0xaa,
+ 0x38, 0x55, 0xac, 0x74, 0xa3, 0xd7, 0xbd, 0x7b, 0x0c, 0x17, 0x06, 0xdc,
+ 0x48, 0x23, 0xe8, 0xde, 0xca, 0x8b, 0xf6, 0x3e, 0x96, 0xdc, 0x7a, 0x7f,
+ 0xb1, 0x49, 0x28, 0x8d, 0x67, 0x81, 0x7b, 0x28, 0x76, 0x8d, 0x00, 0xde,
+ 0xa6, 0x7d, 0x0e, 0xbd, 0x99, 0x71, 0xeb, 0x7c, 0xbe, 0x32, 0x6d, 0x2a,
+ 0xc9, 0x1a, 0x0a, 0xcc, 0x05, 0x46, 0xe0, 0x65, 0x88, 0x00, 0xde, 0x30,
+ 0xf5, 0xcf, 0x3c, 0x6b, 0x76, 0x8d, 0x00, 0xde, 0xa6, 0x7d, 0x0e, 0xbd,
+ 0x99, 0x71, 0xeb, 0x7c, 0xbe, 0x32, 0x6d, 0x2a, 0x42, 0x54, 0xf7, 0xff,
+ 0x93, 0xc9, 0xcd, 0x03, 0xd6, 0x90, 0x55, 0xb9, 0xf3, 0xdf, 0x6c, 0x65,
+ 0xd7, 0x35, 0xc0, 0x82, 0xb1, 0x02, 0x15, 0xb2, 0x36, 0x95, 0x99, 0xb6,
+ 0x45, 0x13, 0xf6, 0xbe, 0x99, 0x16, 0x45, 0x4f, 0xba, 0x83, 0x9b, 0x72,
+ 0xf2, 0x2a, 0x3f, 0xfa, 0x77, 0xe4, 0xe2, 0x7e, 0xd4, 0x5a, 0x8a, 0xc9,
+ 0xdf, 0xbf, 0xd8, 0x5b, 0x30, 0x1a, 0x58, 0xd9, 0xfd, 0xcf, 0xf1, 0xfb,
+ 0xba, 0xfa, 0xc5, 0xff, 0x06, 0xf2, 0x96, 0xe2, 0x1e, 0x4c, 0x8c, 0x48,
+ 0x68, 0xe5, 0xaf, 0xe8, 0x77, 0x92, 0xa0, 0x64, 0x1c, 0xea, 0x1f, 0x37,
+ 0xf8, 0xe8, 0x24, 0x32, 0x52, 0xb5, 0x00, 0x31, 0x1c, 0x0d, 0xf7, 0x34,
+ 0x96, 0x06, 0x98, 0x34, 0x2a, 0x76, 0x50, 0xfe, 0x46, 0xa5, 0x23, 0xf5,
+ 0x5d, 0xbc, 0x41, 0xc2, 0x2a, 0xc6, 0x43, 0x9a, 0x5e, 0x7f, 0xdb, 0x16,
+ 0x18, 0x8c, 0x6c, 0x14, 0xc8, 0xe0, 0x9f, 0x36, 0x24, 0x9a, 0xfd, 0x86,
+ 0x59, 0x36, 0xc3, 0x6f, 0xa6, 0x99, 0xc0, 0x08, 0x6f, 0x22, 0xbd, 0x25,
+ 0xb2, 0x72, 0x60, 0xa7, 0xa1, 0x18, 0x2d, 0x0c, 0x9c, 0x2b, 0xf8, 0x73,
+ 0xed, 0x03, 0xa0, 0x97, 0xd7, 0xbc, 0x0e, 0x78, 0xca, 0xcb, 0x56, 0x8f,
+ 0x39, 0x3c, 0x3b, 0xec, 0xed, 0x03, 0xa0, 0x97, 0xd7, 0xbc, 0x0e, 0x78,
+ 0xca, 0xcb, 0x56, 0x8f, 0x39, 0x3c, 0x3b, 0xec, 0x4e, 0xa9, 0x04, 0xff,
+ 0x40, 0xf3, 0x4b, 0x97, 0xa9, 0x4a, 0xb8, 0xd5, 0xed, 0x5c, 0x93, 0x43,
+ 0x76, 0x8d, 0x00, 0xde, 0xa6, 0x7d, 0x0e, 0xbd, 0x99, 0x71, 0xeb, 0x7c,
+ 0xbe, 0x32, 0x6d, 0x2a, 0x76, 0x8d, 0x00, 0xde, 0xa6, 0x7d, 0x0e, 0xbd,
+ 0x99, 0x71, 0xeb, 0x7c, 0xbe, 0x32, 0x6d, 0x2a, 0x83, 0x55, 0x5e, 0x23,
+ 0xfe, 0xc5, 0x64, 0xb6, 0xea, 0x31, 0x03, 0xe9, 0xc0, 0x1e, 0x05, 0x02,
+ 0x42, 0x54, 0xf7, 0xff, 0x93, 0xc9, 0xcd, 0x03, 0xd6, 0x90, 0x55, 0xb9,
+ 0xf3, 0xdf, 0x6c, 0x65, 0xd7, 0x35, 0xc0, 0x82, 0xb1, 0x02, 0x15, 0xb2,
+ 0x36, 0x95, 0x99, 0xb6, 0x45, 0x13, 0xf6, 0xbe, 0x68, 0x7f, 0xe0, 0x36,
+ 0x00, 0x17, 0x21, 0x5d, 0x0b, 0x3c, 0x8b, 0x05, 0x03, 0xf2, 0xbe, 0x88,
+ 0x6b, 0x02, 0x51, 0x64, 0x21, 0x48, 0x55, 0xc3, 0x14, 0x2e, 0x6a, 0x6a,
+ 0x22, 0x22, 0x69, 0xf6, 0x27, 0x73, 0xf2, 0x9e, 0xe0, 0xc9, 0xf0, 0x04,
+ 0x48, 0x23, 0x7d, 0xb6, 0x55, 0x81, 0x21, 0xf2, 0xed, 0x03, 0xa0, 0x97,
+ 0xd7, 0xbc, 0x0e, 0x78, 0xca, 0xcb, 0x56, 0x8f, 0x39, 0x3c, 0x3b, 0xec,
+ 0x42, 0x54, 0xf7, 0xff, 0x93, 0xc9, 0xcd, 0x03, 0xd6, 0x90, 0x55, 0xb9,
+ 0xf3, 0xdf, 0x6c, 0x65, 0xd7, 0x35, 0xc0, 0x82, 0xb1, 0x02, 0x15, 0xb2,
+ 0x36, 0x95, 0x99, 0xb6, 0x45, 0x13, 0xf6, 0xbe,
+};
+
+static struct blockcipher_test_data
+sha1_test_vector = {
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1,
+ .plaintext = {
+ .data = plaintext_hash,
+ .len = LEN_HASH_PLAINTEXT
+ },
+ .digest = {
+ .data = {
+ 0x27, 0x57, 0xFE, 0xCD, 0x0B, 0x06, 0x3D, 0x40,
+ 0xF8, 0x25, 0x82, 0x5D, 0xB6, 0xCD, 0x70, 0xAF,
+ 0x93, 0xCF, 0xFB, 0x2C,
+ },
+ .len = 20,
+ .truncated_len = 20
+ }
+};
+
+static struct blockcipher_test_data
+sha224_test_vector = {
+ .auth_algo = RTE_CRYPTO_AUTH_SHA224,
+ .plaintext = {
+ .data = plaintext_hash,
+ .len = LEN_HASH_PLAINTEXT
+ },
+ .digest = {
+ .data = {
+ 0xD7, 0x6A, 0x90, 0x9E, 0x08, 0xC9, 0x72, 0x9A,
+ 0x93, 0x36, 0xB5, 0x40, 0x7E, 0x8B, 0xCB, 0xEA,
+ 0x7E, 0x64, 0xA9, 0x64, 0x4B, 0xCE, 0x6E, 0x82,
+ 0x0E, 0x8C, 0x0D, 0x97
+ },
+ .len = 28,
+ .truncated_len = 28
+ }
+};
+
+static struct blockcipher_test_data
+sha256_test_vector = {
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256,
+ .plaintext = {
+ .data = plaintext_hash,
+ .len = LEN_HASH_PLAINTEXT
+ },
+ .digest = {
+ .data = {
+ 0x72, 0xA6, 0x6D, 0x01, 0x2B, 0x51, 0xDA, 0xE0,
+ 0x37, 0x7B, 0x30, 0x71, 0xC8, 0x1C, 0xD4, 0x15,
+ 0x5A, 0x8A, 0x44, 0x28, 0xFA, 0x9E, 0x20, 0x5F,
+ 0xA6, 0x86, 0xA0, 0x6D, 0xFA, 0xB9, 0x16, 0x76
+ },
+ .len = 32,
+ .truncated_len = 32
+ }
+};
+
+static struct blockcipher_test_data
+sha384_test_vector = {
+ .auth_algo = RTE_CRYPTO_AUTH_SHA384,
+ .plaintext = {
+ .data = plaintext_hash,
+ .len = LEN_HASH_PLAINTEXT
+ },
+ .digest = {
+ .data = {
+ 0xF7, 0x87, 0xB9, 0xE0, 0xC1, 0x59, 0xA3, 0x3C,
+ 0x47, 0xC6, 0x9B, 0x68, 0x3C, 0x43, 0x9D, 0xB0,
+ 0xD7, 0x02, 0x40, 0xF0, 0xD6, 0xC9, 0x39, 0x07,
+ 0x7A, 0x2B, 0xEE, 0x3E, 0x51, 0x09, 0xD7, 0x1E,
+ 0x5E, 0xCE, 0xAB, 0x42, 0xC6, 0x6A, 0x0C, 0x91,
+ 0x0E, 0x75, 0x83, 0x35, 0x9C, 0x49, 0x64, 0xC2
+ },
+ .len = 48,
+ .truncated_len = 48
+ }
+};
+
+static struct blockcipher_test_data
+sha512_test_vector = {
+ .auth_algo = RTE_CRYPTO_AUTH_SHA512,
+ .plaintext = {
+ .data = plaintext_hash,
+ .len = LEN_HASH_PLAINTEXT
+ },
+ .digest = {
+ .data = {
+ 0xC9, 0xE5, 0x79, 0x46, 0x2C, 0x73, 0xBF, 0x94,
+ 0x6A, 0x3B, 0x2F, 0xBE, 0x1B, 0x52, 0x15, 0xBC,
+ 0xA6, 0xA0, 0x55, 0xD7, 0x11, 0x5F, 0xD3, 0xD4,
+ 0xB9, 0x5E, 0x3A, 0xCA, 0xE9, 0x4A, 0x0A, 0xF4,
+ 0x98, 0x01, 0xDA, 0x2A, 0x22, 0x71, 0xB2, 0x18,
+ 0x5C, 0xEA, 0xF1, 0x89, 0xE7, 0xF5, 0x2D, 0xF2,
+ 0x60, 0x41, 0xE6, 0x51, 0x53, 0x89, 0x2D, 0xF4,
+ 0x05, 0x77, 0xE0, 0xB3, 0xE6, 0x80, 0x43, 0xE1
+ },
+ .len = 64,
+ .truncated_len = 64
+ }
+};
+
+static struct blockcipher_test_data
+sm3_test_vector = {
+ .auth_algo = RTE_CRYPTO_AUTH_SM3,
+ .plaintext = {
+ .data = plaintext_hash,
+ .len = LEN_HASH_PLAINTEXT
+ },
+ .digest = {
+ .data = {
+ 0x00, 0xDD, 0x55, 0x6D, 0x24, 0xFB, 0x95, 0x9B,
+ 0xA3, 0x32, 0x26, 0x66, 0xB8, 0x96, 0xD6, 0x13,
+ 0xFE, 0xCC, 0xBF, 0xBC, 0x81, 0x3E, 0x6A, 0x47,
+ 0xB0, 0xE7, 0x06, 0x59, 0xC8, 0xA1, 0x48, 0x95
+ },
+ .len = 32,
+ .truncated_len = 32
+ }
+};
+
+static struct blockcipher_test_case zsda_test_cases_hash[] = {
+ {
+ .test_descr = "SHA1 Digest",
+ .test_data = &sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ },
+ {
+ .test_descr = "SHA224 Digest",
+ .test_data = &sha224_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ },
+ {
+ .test_descr = "SHA256 Digest",
+ .test_data = &sha256_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ },
+ {
+ .test_descr = "SHA384 Digest",
+ .test_data = &sha384_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ },
+ {
+ .test_descr = "SHA512 Digest",
+ .test_data = &sha512_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ },
+ {
+ .test_descr = "SM3 Digest",
+ .test_data = &sm3_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ },
+};
+
+#endif /* TEST_ZSDA_CRYPTODEV_HASH_TEST_VECTORS_H_ */
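The digest vectors above exercise plain (unkeyed) hash generation. A minimal sketch of the corresponding auth-only transform, shown for the SHA-256 case (illustrative only, not taken from this patch):

#include <rte_crypto_sym.h>

static const struct rte_crypto_sym_xform sha256_digest_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA256,
		.key = { .data = NULL, .length = 0 },	/* plain hash: no key */
		.digest_length = 32,			/* matches the vector's .len */
		.iv = { .offset = 0, .length = 0 },
	},
};

The other cases differ only in .algo (RTE_CRYPTO_AUTH_SHA1/224/384/512, RTE_CRYPTO_AUTH_SM3) and the matching digest_length.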
diff --git a/lib/compressdev/rte_compressdev.h b/lib/compressdev/rte_compressdev.h
index 42bda9f..84eded2 100644
--- a/lib/compressdev/rte_compressdev.h
+++ b/lib/compressdev/rte_compressdev.h
@@ -21,6 +21,7 @@
extern "C" {
#endif
+#include <rte_common.h>
#include <rte_compat.h>
#include "rte_comp.h"
@@ -205,7 +206,10 @@ struct rte_compressdev_config {
uint16_t max_nb_streams;
/**< Max number of streams which will be created on the device */
};
-
+/** Compress device queue pair configuration structure. */
+struct rte_compressdev_qp_conf {
+ uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
+};
/**
* Configure a device.
*
@@ -313,6 +317,15 @@ __rte_experimental
uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id);
+/**
+ * Get the number of queue pairs on a specific comp device
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @return
+ * - The number of configured queue pairs.
+ */
+__rte_experimental uint16_t rte_compressdev_queue_pair_count(uint8_t dev_id);
/**
* Retrieve the general I/O statistics of a device.
diff --git a/lib/compressdev/rte_compressdev_pmd.h b/lib/compressdev/rte_compressdev_pmd.h
index ea01290..c3f787e 100644
--- a/lib/compressdev/rte_compressdev_pmd.h
+++ b/lib/compressdev/rte_compressdev_pmd.h
@@ -21,6 +21,9 @@ extern "C" {
#include <dev_driver.h>
+#include <rte_dev.h>
+#include <rte_common.h>
+
#include <rte_compat.h>
#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 33b4966..2c2d171 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -71,6 +71,7 @@ struct rte_crypto_va_iova_ptr {
struct rte_crypto_sym_vec {
/** number of operations to perform */
uint32_t num;
+ struct rte_crypto_sgl *sgl;
/** array of SGL vectors */
struct rte_crypto_sgl *src_sgl;
/** array of SGL vectors for OOP, keep it NULL for inplace*/
@@ -168,6 +169,7 @@ enum rte_crypto_cipher_algorithm {
* for m_src and m_dst in the rte_crypto_sym_op must be NULL.
*/
+ RTE_CRYPTO_CIPHER_SM4_XTS,
+ /**< ShangMi 4 (SM4) algorithm in XTS mode */
RTE_CRYPTO_CIPHER_SM4_ECB,
/**< ShangMi 4 (SM4) algorithm in ECB mode */
RTE_CRYPTO_CIPHER_SM4_CBC,
@@ -590,6 +592,8 @@ struct rte_crypto_sym_xform {
};
};
+struct rte_cryptodev_sym_session;
+
/**
* Symmetric Cryptographic Operation.
*
diff --git a/lib/cryptodev/rte_cryptodev_pmd.h b/lib/cryptodev/rte_cryptodev_pmd.h
new file mode 100644
index 0000000..5086124
--- /dev/null
+++ b/lib/cryptodev/rte_cryptodev_pmd.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _RTE_CRYPTODEV_PMD_H_
+#define _RTE_CRYPTODEV_PMD_H_
+
+/** @file
+ * RTE Crypto PMD APIs
+ *
+ * @note
+ * These APIs are for use by crypto PMDs only; user applications should not
+ * call them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_config.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_log.h>
+#include <rte_common.h>
+
+#include "rte_crypto.h"
+#include "rte_cryptodev.h"
+
+#define RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS 8
+
+#define RTE_CRYPTODEV_PMD_NAME_ARG ("name")
+#define RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_PMD_SOCKET_ID_ARG ("socket_id")
+
+/**
+ * Get the rte_cryptodev structure device pointer for the device. Assumes a
+ * valid device index.
+ *
+ * @param dev_id Device ID value to select the device structure.
+ *
+ * @return
+ * - The rte_cryptodev structure pointer for the given device ID.
+ */
+struct rte_cryptodev *rte_cryptodev_pmd_get_dev(uint8_t dev_id);
+
+/**
+ * Get the rte_cryptodev structure device pointer for the named device.
+ *
+ * @param name device name to select the device structure.
+ *
+ * @return
+ * - The rte_cryptodev structure pointer for the given device ID.
+ */
+struct rte_cryptodev *rte_cryptodev_pmd_get_named_dev(const char *name);
+
+/**
+ * Validate that the crypto device index refers to a valid attached crypto device.
+ *
+ * @param dev_id Crypto device index.
+ *
+ * @return
+ * - If the device index is valid (1) or not (0).
+ */
+unsigned int rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id);
+
+/**
+ * The pool of rte_cryptodev structures.
+ */
+extern struct rte_cryptodev *rte_cryptodevs;
+
+/**
+ * Definitions of all functions exported by a driver through the
+ * generic structure of type *crypto_dev_ops* supplied in the
+ * *rte_cryptodev* structure associated with a device.
+ */
+
+/**
+ * Function used to configure device.
+ *
+ * @param dev Crypto device pointer
+ * @param config Crypto device configurations
+ *
+ * @return Returns 0 on success
+ */
+typedef int (*cryptodev_configure_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+
+/**
+ * Function used to start a configured device.
+ *
+ * @param dev Crypto device pointer
+ *
+ * @return Returns 0 on success
+ */
+typedef int (*cryptodev_start_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to stop a configured device.
+ *
+ * @param dev Crypto device pointer
+ */
+typedef void (*cryptodev_stop_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to close a configured device.
+ *
+ * @param dev Crypto device pointer
+ * @return
+ * - 0 on success.
+ * - EAGAIN if can't close as device is busy
+ */
+typedef int (*cryptodev_close_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to get statistics of a device.
+ *
+ * @param dev Crypto device pointer
+ * @param stats Pointer to crypto device stats structure to populate
+ */
+typedef void (*cryptodev_stats_get_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+
+/**
+ * Function used to reset statistics of a device.
+ *
+ * @param dev Crypto device pointer
+ */
+typedef void (*cryptodev_stats_reset_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to get specific information of a device.
+ *
+ * @param dev Crypto device pointer
+ * @param dev_info Pointer to infos structure to populate
+ */
+typedef void (*cryptodev_info_get_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info);
+
+/**
+ * Setup a queue pair for a device.
+ *
+ * @param dev Crypto device pointer
+ * @param qp_id Queue Pair Index
+ * @param qp_conf Queue configuration structure
+ * @param socket_id Socket Index
+ *
+ * @return Returns 0 on success.
+ */
+typedef int (*cryptodev_queue_pair_setup_t)(
+ struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
+
+/**
+ * Release memory resources allocated by given queue pair.
+ *
+ * @param dev Crypto device pointer
+ * @param qp_id Queue Pair Index
+ *
+ * @return
+ * - 0 on success.
+ * - EAGAIN if can't close as device is busy
+ */
+typedef int (*cryptodev_queue_pair_release_t)(struct rte_cryptodev *dev,
+ uint16_t qp_id);
+
+/**
+ * Create a session mempool to allocate sessions from
+ *
+ * @param dev Crypto device pointer
+ * @param nb_objs number of session objects in mempool
+ * @param obj_cache_size lcore object cache size, see *rte_ring_create*
+ * @param socket_id Socket Id to allocate mempool on.
+ *
+ * @return
+ * - 0 on success
+ * - Negative value on failure
+ */
+typedef int (*cryptodev_sym_create_session_pool_t)(struct rte_cryptodev *dev,
+ unsigned int nb_objs,
+ unsigned int obj_cache_size,
+ int socket_id);
+
+/**
+ * Get the size of a cryptodev session
+ *
+ * @param dev Crypto device pointer
+ *
+ * @return
+ * - On success returns the size of the session structure for device
+ * - On failure returns 0
+ */
+typedef unsigned int (*cryptodev_sym_get_session_private_size_t)(
+ struct rte_cryptodev *dev);
+/**
+ * Get the size of an asymmetric cryptodev session
+ *
+ * @param dev Crypto device pointer
+ *
+ * @return
+ * - On success returns the size of the session structure for device
+ * - On failure returns 0
+ */
+typedef unsigned int (*cryptodev_asym_get_session_private_size_t)(
+ struct rte_cryptodev *dev);
+
+/**
+ * Perform actual crypto processing (encrypt/digest or auth/decrypt)
+ * on user provided data.
+ *
+ * @param dev Crypto device pointer
+ * @param sess Cryptodev session structure
+ * @param ofs Start and stop offsets for auth and cipher operations
+ * @param vec Vectorized operation descriptor
+ *
+ * @return
+ * - Returns number of successfully processed packets.
+ *
+ */
+typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t)(
+ struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec);
+
+/**
+ * Typedef that the driver provides to get the service context private data size.
+ *
+ * @param dev Crypto device pointer.
+ *
+ * @return
+ * - On success return the size of the device's service context private data.
+ * - On failure return negative integer.
+ */
+typedef int (*cryptodev_sym_get_raw_dp_ctx_size_t)(struct rte_cryptodev *dev);
+
+/**
+ * Typedef that the driver provides to configure the raw data-path context.
+ *
+ * @param dev Crypto device pointer.
+ * @param qp_id Crypto device queue pair index.
+ * @param ctx The raw data-path context data.
+ * @param sess_type session type.
+ * @param session_ctx Session context data. If NULL the driver
+ * shall only configure the drv_ctx_data in
+ * ctx buffer. Otherwise the driver shall only
+ * parse the session_ctx to set appropriate
+ * function pointers in ctx.
+ * @param is_update Set 0 if it is to initialize the ctx.
+ * Set 1 if ctx is initialized and only to update
+ * session context data.
+ * @return
+ * - On success return 0.
+ * - On failure return negative integer.
+ */
+typedef int (*cryptodev_sym_configure_raw_dp_ctx_t)(
+ struct rte_cryptodev *dev, uint16_t qp_id,
+ struct rte_crypto_raw_dp_ctx *ctx,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
+
+/**
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Allocates a new cryptodev slot for a crypto device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name Unique identifier name for each device
+ * @param socket_id Socket to allocate resources on.
+ * @return
+ * - Slot in the rte_dev_devices array for a new device;
+ */
+struct rte_cryptodev *rte_cryptodev_pmd_allocate(const char *name,
+ int socket_id);
+
+/**
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Release the specified cryptodev device.
+ *
+ * @param cryptodev
+ * The *cryptodev* pointer is the address of the *rte_cryptodev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+extern int rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev);
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a crypto driver to
+ * destroy and free resources associated with a crypto PMD device instance.
+ *
+ * @param cryptodev crypto device handle.
+ *
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev);
+
+/**
+ * Executes all the user application registered callbacks for the specific
+ * device.
+ *
+ * @param dev Pointer to cryptodev struct
+ * @param event Crypto device interrupt event type.
+ *
+ * @return
+ * void
+ */
+void rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
+ enum rte_cryptodev_event_type event);
+
+/**
+ * @internal
+ * Create a unique device name
+ */
+int rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTODEV_PMD_H_ */
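The typedefs above are the per-device callbacks a PMD supplies; they are gathered into an ops table and attached to a device obtained from rte_cryptodev_pmd_allocate(). A minimal sketch, assuming the rte_cryptodev_ops layout from DPDK's cryptodev library and using placeholder zsda_* names (not the driver code from this patch):

#include <errno.h>
#include <rte_common.h>
#include <rte_cryptodev.h>
#include "rte_cryptodev_pmd.h"

/* Placeholder callbacks matching cryptodev_configure_t and
 * cryptodev_queue_pair_setup_t above. */
static int
zsda_sym_dev_config(struct rte_cryptodev *dev __rte_unused,
		    struct rte_cryptodev_config *config __rte_unused)
{
	return 0;
}

static int
zsda_sym_qp_setup(struct rte_cryptodev *dev __rte_unused,
		  uint16_t qp_id __rte_unused,
		  const struct rte_cryptodev_qp_conf *qp_conf __rte_unused,
		  int socket_id __rte_unused)
{
	return 0;
}

/* Ops table (layout assumed from the cryptodev library); the remaining
 * callbacks (start/stop/close, stats, info, session handling) are left
 * NULL in this sketch. */
static struct rte_cryptodev_ops zsda_sym_ops = {
	.dev_configure = zsda_sym_dev_config,
	.queue_pair_setup = zsda_sym_qp_setup,
};

static int
zsda_sym_dev_create(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev = rte_cryptodev_pmd_allocate(name, socket_id);

	if (cryptodev == NULL)
		return -ENODEV;

	/* A real PMD would also set driver_id, feature_flags and the
	 * enqueue/dequeue burst functions before starting the device. */
	cryptodev->dev_ops = &zsda_sym_ops;
	return 0;
}

rte_cryptodev_pmd_release_device() (or rte_cryptodev_pmd_destroy()) is the matching teardown path when probing fails or the device is removed.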
diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index 4d9c1be..1179d8e 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -37,20 +37,15 @@
avp_vnic = {'Class': '05', 'Vendor': '1af4', 'Device': '1110',
'SVendor': None, 'SDevice': None}
-cnxk_bphy = {'Class': '08', 'Vendor': '177d', 'Device': 'a089',
- 'SVendor': None, 'SDevice': None}
-cnxk_bphy_cgx = {'Class': '08', 'Vendor': '177d', 'Device': 'a059,a060',
+octeontx2_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f9,a0fa',
'SVendor': None, 'SDevice': None}
-cnxk_dma = {'Class': '08', 'Vendor': '177d', 'Device': 'a081',
- 'SVendor': None, 'SDevice': None}
-cnxk_inl_dev = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f0,a0f1',
- 'SVendor': None, 'SDevice': None}
-
-hisilicon_dma = {'Class': '08', 'Vendor': '19e5', 'Device': 'a122',
+octeontx2_npa = {'Class': '08', 'Vendor': '177d', 'Device': 'a0fb,a0fc',
+ 'SVendor': None, 'SDevice': None}
+octeontx2_dma = {'Class': '08', 'Vendor': '177d', 'Device': 'a081',
+ 'SVendor': None, 'SDevice': None}
+octeontx2_ree = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f4',
'SVendor': None, 'SDevice': None}
-intel_dlb = {'Class': '0b', 'Vendor': '8086', 'Device': '270b,2710,2714',
- 'SVendor': None, 'SDevice': None}
intel_ioat_bdw = {'Class': '08', 'Vendor': '8086',
'Device': '6f20,6f21,6f22,6f23,6f24,6f25,6f26,6f27,6f2e,6f2f',
'SVendor': None, 'SDevice': None}
@@ -65,28 +60,19 @@
intel_ntb_icx = {'Class': '06', 'Vendor': '8086', 'Device': '347e',
'SVendor': None, 'SDevice': None}
-cnxk_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f9,a0fa',
- 'SVendor': None, 'SDevice': None}
-cnxk_npa = {'Class': '08', 'Vendor': '177d', 'Device': 'a0fb,a0fc',
- 'SVendor': None, 'SDevice': None}
-cn9k_ree = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f4',
- 'SVendor': None, 'SDevice': None}
-
-virtio_blk = {'Class': '01', 'Vendor': "1af4", 'Device': '1001,1042',
- 'SVendor': None, 'SDevice': None}
+zte_zsda = {'Class': '01', 'Vendor': '1cf2', 'Device': None,
+ 'SVendor': None, 'SDevice': None}
network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]
baseband_devices = [acceleration_class]
crypto_devices = [encryption_class, intel_processor_class]
-dma_devices = [cnxk_dma, hisilicon_dma,
- intel_idxd_spr, intel_ioat_bdw, intel_ioat_icx, intel_ioat_skx]
-eventdev_devices = [cavium_sso, cavium_tim, intel_dlb, cnxk_sso]
-mempool_devices = [cavium_fpa, cnxk_npa]
+eventdev_devices = [cavium_sso, cavium_tim, octeontx2_sso]
+mempool_devices = [cavium_fpa, octeontx2_npa]
compress_devices = [cavium_zip]
-regex_devices = [cn9k_ree]
-misc_devices = [cnxk_bphy, cnxk_bphy_cgx, cnxk_inl_dev,
+regex_devices = [octeontx2_ree]
+misc_devices = [intel_ioat_bdw, intel_ioat_skx, intel_ioat_icx, intel_idxd_spr,
intel_ntb_skx, intel_ntb_icx,
- virtio_blk]
+ octeontx2_dma, zte_zsda]
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
@@ -102,7 +88,6 @@
force_flag = False
args = []
-
# check if a specific kernel module is loaded
def module_is_loaded(module):
global loaded_modules
@@ -190,13 +175,11 @@ def get_pci_device_details(dev_id, probe_lspci):
return device
-
def clear_data():
'''This function clears any old data'''
global devices
devices = {}
-
def get_device_details(devices_type):
'''This function populates the "devices" dictionary. The keys used are
the pci addresses (domain:bus:slot.func). The values are themselves
@@ -242,7 +225,7 @@ def get_device_details(devices_type):
rt_info = route.split()
for i in range(len(rt_info) - 1):
if rt_info[i] == "dev":
- ssh_if.append(rt_info[i + 1])
+ ssh_if.append(rt_info[i+1])
# based on the basic info, get extended text details
for d in devices.keys():
@@ -296,7 +279,6 @@ def device_type_match(dev, devices_type):
return True
return False
-
def dev_id_from_dev_name(dev_name):
'''Take a device "name" - a string passed in by user to identify a NIC
device, and determine the device id - i.e. the domain:bus:slot.func - for
@@ -336,9 +318,9 @@ def unbind_one(dev_id, force):
filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
try:
f = open(filename, "a")
- except OSError as err:
- sys.exit("Error: unbind failed for %s - Cannot open %s: %s" %
- (dev_id, filename, err))
+ except:
+ sys.exit("Error: unbind failed for %s - Cannot open %s" %
+ (dev_id, filename))
f.write(dev_id)
f.close()
@@ -376,58 +358,58 @@ def bind_one(dev_id, driver, force):
if exists(filename):
try:
f = open(filename, "w")
- except OSError as err:
- print("Error: bind failed for %s - Cannot open %s: %s"
- % (dev_id, filename, err), file=sys.stderr)
+ except:
+ print("Error: bind failed for %s - Cannot open %s"
+ % (dev_id, filename), file=sys.stderr)
return
try:
f.write("%s" % driver)
f.close()
- except OSError as err:
+ except:
print("Error: bind failed for %s - Cannot write driver %s to "
- "PCI ID: %s" % (dev_id, driver, err), file=sys.stderr)
+ "PCI ID " % (dev_id, driver), file=sys.stderr)
return
# For kernels < 3.15 use new_id to add PCI id's to the driver
else:
filename = "/sys/bus/pci/drivers/%s/new_id" % driver
try:
f = open(filename, "w")
- except OSError as err:
- print("Error: bind failed for %s - Cannot open %s: %s"
- % (dev_id, filename, err), file=sys.stderr)
+ except:
+ print("Error: bind failed for %s - Cannot open %s"
+ % (dev_id, filename), file=sys.stderr)
return
try:
# Convert Device and Vendor Id to int to write to new_id
f.write("%04x %04x" % (int(dev["Vendor"], 16),
int(dev["Device"], 16)))
f.close()
- except OSError as err:
+ except:
print("Error: bind failed for %s - Cannot write new PCI ID to "
- "driver %s: %s" % (dev_id, driver, err), file=sys.stderr)
+ "driver %s" % (dev_id, driver), file=sys.stderr)
return
# do the bind by writing to /sys
filename = "/sys/bus/pci/drivers/%s/bind" % driver
try:
f = open(filename, "a")
- except OSError as err:
- print("Error: bind failed for %s - Cannot open %s: %s"
- % (dev_id, filename, err), file=sys.stderr)
+ except:
+ print("Error: bind failed for %s - Cannot open %s"
+ % (dev_id, filename), file=sys.stderr)
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
try:
f.write(dev_id)
f.close()
- except OSError as err:
+ except:
# for some reason, closing dev_id after adding a new PCI ID to new_id
# results in IOError. however, if the device was successfully bound,
# we don't care for any errors and can safely ignore IOError
tmp = get_pci_device_details(dev_id, True)
if "Driver_str" in tmp and tmp["Driver_str"] == driver:
return
- print("Error: bind failed for %s - Cannot bind to driver %s: %s"
- % (dev_id, driver, err), file=sys.stderr)
+ print("Error: bind failed for %s - Cannot bind to driver %s"
+ % (dev_id, driver), file=sys.stderr)
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
@@ -439,15 +421,15 @@ def bind_one(dev_id, driver, force):
if exists(filename):
try:
f = open(filename, "w")
- except OSError as err:
- sys.exit("Error: unbind failed for %s - Cannot open %s: %s"
- % (dev_id, filename, err))
+ except:
+ sys.exit("Error: unbind failed for %s - Cannot open %s"
+ % (dev_id, filename))
try:
f.write("\00")
f.close()
- except OSError as err:
- sys.exit("Error: unbind failed for %s - Cannot write %s: %s"
- % (dev_id, filename, err))
+ except:
+ sys.exit("Error: unbind failed for %s - Cannot open %s"
+ % (dev_id, filename))
def unbind_all(dev_list, force=False):
@@ -481,7 +463,7 @@ def bind_all(dev_list, driver, force=False):
dev_id_from_dev_name(driver)
# if we've made it this far, this means that the "driver" was a valid
# device string, so it's probably not a valid driver name.
- sys.exit("Error: Driver '%s' does not look like a valid driver. "
+ sys.exit("Error: Driver '%s' does not look like a valid driver. " \
"Did you forget to specify the driver to bind devices to?" % driver)
except ValueError:
# driver generated error - it's not a valid device ID, so all is well
@@ -511,8 +493,8 @@ def bind_all(dev_list, driver, force=False):
continue
# update information about this device
- devices[d] = dict(devices[d].items()
- + get_pci_device_details(d, True).items())
+ devices[d] = dict(devices[d].items() +
+ get_pci_device_details(d, True).items())
# check if updated information indicates that the device was bound
if "Driver_str" in devices[d]:
@@ -526,7 +508,7 @@ def display_devices(title, dev_list, extra_params=None):
device's dictionary.'''
strings = [] # this holds the strings to print. We sort before printing
print("\n%s" % title)
- print("=" * len(title))
+ print("="*len(title))
if not dev_list:
strings.append("<none>")
else:
@@ -542,7 +524,6 @@ def display_devices(title, dev_list, extra_params=None):
strings.sort()
print("\n".join(strings)) # print one per line
-
def show_device_status(devices_type, device_name, if_field=False):
global dpdk_drivers
kernel_drv = []
@@ -585,7 +566,6 @@ def show_device_status(devices_type, device_name, if_field=False):
display_devices("Other %s devices" % device_name, no_drv,
"unused=%(Module_str)s")
-
def show_status():
'''Function called when the script is passed the "--status" option.
Displays to the user what devices are bound to the igb_uio driver, the
@@ -600,9 +580,6 @@ def show_status():
if status_dev in ["crypto", "all"]:
show_device_status(crypto_devices, "Crypto")
- if status_dev in ["dma", "all"]:
- show_device_status(dma_devices, "DMA")
-
if status_dev in ["event", "all"]:
show_device_status(eventdev_devices, "Eventdev")
@@ -671,8 +648,8 @@ def parse_args():
parser.add_argument(
'--status-dev',
help="Print the status of given device group.",
- choices=['baseband', 'compress', 'crypto', 'dma', 'event',
- 'mempool', 'misc', 'net', 'regex'])
+ choices=['baseband', 'compress', 'crypto', 'event',
+ 'mempool', 'misc', 'net', 'regex'])
bind_group = parser.add_mutually_exclusive_group()
bind_group.add_argument(
'-b',
@@ -734,7 +711,6 @@ def parse_args():
new_args.extend(pci_glob(arg))
args = new_args
-
def do_arg_actions():
'''do the actual action requested by the user'''
global b_flag
@@ -753,7 +729,6 @@ def do_arg_actions():
get_device_details(network_devices)
get_device_details(baseband_devices)
get_device_details(crypto_devices)
- get_device_details(dma_devices)
get_device_details(eventdev_devices)
get_device_details(mempool_devices)
get_device_details(compress_devices)
@@ -776,7 +751,6 @@ def main():
get_device_details(network_devices)
get_device_details(baseband_devices)
get_device_details(crypto_devices)
- get_device_details(dma_devices)
get_device_details(eventdev_devices)
get_device_details(mempool_devices)
get_device_details(compress_devices)
@@ -784,6 +758,5 @@ def main():
get_device_details(misc_devices)
do_arg_actions()
-
if __name__ == "__main__":
main()
--
2.27.0