DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH v4 2/8] zsda: add support for zsdadev operations
@ 2024-09-09  8:11 Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 3/8] zsda: add support for queue operation Hanxiao Li
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Hanxiao Li @ 2024-09-09  8:11 UTC (permalink / raw)
  To: dev; +Cc: wang.yong19, Hanxiao Li


[-- Attachment #1.1.1: Type: text/plain, Size: 15706 bytes --]

Add support for zsdadev operations such as dev_start and dev_stop.

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 drivers/common/zsda/zsda_device.c | 476 ++++++++++++++++++++++++++++++
 drivers/common/zsda/zsda_device.h | 103 +++++++
 2 files changed, 579 insertions(+)
 create mode 100644 drivers/common/zsda/zsda_device.c
 create mode 100644 drivers/common/zsda/zsda_device.h

diff --git a/drivers/common/zsda/zsda_device.c b/drivers/common/zsda/zsda_device.c
new file mode 100644
index 0000000000..de8894f5a3
--- /dev/null
+++ b/drivers/common/zsda/zsda_device.c
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+
+#include <errno.h>
+#include <stdint.h>
+
+#include "zsda_device.h"
+
+/* per-process array of device data */
+struct zsda_device_info zsda_devs[RTE_PMD_ZSDA_MAX_PCI_DEVICES];
+static int zsda_nb_pci_devices;
+uint8_t zsda_num_used_qps;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_zsda_map[] = {
+	{
+		RTE_PCI_DEVICE(0x1cf2, 0x8050),
+	},
+	{
+		RTE_PCI_DEVICE(0x1cf2, 0x8051),
+	},
+	{.device_id = 0},
+};
+
+/* Poll a CSR until it reads back the expected value.
+ *
+ * Re-reads @addr up to ZSDA_TIME_NUM extra times, sleeping
+ * ZSDA_TIME_SLEEP_US between polls. Returns ZSDA_SUCCESS once the
+ * register equals @dst_value, ZSDA_FAILED on timeout.
+ */
+static int
+zsda_check_write(uint8_t *addr, const uint32_t dst_value)
+{
+	int times = ZSDA_TIME_NUM;
+	uint32_t val;
+
+	val = ZSDA_CSR_READ32(addr);
+
+	while ((val != dst_value) && times--) {
+		val = ZSDA_CSR_READ32(addr);
+		rte_delay_us_sleep(ZSDA_TIME_SLEEP_US);
+	}
+	if (val == dst_value)
+		return ZSDA_SUCCESS;
+	else
+		return ZSDA_FAILED;
+}
+
+/* Read the number of queue pairs currently in use on the device.
+ * The count is exposed by the hardware at BAR0 offset 0.
+ */
+static uint8_t
+zsda_get_num_used_qps(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+	return ZSDA_CSR_READ8(mmio_base + 0);
+}
+
+/* Start the admin queue.
+ *
+ * Pulses the start register: clears it, writes the start request, then
+ * waits until the device acknowledges the written value.
+ * Returns ZSDA_SUCCESS or ZSDA_FAILED.
+ */
+int
+zsda_admin_q_start(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t *addr = mmio_base + ZSDA_ADMIN_Q_START;
+
+	ZSDA_CSR_WRITE32(addr, 0);
+	ZSDA_CSR_WRITE32(addr, ZSDA_Q_START);
+
+	return zsda_check_write(addr, ZSDA_Q_START);
+}
+
+/* Stop the admin queue.
+ *
+ * Invalidates the response register, requests the stop, then waits for
+ * the device to post ZSDA_RESP_VALID as acknowledgement.
+ */
+int
+zsda_admin_q_stop(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret = 0;
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP_RESP, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP, ZSDA_Q_STOP);
+
+	ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_STOP_RESP,
+			       ZSDA_RESP_VALID);
+
+	if (ret)
+		ZSDA_LOG(INFO, "Failed! zsda_admin q stop");
+
+	return ret;
+}
+
+/* Clear the admin queue and wait for the device to acknowledge.
+ *
+ * NOTE(review): ZSDA_RESP_VALID is written to the CLR trigger register
+ * here, whereas the IO-queue clear path uses a dedicated trigger value
+ * (ZSDA_CLEAR_VALID) — confirm this is intended and not a copy/paste
+ * of the expected-response constant.
+ */
+int
+zsda_admin_q_clear(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret = 0;
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR_RESP, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR, ZSDA_RESP_VALID);
+
+	ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_CLR_RESP,
+			       ZSDA_RESP_VALID);
+
+	if (ret)
+		ZSDA_LOG(INFO, "Failed! zsda_admin q clear");
+
+	return ret;
+}
+
+/* Stop one IO queue identified by @id.
+ *
+ * Stop/response registers are laid out as arrays of 32-bit CSRs, hence
+ * the (4 * id) stride. The response register is re-invalidated after
+ * the check so the next stop request starts from a known state.
+ */
+static int
+zsda_queue_stop_single(uint8_t *mmio_base, const uint8_t id)
+{
+	int ret = 0;
+	uint8_t *addr_stop = mmio_base + ZSDA_IO_Q_STOP + (4 * id);
+	uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_STOP_RESP + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(addr_stop, ZSDA_Q_STOP);
+
+	ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+
+	return ret;
+}
+
+/* Stop every in-use IO queue.
+ *
+ * Failures are OR-accumulated so one bad queue does not hide the
+ * others; returns non-zero if any queue failed to stop.
+ */
+int
+zsda_queue_stop(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret = 0;
+	uint8_t qid;
+
+	for (qid = 0; qid < zsda_num_used_qps; qid++)
+		ret |= zsda_queue_stop_single(mmio_base, qid);
+
+	return ret;
+}
+
+/* Start one IO queue identified by @id and wait for the write to take
+ * effect (start registers are 32-bit CSRs at a 4-byte stride).
+ */
+static int
+zsda_queue_start_single(uint8_t *mmio_base, const uint8_t id)
+{
+	uint8_t *addr_start = mmio_base + ZSDA_IO_Q_START + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_start, ZSDA_Q_START);
+	return zsda_check_write(addr_start, ZSDA_Q_START);
+}
+
+/* Start every in-use IO queue.
+ *
+ * Failures are OR-accumulated; returns non-zero if any queue failed
+ * to start.
+ */
+int
+zsda_queue_start(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret = 0;
+	uint8_t qid;
+
+	for (qid = 0; qid < zsda_num_used_qps; qid++)
+		ret |= zsda_queue_start_single(mmio_base, qid);
+
+	return ret;
+}
+
+/* Clear one IO queue identified by @id.
+ *
+ * Invalidates the response CSR, asserts the clear trigger, waits for
+ * the device acknowledgement, then de-asserts the trigger.
+ */
+static int
+zsda_queue_clear_single(uint8_t *mmio_base, const uint8_t id)
+{
+	int ret = 0;
+	uint8_t *addr_clear = mmio_base + ZSDA_IO_Q_CLR + (4 * id);
+	uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_CLR_RESP + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_VALID);
+	ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+	ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_INVALID);
+
+	return ret;
+}
+
+/* Clear every in-use IO queue.
+ *
+ * Failures are OR-accumulated; returns non-zero if any queue failed
+ * to clear.
+ */
+int
+zsda_queue_clear(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret = 0;
+	uint8_t qid;
+
+	for (qid = 0; qid < zsda_num_used_qps; qid++)
+		ret |= zsda_queue_clear_single(mmio_base, qid);
+
+	return ret;
+}
+
+/* Look up a device instance by its registered name.
+ *
+ * Scans the per-process device table for a memzone whose stored
+ * zsda_pci_device::name matches @name. Returns NULL when @name is NULL
+ * or no match is found.
+ */
+static struct zsda_pci_device *
+zsda_pci_get_named_dev(const char *name)
+{
+	struct zsda_pci_device *dev;
+	unsigned int idx;
+
+	if (name == NULL) {
+		ZSDA_LOG(ERR, E_NULL);
+		return NULL;
+	}
+
+	for (idx = 0; idx < RTE_PMD_ZSDA_MAX_PCI_DEVICES; idx++) {
+		if (zsda_devs[idx].mz == NULL)
+			continue;
+		dev = (struct zsda_pci_device *)zsda_devs[idx].mz->addr;
+		if (strcmp(dev->name, name) == 0)
+			return dev;
+	}
+
+	return NULL;
+}
+
+/* Find the first unused slot in the device table.
+ *
+ * NOTE(review): when the table is full, dev_id equals
+ * RTE_PMD_ZSDA_MAX_PCI_DEVICES and the `& (ZSDA_MAX_DEV - 1)` mask can
+ * fold it onto a valid-looking index; the caller only rejects the value
+ * (RTE_PMD_ZSDA_MAX_PCI_DEVICES - 1). Confirm the mask and the caller's
+ * bound check agree.
+ */
+static uint8_t
+zsda_pci_find_free_device_index(void)
+{
+	uint32_t dev_id;
+
+	for (dev_id = 0; dev_id < RTE_PMD_ZSDA_MAX_PCI_DEVICES; dev_id++)
+		if (zsda_devs[dev_id].mz == NULL)
+			break;
+
+	return dev_id & (ZSDA_MAX_DEV - 1);
+}
+
+/* Map an rte_pci_device back to its zsda_pci_device instance.
+ *
+ * Devices are registered under "<pci-addr>_zsda" (see
+ * zsda_pci_device_allocate()), so the suffix must be appended before
+ * the name lookup; without it the lookup can never match and callers
+ * such as zsda_pci_remove() would silently skip cleanup.
+ * Returns NULL when no matching instance exists.
+ */
+struct zsda_pci_device *
+zsda_get_zsda_dev_from_pci_dev(const struct rte_pci_device *pci_dev)
+{
+	char name[ZSDA_DEV_NAME_MAX_LEN];
+
+	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+	snprintf(name + strlen(name), ZSDA_DEV_NAME_MAX_LEN - strlen(name),
+		 "_zsda");
+
+	return zsda_pci_get_named_dev(name);
+}
+
+/* Allocate (or attach to) the shared device instance for @pci_dev.
+ *
+ * The instance lives in a named memzone ("<pci-addr>_zsda") so it can
+ * be shared across processes: a secondary process only looks the zone
+ * up, while the primary reserves it, zeroes it and fills in identity
+ * and lock state. Returns NULL on any failure (zone missing in
+ * secondary, duplicate name, table full, or reservation failure).
+ */
+struct zsda_pci_device *
+zsda_pci_device_allocate(struct rte_pci_device *pci_dev)
+{
+	struct zsda_pci_device *zsda_pci_dev;
+	uint8_t zsda_dev_id;
+	char name[ZSDA_DEV_NAME_MAX_LEN];
+
+	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+	snprintf(name + strlen(name), (ZSDA_DEV_NAME_MAX_LEN - strlen(name)),
+		 "_zsda");
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		/* Secondary: attach to the zone the primary created. */
+		const struct rte_memzone *mz = rte_memzone_lookup(name);
+
+		if (mz == NULL) {
+			ZSDA_LOG(ERR, "Secondary can't find %s mz", name);
+			return NULL;
+		}
+		zsda_pci_dev = mz->addr;
+		zsda_devs[zsda_pci_dev->zsda_dev_id].mz = mz;
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev = pci_dev;
+		zsda_nb_pci_devices++;
+		return zsda_pci_dev;
+	}
+
+	if (zsda_pci_get_named_dev(name) != NULL) {
+		ZSDA_LOG(ERR, E_CONFIG);
+		return NULL;
+	}
+
+	zsda_dev_id = zsda_pci_find_free_device_index();
+
+	if (zsda_dev_id == (RTE_PMD_ZSDA_MAX_PCI_DEVICES - 1)) {
+		ZSDA_LOG(ERR, "Reached maximum number of ZSDA devices");
+		return NULL;
+	}
+
+	unsigned int socket_id = rte_socket_id();
+
+	zsda_devs[zsda_dev_id].mz =
+		rte_memzone_reserve(name, sizeof(struct zsda_pci_device),
+				    (int)(socket_id & 0xfff), 0);
+
+	if (zsda_devs[zsda_dev_id].mz == NULL) {
+		ZSDA_LOG(ERR, E_MALLOC);
+		return NULL;
+	}
+
+	zsda_pci_dev = zsda_devs[zsda_dev_id].mz->addr;
+	memset(zsda_pci_dev, 0, sizeof(*zsda_pci_dev));
+	strlcpy(zsda_pci_dev->name, name, ZSDA_DEV_NAME_MAX_LEN);
+	zsda_pci_dev->zsda_dev_id = zsda_dev_id;
+	zsda_pci_dev->pci_dev = pci_dev;
+	zsda_devs[zsda_dev_id].pci_dev = pci_dev;
+
+	rte_spinlock_init(&zsda_pci_dev->arb_csr_lock);
+	zsda_nb_pci_devices++;
+
+	return zsda_pci_dev;
+}
+
+/* Release the shared device instance for @pci_dev.
+ *
+ * Rebuilds the registered "<pci-addr>_zsda" name, finds the instance,
+ * frees the backing memzone (primary process only) and clears the
+ * device-table slot. Returns -EBUSY while a compression device still
+ * references the instance, -EINVAL for a NULL @pci_dev, 0 otherwise.
+ */
+static int
+zsda_pci_device_release(const struct rte_pci_device *pci_dev)
+{
+	struct zsda_pci_device *zsda_pci_dev;
+	struct zsda_device_info *inst;
+	char name[ZSDA_DEV_NAME_MAX_LEN];
+
+	if (pci_dev == NULL)
+		return -EINVAL;
+
+	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+	/* Size is the space remaining after the PCI name; the previous
+	 * "- (strlen(name) - 1)" form over-stated it by one byte and
+	 * allowed snprintf to write past the buffer end. Matches the
+	 * expression used in zsda_pci_device_allocate().
+	 */
+	snprintf(name + strlen(name),
+		 ZSDA_DEV_NAME_MAX_LEN - strlen(name), "_zsda");
+	zsda_pci_dev = zsda_pci_get_named_dev(name);
+	if (zsda_pci_dev != NULL) {
+		inst = &zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			if (zsda_pci_dev->comp_dev != NULL) {
+				ZSDA_LOG(DEBUG, "ZSDA device %s is busy", name);
+				return -EBUSY;
+			}
+			rte_memzone_free(inst->mz);
+		}
+		memset(inst, 0, sizeof(struct zsda_device_info));
+		zsda_nb_pci_devices--;
+	}
+	return 0;
+}
+
+/* Tear down the compression device, then release the shared instance
+ * and its device-table slot. Returns the release status.
+ */
+static int
+zsda_pci_dev_destroy(struct zsda_pci_device *zsda_pci_dev,
+		     const struct rte_pci_device *pci_dev)
+{
+	zsda_comp_dev_destroy(zsda_pci_dev);
+
+	return zsda_pci_device_release(pci_dev);
+}
+
+/* Query the hardware configuration of queue @qid via the admin queue.
+ *
+ * Sends a ZSDA_ADMIN_QUEUE_CFG_REQ message and copies the response's
+ * qcfg into @qcfg. Returns ZSDA_SUCCESS, ZSDA_FAILED for an
+ * out-of-range qid, or the send/receive error code; @qcfg is only
+ * written on success.
+ */
+int
+zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+			 const uint8_t qid, struct qinfo *qcfg)
+{
+	struct zsda_admin_req_qcfg req = {0};
+	struct zsda_admin_resp_qcfg resp = {0};
+	int ret = 0;
+	struct rte_pci_device *pci_dev =
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+	if (qid >= MAX_QPS_ON_FUNCTION) {
+		ZSDA_LOG(ERR, "qid beyond limit!");
+		return ZSDA_FAILED;
+	}
+
+	zsda_admin_msg_init(pci_dev);
+	req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
+	req.qid = qid;
+
+	ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Send msg");
+		return ret;
+	}
+
+	ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Receive msg");
+		return ret;
+	}
+
+	memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
+
+	return ZSDA_SUCCESS;
+}
+
+/* Send a ZSDA_FLR_SET_FUNCTION admin message (unmask function-level
+ * reset for this function) and wait for the device's reply.
+ * Returns ZSDA_SUCCESS or the send/receive error code.
+ */
+static int
+zsda_unmask_flr(const struct zsda_pci_device *zsda_pci_dev)
+{
+	struct zsda_admin_req_qcfg req = {0};
+	struct zsda_admin_resp_qcfg resp = {0};
+
+	int ret = 0;
+	struct rte_pci_device *pci_dev =
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+	zsda_admin_msg_init(pci_dev);
+
+	req.msg_type = ZSDA_FLR_SET_FUNCTION;
+
+	ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Send msg");
+		return ret;
+	}
+
+	ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Receive msg");
+		return ret;
+	}
+
+	return ZSDA_SUCCESS;
+}
+
+/* PCI probe callback: allocate the shared device instance, bring up
+ * the admin queue, clear the IO queues, unmask FLR, discover the queue
+ * configuration and create the compression device.
+ *
+ * All failure paths after a successful allocation release the instance
+ * again; the previous code leaked the memzone and device-table slot on
+ * every intermediate failure.
+ */
+static int
+zsda_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
+{
+	int ret = 0;
+	struct zsda_pci_device *zsda_pci_dev;
+
+	zsda_pci_dev = zsda_pci_device_allocate(pci_dev);
+	if (zsda_pci_dev == NULL) {
+		ZSDA_LOG(ERR, E_NULL);
+		return -ENODEV;
+	}
+
+	/* One extra queue pair is reserved beyond what the device
+	 * reports as in use.
+	 */
+	zsda_num_used_qps = zsda_get_num_used_qps(zsda_pci_dev->pci_dev);
+	zsda_num_used_qps++;
+
+	ret = zsda_admin_q_start(zsda_pci_dev->pci_dev);
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! admin q start");
+		goto error;
+	}
+
+	ret = zsda_queue_clear(zsda_pci_dev->pci_dev);
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! used zsda_io q clear");
+		goto error;
+	}
+
+	ret = zsda_unmask_flr(zsda_pci_dev);
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! flr close");
+		goto error;
+	}
+
+	ret = zsda_get_queue_cfg(zsda_pci_dev);
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! zsda_get_queue_cfg");
+		goto error;
+	}
+
+	ret = zsda_comp_dev_create(zsda_pci_dev);
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! dev create");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	zsda_pci_dev_destroy(zsda_pci_dev, pci_dev);
+	return ret;
+}
+
+/* PCI remove callback: clear and stop the admin queue (failures are
+ * logged but do not abort removal), then destroy the device instance.
+ * Returns 0 if the device was never registered, -EINVAL for NULL.
+ */
+static int
+zsda_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct zsda_pci_device *zsda_pci_dev;
+
+	if (pci_dev == NULL)
+		return -EINVAL;
+
+	zsda_pci_dev = zsda_get_zsda_dev_from_pci_dev(pci_dev);
+	if (zsda_pci_dev == NULL)
+		return 0;
+
+	if (zsda_admin_q_clear(zsda_pci_dev->pci_dev) == ZSDA_FAILED)
+		ZSDA_LOG(ERR, "Failed! q clear");
+
+	if (zsda_admin_q_stop(zsda_pci_dev->pci_dev) == ZSDA_FAILED)
+		ZSDA_LOG(ERR, "Failed! q stop");
+
+	return zsda_pci_dev_destroy(zsda_pci_dev, pci_dev);
+}
+
+/* PCI driver descriptor: binds the ZSDA device-id table to the
+ * probe/remove callbacks and registers the PMD with the PCI bus.
+ */
+static struct rte_pci_driver rte_zsda_pmd = {
+	.id_table = pci_id_zsda_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = zsda_pci_probe,
+	.remove = zsda_pci_remove };
+
+RTE_PMD_REGISTER_PCI(ZSDA_PCI_NAME, rte_zsda_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(ZSDA_PCI_NAME, pci_id_zsda_map);
+RTE_PMD_REGISTER_KMOD_DEP(ZSDA_PCI_NAME,
+			  "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/common/zsda/zsda_device.h b/drivers/common/zsda/zsda_device.h
new file mode 100644
index 0000000000..1b2ad0ce85
--- /dev/null
+++ b/drivers/common/zsda/zsda_device.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_DEVICE_H_
+#define _ZSDA_DEVICE_H_
+
+#include "bus_pci_driver.h"
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+
+struct zsda_device_info {
+	const struct rte_memzone *mz;
+	/**< mz to store the:  struct zsda_pci_device ,    so it can be
+	 * shared across processes
+	 */
+
+	struct rte_pci_device *pci_dev;
+
+	struct rte_device comp_rte_dev;
+	/**< This represents the compression subset of this pci device.
+	 * Register with this rather than with the one in
+	 * pci_dev so that its driver can have a compression-specific name
+	 */
+};
+
+extern struct zsda_device_info zsda_devs[];
+
+struct zsda_comp_dev_private;
+
+struct zsda_qp_hw_data {
+	bool used;
+
+	uint8_t tx_ring_num;
+	uint8_t rx_ring_num;
+	uint16_t tx_msg_size;
+	uint16_t rx_msg_size;
+};
+
+struct zsda_qp_hw {
+	struct zsda_qp_hw_data data[MAX_QPS_ON_FUNCTION];
+};
+
+/*
+ * This struct holds all the data about a ZSDA pci device
+ * including data about all services it supports.
+ * It contains
+ *  - hw_data
+ *  - config data
+ *  - runtime data
+ * Note: as this data can be shared in a multi-process scenario,
+ * any pointers in it must also point to shared memory.
+ */
+struct zsda_pci_device {
+	/* Data used by all services */
+	char name[ZSDA_DEV_NAME_MAX_LEN];
+	/**< Name of zsda pci device */
+	uint8_t zsda_dev_id;
+	/**< Id of device instance for this zsda pci device */
+
+	rte_spinlock_t arb_csr_lock;
+	/**< lock to protect accesses to the arbiter CSR */
+
+	struct rte_pci_device *pci_dev;
+
+	/* Data relating to compression service */
+	struct zsda_comp_dev_private *comp_dev;
+	/**< link back to compressdev private data */
+
+	struct zsda_qp_hw zsda_hw_qps[ZSDA_MAX_SERVICES];
+	uint16_t zsda_qp_hw_num[ZSDA_MAX_SERVICES];
+};
+
+struct zsda_pci_device *
+zsda_pci_device_allocate(struct rte_pci_device *pci_dev);
+
+struct zsda_pci_device *
+zsda_get_zsda_dev_from_pci_dev(const struct rte_pci_device *pci_dev);
+
+__rte_weak int
+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+
+__rte_weak int
+zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+__rte_weak int
+zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+			     const uint8_t qid, struct qinfo *qcfg);
+
+int zsda_queue_start(const struct rte_pci_device *pci_dev);
+int zsda_queue_stop(const struct rte_pci_device *pci_dev);
+int zsda_queue_clear(const struct rte_pci_device *pci_dev);
+
+int zsda_admin_q_start(const struct rte_pci_device *pci_dev);
+int zsda_admin_q_stop(const struct rte_pci_device *pci_dev);
+int zsda_admin_q_clear(const struct rte_pci_device *pci_dev);
+
+int zsda_set_cycle_head_tail(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_DEVICE_H_ */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 33355 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v4 3/8] zsda: add support for queue operation
  2024-09-09  8:11 [PATCH v4 2/8] zsda: add support for zsdadev operations Hanxiao Li
@ 2024-09-09  8:11 ` Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 4/8] zsda: add zsda compressdev driver and interface Hanxiao Li
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Hanxiao Li @ 2024-09-09  8:11 UTC (permalink / raw)
  To: dev; +Cc: wang.yong19, Hanxiao Li


[-- Attachment #1.1.1: Type: text/plain, Size: 26071 bytes --]

Add queue initialization, release, enqueue, dequeue and other interfaces.

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 drivers/common/zsda/zsda_qp.c | 720 ++++++++++++++++++++++++++++++++++
 drivers/common/zsda/zsda_qp.h | 160 ++++++++
 2 files changed, 880 insertions(+)
 create mode 100644 drivers/common/zsda/zsda_qp.c
 create mode 100644 drivers/common/zsda/zsda_qp.h

diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c
new file mode 100644
index 0000000000..f2dfe43b2e
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.c
@@ -0,0 +1,720 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+
+#include <rte_malloc.h>
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+#define RING_DIR_TX 0
+#define RING_DIR_RX 1
+
+struct ring_size {
+	uint16_t tx_msg_size;
+	uint16_t rx_msg_size;
+};
+
+struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+	[ZSDA_SERVICE_COMPRESSION] = {32, 16},
+	[ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+};
+
+/* Program the initial head/tail (doorbell) state for queue @qid via
+ * the IO_DB_INITIAL_CONFIG CSR array (4-byte stride per queue).
+ */
+static void
+zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,
+			 const uint8_t qid)
+{
+	struct rte_pci_device *pci_dev =
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+	ZSDA_CSR_WRITE32(mmio_base + IO_DB_INITIAL_CONFIG + (qid * 4),
+			 SET_HEAD_INTI);
+}
+
+/* Discover the per-service queue layout of the device.
+ *
+ * For each in-use queue: reset its doorbell state, query its hardware
+ * configuration, and record ring numbers and message sizes in the
+ * per-service table, counting queues per service in zsda_qp_hw_num[].
+ * Returns 0 on success or the first query error.
+ */
+int
+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
+{
+	uint8_t i;
+	uint32_t index;
+	enum zsda_service_type type;
+	struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
+	struct qinfo qcfg;
+	int ret = 0;
+
+	for (i = 0; i < zsda_num_used_qps; i++) {
+		zsda_set_queue_head_tail(zsda_pci_dev, i);
+		ret = zsda_get_queue_cfg_by_id(zsda_pci_dev, i, &qcfg);
+		if (ret) {
+			ZSDA_LOG(ERR, "get queue cfg!");
+			return ret;
+		}
+		/* Read qcfg only after the query succeeded; it is not
+		 * written on failure, so the old order read an
+		 * indeterminate q_type.
+		 */
+		type = qcfg.q_type;
+		if (type >= ZSDA_SERVICE_INVALID)
+			continue;
+
+		index = zsda_pci_dev->zsda_qp_hw_num[type];
+		zsda_hw_qps[type].data[index].used = true;
+		zsda_hw_qps[type].data[index].tx_ring_num = i;
+		zsda_hw_qps[type].data[index].rx_ring_num = i;
+		zsda_hw_qps[type].data[index].tx_msg_size =
+			zsda_qp_hw_ring_size[type].tx_msg_size;
+		zsda_hw_qps[type].data[index].rx_msg_size =
+			zsda_qp_hw_ring_size[type].rx_msg_size;
+
+		zsda_pci_dev->zsda_qp_hw_num[type]++;
+	}
+
+	return ret;
+}
+
+/* Return the hardware queue table for @service, or NULL for an
+ * out-of-range service type.
+ */
+struct zsda_qp_hw *
+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+			const enum zsda_service_type service)
+{
+	if (service >= ZSDA_SERVICE_INVALID)
+		return NULL;
+
+	return &zsda_pci_dev->zsda_hw_qps[service];
+}
+
+/* Return the number of hardware queue pairs assigned to @service;
+ * out-of-range service types expose zero queue pairs.
+ */
+uint16_t
+zsda_qps_per_service(const struct zsda_pci_device *zsda_pci_dev,
+		     const enum zsda_service_type service)
+{
+	if (service >= ZSDA_SERVICE_INVALID)
+		return 0;
+
+	return zsda_pci_dev->zsda_qp_hw_num[service];
+}
+
+/* Maximum number of queue pairs usable by the compressdev.
+ *
+ * Normally the minimum of the compression and decompression counts;
+ * if either service owns all function queues, or if either count is
+ * zero, MAX_QPS_ON_FUNCTION is reported instead.
+ */
+uint16_t
+zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev)
+{
+	uint16_t comp =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);
+	uint16_t decomp =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);
+	uint16_t min = 0;
+
+	if ((comp == MAX_QPS_ON_FUNCTION) ||
+		(decomp == MAX_QPS_ON_FUNCTION))
+		min = MAX_QPS_ON_FUNCTION;
+	else
+		min = (comp < decomp) ? comp : decomp;
+	if (min == 0)
+		return MAX_QPS_ON_FUNCTION;
+	return min;
+}
+
+
+/* Accumulate enqueue/dequeue statistics over @nb_queue_pairs queue
+ * pairs into @stats (caller zero-initializes @stats).
+ *
+ * A NULL queue pair stops the scan early; only services marked `used`
+ * contribute.
+ *
+ * NOTE(review): this loop bounds the service index at
+ * ZSDA_SERVICE_INVALID while zsda_stats_reset() uses
+ * ZSDA_MAX_SERVICES — confirm both constants cover the same range.
+ */
+void
+zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
+	      struct zsda_common_stat *stats)
+{
+	enum zsda_service_type type;
+	uint32_t i;
+	struct zsda_qp *qp;
+
+	if ((stats == NULL) || (queue_pairs == NULL)) {
+		ZSDA_LOG(ERR, E_NULL);
+		return;
+	}
+
+	for (i = 0; i < nb_queue_pairs; i++) {
+		qp = (struct zsda_qp *)queue_pairs[i];
+
+		if (qp == NULL) {
+			ZSDA_LOG(ERR, E_NULL);
+			break;
+		}
+
+		for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+			if (qp->srv[type].used) {
+				stats->enqueued_count +=
+					qp->srv[type].stats.enqueued_count;
+				stats->dequeued_count +=
+					qp->srv[type].stats.dequeued_count;
+				stats->enqueue_err_count +=
+					qp->srv[type].stats.enqueue_err_count;
+				stats->dequeue_err_count +=
+					qp->srv[type].stats.dequeue_err_count;
+			}
+		}
+	}
+}
+
+/* Zero the statistics of every in-use service on each of the
+ * @nb_queue_pairs queue pairs; a NULL queue pair stops the scan early.
+ */
+void
+zsda_stats_reset(void **queue_pairs, const uint32_t nb_queue_pairs)
+{
+	enum zsda_service_type type;
+	uint32_t i;
+	struct zsda_qp *qp;
+
+	if (queue_pairs == NULL) {
+		ZSDA_LOG(ERR, E_NULL);
+		return;
+	}
+
+	for (i = 0; i < nb_queue_pairs; i++) {
+		qp = (struct zsda_qp *)queue_pairs[i];
+
+		if (qp == NULL) {
+			ZSDA_LOG(ERR, E_NULL);
+			break;
+		}
+		for (type = 0; type < ZSDA_MAX_SERVICES; type++) {
+			if (qp->srv[type].used)
+				memset(&(qp->srv[type].stats), 0,
+				       sizeof(struct zsda_common_stat));
+		}
+	}
+}
+
+/* Reserve (or re-use) an IOVA-contiguous memzone for a ring.
+ *
+ * An existing zone with the same name is re-used when it is large
+ * enough and on a compatible socket; otherwise reservation of a
+ * conflicting zone fails with NULL. The zone is aligned to its own
+ * size, as required for the hardware ring base address.
+ */
+static const struct rte_memzone *
+zsda_queue_dma_zone_reserve(const char *queue_name, const unsigned int queue_size,
+		       const unsigned int socket_id)
+{
+	const struct rte_memzone *mz;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != 0) {
+		if (((size_t)queue_size <= mz->len) &&
+		    ((socket_id == (SOCKET_ID_ANY & 0xffff)) ||
+		     (socket_id == (mz->socket_id & 0xffff)))) {
+			ZSDA_LOG(DEBUG,
+				 "re-use memzone already allocated for %s",
+				 queue_name);
+			return mz;
+		}
+		ZSDA_LOG(ERR, E_MALLOC);
+		return NULL;
+	}
+
+	mz = rte_memzone_reserve_aligned(queue_name, queue_size,
+					   (int)(socket_id & 0xfff),
+					   RTE_MEMZONE_IOVA_CONTIG, queue_size);
+
+	return mz;
+}
+
+/* Create one hardware ring (TX work queue or RX completion queue).
+ *
+ * Picks ring number and descriptor size for @dir from the qp config,
+ * queries the hardware head/tail/cycle state, reserves a DMA memzone
+ * for the ring and programs its base address into the device.
+ * Returns 0, -EFAULT when the hardware query fails, or -ENOMEM.
+ */
+static int
+zsda_queue_create(const uint32_t dev_id, struct zsda_queue *queue,
+		  const struct zsda_qp_config *qp_conf, const uint8_t dir)
+{
+	void *io_addr;
+	const struct rte_memzone *qp_mz;
+	struct qinfo qcfg = {0};
+
+	uint16_t desc_size = ((dir == RING_DIR_TX) ? qp_conf->hw->tx_msg_size
+						   : qp_conf->hw->rx_msg_size);
+	unsigned int queue_size_bytes = qp_conf->nb_descriptors * desc_size;
+
+	queue->hw_queue_number =
+		((dir == RING_DIR_TX) ? qp_conf->hw->tx_ring_num
+				      : qp_conf->hw->rx_ring_num);
+
+	struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+	struct zsda_pci_device *zsda_dev =
+		(struct zsda_pci_device *)zsda_devs[dev_id].mz->addr;
+
+	/* The query result was previously used unchecked; fail early
+	 * instead of setting the ring up from a stale/zeroed qcfg.
+	 */
+	if (zsda_get_queue_cfg_by_id(zsda_dev, queue->hw_queue_number,
+				     &qcfg)) {
+		ZSDA_LOG(ERR, "get queue cfg!");
+		return -EFAULT;
+	}
+
+	if (dir == RING_DIR_TX)
+		snprintf(queue->memz_name, sizeof(queue->memz_name),
+			 "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,
+			 qp_conf->service_str, "qptxmem",
+			 queue->hw_queue_number);
+	else
+		snprintf(queue->memz_name, sizeof(queue->memz_name),
+			 "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,
+			 qp_conf->service_str, "qprxmem",
+			 queue->hw_queue_number);
+
+	qp_mz = zsda_queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+				       rte_socket_id());
+	if (qp_mz == NULL) {
+		ZSDA_LOG(ERR, E_MALLOC);
+		return -ENOMEM;
+	}
+
+	queue->base_addr = (uint8_t *)qp_mz->addr;
+	queue->base_phys_addr = qp_mz->iova;
+	queue->modulo_mask = MAX_NUM_OPS;
+	queue->msg_size = desc_size;
+
+	/* TX uses the work-queue pointers, RX the completion-queue
+	 * pointers.
+	 */
+	queue->head = (dir == RING_DIR_TX) ? qcfg.wq_head : qcfg.cq_head;
+	queue->tail = (dir == RING_DIR_TX) ? qcfg.wq_tail : qcfg.cq_tail;
+
+	if ((queue->head == 0) && (queue->tail == 0))
+		qcfg.cycle += 1;
+
+	queue->valid = qcfg.cycle & (ZSDA_MAX_CYCLE - 1);
+	queue->queue_size = ZSDA_MAX_DESC;
+	queue->cycle_size = ZSDA_MAX_CYCLE;
+	queue->io_addr = pci_dev->mem_resource[0].addr;
+
+	memset(queue->base_addr, 0x0, queue_size_bytes);
+	io_addr = pci_dev->mem_resource[0].addr;
+
+	if (dir == RING_DIR_TX)
+		ZSDA_CSR_WQ_RING_BASE(io_addr, queue->hw_queue_number,
+				      queue->base_phys_addr);
+	else
+		ZSDA_CSR_CQ_RING_BASE(io_addr, queue->hw_queue_number,
+				      queue->base_phys_addr);
+
+	return 0;
+}
+
+/* Free the memzone backing @queue, zeroing the ring contents first.
+ * Missing queue or memzone is only logged at DEBUG level.
+ */
+static void
+zsda_queue_delete(const struct zsda_queue *queue)
+{
+	const struct rte_memzone *mz;
+	int status;
+
+	if (queue == NULL) {
+		ZSDA_LOG(DEBUG, "Invalid queue");
+		return;
+	}
+
+	mz = rte_memzone_lookup(queue->memz_name);
+	if (mz != NULL) {
+		memset(queue->base_addr, 0x0,
+		       (uint16_t)(queue->queue_size * queue->msg_size));
+		status = rte_memzone_free(mz);
+		if (status != 0)
+			ZSDA_LOG(ERR, E_FREE);
+	} else
+		ZSDA_LOG(DEBUG, "queue %s doesn't exist", queue->memz_name);
+}
+
+/* Allocate the per-descriptor op-cookie storage for one service.
+ *
+ * Creates (or re-uses) a mempool named after driver/service/qp id,
+ * then pre-fetches one zeroed cookie per descriptor into the
+ * op_cookies pointer array. On failure the pool and the array are
+ * released and -ENOMEM/-EFAULT is returned.
+ */
+static int
+zsda_cookie_init(const uint32_t dev_id, struct zsda_qp **qp_addr,
+	    const uint16_t queue_pair_id,
+	    const struct zsda_qp_config *zsda_qp_conf)
+{
+	struct zsda_qp *qp = *qp_addr;
+	struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+	char op_cookie_pool_name[RTE_RING_NAMESIZE];
+	uint32_t i;
+	enum zsda_service_type type = zsda_qp_conf->service_type;
+
+	/* Warn only; the setup continues with the given count. */
+	if (zsda_qp_conf->nb_descriptors != ZSDA_MAX_DESC)
+		ZSDA_LOG(ERR, "Can't create qp for %u descriptors",
+			 zsda_qp_conf->nb_descriptors);
+
+	qp->srv[type].nb_descriptors = zsda_qp_conf->nb_descriptors;
+
+	qp->srv[type].op_cookies = rte_zmalloc_socket(
+		"zsda PMD op cookie pointer",
+		zsda_qp_conf->nb_descriptors *
+			sizeof(*qp->srv[type].op_cookies),
+		RTE_CACHE_LINE_SIZE, zsda_qp_conf->socket_id);
+
+	if (qp->srv[type].op_cookies == NULL) {
+		ZSDA_LOG(ERR, E_MALLOC);
+		return -ENOMEM;
+	}
+
+	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s%d_cks_%s_qp%hu",
+		 pci_dev->driver->driver.name, dev_id,
+		 zsda_qp_conf->service_str, queue_pair_id);
+
+	qp->srv[type].op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+	if (qp->srv[type].op_cookie_pool == NULL)
+		qp->srv[type].op_cookie_pool = rte_mempool_create(
+			op_cookie_pool_name, qp->srv[type].nb_descriptors,
+			zsda_qp_conf->cookie_size, 64, 0, NULL, NULL, NULL,
+			NULL, (int)(rte_socket_id() & 0xfff), 0);
+	if (!qp->srv[type].op_cookie_pool) {
+		ZSDA_LOG(ERR, E_CREATE);
+		goto exit;
+	}
+
+	for (i = 0; i < qp->srv[type].nb_descriptors; i++) {
+		if (rte_mempool_get(qp->srv[type].op_cookie_pool,
+				    &qp->srv[type].op_cookies[i])) {
+			ZSDA_LOG(ERR, "ZSDA PMD Cannot get op_cookie");
+			goto exit;
+		}
+		memset(qp->srv[type].op_cookies[i], 0,
+		       zsda_qp_conf->cookie_size);
+	}
+	return 0;
+
+exit:
+	/* Freeing the pool releases any cookies already taken. */
+	if (qp->srv[type].op_cookie_pool)
+		rte_mempool_free(qp->srv[type].op_cookie_pool);
+	rte_free(qp->srv[type].op_cookies);
+
+	return -EFAULT;
+}
+
+/* Set up the TX and RX rings plus op-cookie storage for one service on
+ * a queue pair.
+ *
+ * The service's `used` flag is set only after every step succeeded;
+ * previously it was unconditionally set true, marking half-initialized
+ * services as usable after a cookie-init failure.
+ * Returns 0, -EINVAL for a bad service type or unmapped BAR, or the
+ * ring/cookie setup error.
+ */
+int
+zsda_queue_pair_setup(const uint32_t dev_id, struct zsda_qp **qp_addr,
+		      const uint16_t queue_pair_id,
+		      const struct zsda_qp_config *zsda_qp_conf)
+{
+	struct zsda_qp *qp = *qp_addr;
+	struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+	int ret = 0;
+	enum zsda_service_type type = zsda_qp_conf->service_type;
+
+	if (type >= ZSDA_SERVICE_INVALID) {
+		ZSDA_LOG(ERR, "Failed! service type");
+		return -EINVAL;
+	}
+
+	if (pci_dev->mem_resource[0].addr == NULL) {
+		ZSDA_LOG(ERR, E_NULL);
+		return -EINVAL;
+	}
+
+	if (zsda_queue_create(dev_id, &(qp->srv[type].tx_q), zsda_qp_conf,
+			      RING_DIR_TX) != 0) {
+		ZSDA_LOG(ERR, E_CREATE);
+		return -EFAULT;
+	}
+
+	if (zsda_queue_create(dev_id, &(qp->srv[type].rx_q), zsda_qp_conf,
+			      RING_DIR_RX) != 0) {
+		ZSDA_LOG(ERR, E_CREATE);
+		zsda_queue_delete(&(qp->srv[type].tx_q));
+		return -EFAULT;
+	}
+
+	ret = zsda_cookie_init(dev_id, qp_addr, queue_pair_id, zsda_qp_conf);
+	if (ret) {
+		zsda_queue_delete(&(qp->srv[type].tx_q));
+		zsda_queue_delete(&(qp->srv[type].rx_q));
+		qp->srv[type].used = false;
+		return ret;
+	}
+	qp->srv[type].used = true;
+	return 0;
+}
+
+/* Release a queue pair: delete both rings of every in-use service,
+ * return all cookies to their mempool, free the pool and the pointer
+ * array, then free the qp itself and NULL the caller's pointer.
+ * A NULL qp is treated as already freed.
+ */
+int
+zsda_queue_pair_release(struct zsda_qp **qp_addr)
+{
+	struct zsda_qp *qp = *qp_addr;
+	uint32_t i;
+	enum zsda_service_type type;
+
+	if (qp == NULL) {
+		ZSDA_LOG(DEBUG, "qp already freed");
+		return 0;
+	}
+
+	for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+		if (!qp->srv[type].used)
+			continue;
+
+		zsda_queue_delete(&(qp->srv[type].tx_q));
+		zsda_queue_delete(&(qp->srv[type].rx_q));
+		qp->srv[type].used = false;
+		for (i = 0; i < qp->srv[type].nb_descriptors; i++)
+			rte_mempool_put(qp->srv[type].op_cookie_pool,
+					qp->srv[type].op_cookies[i]);
+
+		if (qp->srv[type].op_cookie_pool)
+			rte_mempool_free(qp->srv[type].op_cookie_pool);
+
+		rte_free(qp->srv[type].op_cookies);
+	}
+
+	rte_free(qp);
+	*qp_addr = NULL;
+
+	return 0;
+}
+
+/* Build a hardware scatter-gather list from an mbuf chain.
+ *
+ * Walks @buf starting at @offset, emitting one zsda_buf entry per
+ * segment until @remain_len bytes are covered or the SGL is full.
+ * Every ZSDA_SGL_FRAGMENT_SIZE entries a SGL_TYPE_NEXT_LIST link to
+ * the next fragment (inside the same physically contiguous @sgl at
+ * @sgl_phy_addr) is inserted. When @comp_head_info is given, its
+ * header buffer is emitted as the first data entry. The final entry
+ * is marked SGL_TYPE_LAST_PHYS_ADDR.
+ *
+ * Returns ZSDA_SUCCESS, ZSDA_FAILED when nothing was emitted, or
+ * -EINVAL when the chain does not fit in ZSDA_SGL_MAX_NUMBER entries.
+ *
+ * NOTE(review): the link entry sets `.len = SGL_TYPE_PHYS_ADDR` —
+ * assigning a type constant to a length field looks like a slip;
+ * confirm against the hardware SGL layout.
+ */
+int
+zsda_fill_sgl(const struct rte_mbuf *buf, uint32_t offset, struct zsda_sgl *sgl,
+	      const phys_addr_t sgl_phy_addr, uint32_t remain_len,
+	      struct comp_head_info *comp_head_info)
+{
+	uint32_t nr;
+	uint16_t put_in_len;
+	bool head_set = false;
+
+	for (nr = 0; (buf && (nr < (ZSDA_SGL_MAX_NUMBER - 1)));) {
+		/* Skip whole segments that lie before @offset. */
+		if (offset >= rte_pktmbuf_data_len(buf)) {
+			offset -= rte_pktmbuf_data_len(buf);
+			buf = buf->next;
+			continue;
+		}
+		memset(&(sgl->buffers[nr]), 0, sizeof(struct zsda_buf));
+		if ((nr > 0) && (((nr + 1) % ZSDA_SGL_FRAGMENT_SIZE) == 0) &&
+		    (buf->next != NULL)) {
+			sgl->buffers[nr].len = SGL_TYPE_PHYS_ADDR;
+			sgl->buffers[nr].addr =
+				sgl_phy_addr +
+				((nr + 1) * sizeof(struct zsda_buf));
+			sgl->buffers[nr].type = SGL_TYPE_NEXT_LIST;
+			++nr;
+			continue;
+		}
+		if (comp_head_info && !head_set) {
+			sgl->buffers[nr].len = comp_head_info->head_len;
+			sgl->buffers[nr].addr = comp_head_info->head_phys_addr;
+			sgl->buffers[nr].type = SGL_TYPE_PHYS_ADDR;
+			++nr;
+			head_set = true;
+			remain_len -= comp_head_info->head_len;
+			continue;
+		} else {
+			put_in_len = rte_pktmbuf_data_len(buf) - (offset & 0xffff);
+			if (remain_len <= put_in_len)
+				put_in_len = remain_len;
+			remain_len -= put_in_len;
+
+			sgl->buffers[nr].len = put_in_len;
+			sgl->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+			sgl->buffers[nr].type = SGL_TYPE_PHYS_ADDR;
+		}
+		offset = 0;
+		++nr;
+		buf = buf->next;
+
+		if (remain_len == 0)
+			break;
+	}
+
+	if (nr == 0) {
+		ZSDA_LOG(ERR, "In fill_sgl, nr == 0");
+		return ZSDA_FAILED;
+	}
+
+	sgl->buffers[nr - 1].type = SGL_TYPE_LAST_PHYS_ADDR;
+
+	if (buf) {
+		if (unlikely(buf->next)) {
+			if (nr == (ZSDA_SGL_MAX_NUMBER - 1)) {
+				ZSDA_LOG(ERR, "ERR! segs size (%u)",
+					 (ZSDA_SGL_MAX_NUMBER));
+				return -EINVAL;
+			}
+		}
+	}
+
+	return ZSDA_SUCCESS;
+}
+
+/* Count the entries of an SGL up to and including its terminator.
+ *
+ * NOTE(review): the magic value 1 presumably matches
+ * SGL_TYPE_LAST_PHYS_ADDR — use the named constant if so.
+ * Returns ZSDA_FAILED when no terminator is found within
+ * ZSDA_SGL_MAX_NUMBER entries.
+ */
+int
+zsda_get_sgl_num(const struct zsda_sgl *sgl)
+{
+	int sgl_num = 0;
+
+	while (sgl->buffers[sgl_num].type != 1) {
+		sgl_num++;
+		if (sgl_num >= ZSDA_SGL_MAX_NUMBER)
+			return ZSDA_FAILED;
+	}
+	sgl_num++;
+	return sgl_num;
+}
+
+/* Find the next unused op cookie starting at the queue's tail.
+ *
+ * Scans at most one full lap of the ring; stores the masked slot index
+ * in @idx and returns 0 when a free cookie is found, -EINVAL when the
+ * ring is full. The previous advance, `tail = zsda_modulo_16(tail++, …)`,
+ * modified `tail` twice without sequencing (undefined behavior,
+ * CERT EXP30-C) and in practice never moved past the first slot.
+ */
+static int
+zsda_find_next_free_cookie(const struct zsda_queue *queue, void **op_cookie,
+		      uint16_t *idx)
+{
+	uint16_t old_tail = queue->tail;
+	uint16_t tail = queue->tail;
+	struct zsda_op_cookie *cookie;
+
+	do {
+		cookie = (struct zsda_op_cookie *)op_cookie[tail];
+		if (!cookie->used) {
+			*idx = tail & (queue->queue_size - 1);
+			return 0;
+		}
+		tail = zsda_modulo_16(tail + 1, queue->modulo_mask);
+	} while (old_tail != tail);
+
+	return -EINVAL;
+}
+
+/* Dispatch one op to the matching service's TX ring.
+ *
+ * The first in-use service whose match() accepts @op gets it: a free
+ * cookie slot is located, the service's tx_cb builds the WQE, then the
+ * shadow tail advances (wrapping over queue_size - 1) and the cycle
+ * `valid` counter increments on wrap-around. The doorbell itself is
+ * written later by zsda_tx_write_tail(), hence pushed_wqe++.
+ * Returns 0, -EBUSY when the ring is full, or the tx_cb error.
+ */
+static int
+zsda_enqueue(void *op, struct zsda_qp *qp)
+{
+	uint16_t new_tail;
+	enum zsda_service_type type;
+	void **op_cookie;
+	int ret = 0;
+	struct zsda_queue *queue;
+
+	for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+		if (qp->srv[type].used) {
+			if (!qp->srv[type].match(op))
+				continue;
+			queue = &qp->srv[type].tx_q;
+			op_cookie = qp->srv[type].op_cookies;
+
+			if (zsda_find_next_free_cookie(queue, op_cookie,
+						  &new_tail)) {
+				ret = -EBUSY;
+				break;
+			}
+			ret = qp->srv[type].tx_cb(op, queue, op_cookie,
+						  new_tail);
+			if (ret) {
+				qp->srv[type].stats.enqueue_err_count++;
+				ZSDA_LOG(ERR, "Failed! config wqe");
+				break;
+			}
+			qp->srv[type].stats.enqueued_count++;
+
+			queue->tail = zsda_modulo_16(new_tail + 1,
+						     queue->queue_size - 1);
+
+			/* Wrap-around: bump the cycle-validity counter. */
+			if (new_tail > queue->tail)
+				queue->valid =
+					zsda_modulo_8(queue->valid + 1,
+					(uint8_t)(queue->cycle_size - 1));
+
+			queue->pushed_wqe++;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* Ring the TX doorbell if any WQEs were pushed since the last write,
+ * then reset the pending counter.
+ */
+static void
+zsda_tx_write_tail(struct zsda_queue *queue)
+{
+	if (queue->pushed_wqe != 0)
+		WRITE_CSR_WQ_TAIL(queue->io_addr, queue->hw_queue_number,
+				  queue->tail);
+
+	queue->pushed_wqe = 0;
+}
+
+/* Enqueue a burst of ops, stopping at the first failure, then ring
+ * each in-use service's doorbell once. Returns the number of ops
+ * actually enqueued (0 if nb_ops exceeds ZSDA_MAX_DESC).
+ */
+uint16_t
+zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, uint16_t nb_ops)
+{
+	enum zsda_service_type srv_type;
+	uint16_t sent = 0;
+	uint16_t idx;
+
+	if (nb_ops > ZSDA_MAX_DESC) {
+		ZSDA_LOG(ERR, "Enqueue number bigger than %d", ZSDA_MAX_DESC);
+		return 0;
+	}
+
+	for (idx = 0; idx < nb_ops; idx++) {
+		if (zsda_enqueue(ops[idx], qp) < 0)
+			break;
+		sent++;
+	}
+
+	/* One doorbell write per service, not per op. */
+	for (srv_type = 0; srv_type < ZSDA_SERVICE_INVALID; srv_type++) {
+		if (qp->srv[srv_type].used)
+			zsda_tx_write_tail(&qp->srv[srv_type].tx_q);
+	}
+
+	return sent;
+}
+
+/* Drain valid CQEs from one service's completion queue.
+ *
+ * For each valid CQE: look up the op cookie by CQE sid, count it as
+ * success or error, hand the result to the service's rx_cb, mark the
+ * cookie free, advance the CQ head (shadow and doorbell), and zero
+ * the consumed CQE. Collected ops go into @ops; *nb is advanced and
+ * bounded by @nb_ops.
+ */
+static void
+zsda_dequeue(struct qp_srv *srv, void **ops, const uint16_t nb_ops, uint16_t *nb)
+{
+	uint16_t head;
+	struct zsda_cqe *cqe;
+	struct zsda_queue *queue = &srv->rx_q;
+	struct zsda_op_cookie *cookie;
+	head = queue->head;
+
+	while (*nb < nb_ops) {
+		cqe = (struct zsda_cqe *)((uint8_t *)queue->base_addr + head * queue->msg_size);
+
+		if (!CQE_VALID(cqe->err1))
+			break;
+		cookie = (struct zsda_op_cookie *)srv->op_cookies[cqe->sid];
+
+		/* Tail-less decompression flows may report a benign
+		 * err0; treat it as success.
+		 */
+		if (cookie->decomp_no_tail && CQE_ERR0_RIGHT(cqe->err0))
+			cqe->err0 = 0x0000;
+
+		if (CQE_ERR0(cqe->err0) || CQE_ERR1(cqe->err1)) {
+			ZSDA_LOG(ERR,
+				 "ERR! Cqe, opcode 0x%x, sid 0x%x, "
+				 "tx_real_length 0x%x, err0 0x%x, err1 0x%x",
+				 cqe->op_code, cqe->sid, cqe->tx_real_length,
+				 cqe->err0, cqe->err1);
+			srv->stats.dequeue_err_count++;
+		} else
+			srv->stats.dequeued_count++;
+
+		ops[*nb] = cookie->op;
+		srv->rx_cb(cookie, cqe);
+		(*nb)++;
+		cookie->used = false;
+
+		head = zsda_modulo_16(head + 1, queue->modulo_mask);
+		queue->head = head;
+		WRITE_CSR_CQ_HEAD(queue->io_addr, queue->hw_queue_number, head);
+		memset(cqe, 0x0, sizeof(struct zsda_cqe));
+	}
+}
+
+/* Collect up to @nb_ops completed ops, draining each in-use service's
+ * completion queue in turn. Returns the number of ops gathered.
+ */
+uint16_t
+zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops)
+{
+	uint32_t srv_type;
+	uint16_t collected = 0;
+
+	for (srv_type = 0; srv_type < ZSDA_SERVICE_INVALID; srv_type++) {
+		if (!qp->srv[srv_type].used)
+			continue;
+		zsda_dequeue(&qp->srv[srv_type], ops, nb_ops, &collected);
+		if (collected >= nb_ops)
+			return nb_ops;
+	}
+
+	return collected;
+}
+
+/* Set up a queue pair for one service and pre-compute the IOVA of the
+ * embedded comp_head / sgl_src / sgl_dst fields inside every cookie,
+ * so the datapath can hand them to hardware without translation.
+ * Returns 0 or the queue-pair setup error.
+ */
+int
+zsda_common_setup_qp(uint32_t zsda_dev_id, struct zsda_qp **qp_addr,
+		const uint16_t queue_pair_id, const struct zsda_qp_config *conf)
+{
+	uint32_t i;
+	int ret = 0;
+	struct zsda_qp *qp;
+	rte_iova_t cookie_phys_addr;
+
+	ret = zsda_queue_pair_setup(zsda_dev_id, qp_addr, queue_pair_id, conf);
+	if (ret)
+		return ret;
+
+	qp = (struct zsda_qp *)*qp_addr;
+
+	for (i = 0; i < qp->srv[conf->service_type].nb_descriptors; i++) {
+		struct zsda_op_cookie *cookie =
+			qp->srv[conf->service_type].op_cookies[i];
+		cookie_phys_addr = rte_mempool_virt2iova(cookie);
+
+		cookie->comp_head_phys_addr = cookie_phys_addr +
+			offsetof(struct zsda_op_cookie, comp_head);
+
+		cookie->sgl_src_phys_addr = cookie_phys_addr +
+			offsetof(struct zsda_op_cookie, sgl_src);
+
+		cookie->sgl_dst_phys_addr = cookie_phys_addr +
+			offsetof(struct zsda_op_cookie, sgl_dst);
+	}
+	return ret;
+}
diff --git a/drivers/common/zsda/zsda_qp.h b/drivers/common/zsda/zsda_qp.h
new file mode 100644
index 0000000000..11943a9be4
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_QP_H_
+#define _ZSDA_QP_H_
+
+#define WQ_CSR_LBASE 0x1000
+#define WQ_CSR_UBASE 0x1004
+#define CQ_CSR_LBASE 0x1400
+#define CQ_CSR_UBASE 0x1404
+#define WQ_TAIL	     0x1800
+#define CQ_HEAD	     0x1804
+
+/**
+ * Structure associated with each queue.
+ */
+struct zsda_queue {
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+	uint8_t *io_addr;
+	uint8_t *base_addr;	   /* Base address */
+	rte_iova_t base_phys_addr; /* Queue physical address */
+	uint16_t head;		   /* Shadow copy of the head */
+	uint16_t tail;		   /* Shadow copy of the tail */
+	uint16_t modulo_mask;
+	uint16_t msg_size;
+	uint16_t queue_size;
+	uint16_t cycle_size;
+	uint16_t pushed_wqe;
+
+	uint8_t hw_queue_number;
+	uint32_t csr_head; /* last written head value */
+	uint32_t csr_tail; /* last written tail value */
+
+	uint8_t valid;
+	uint16_t sid;
+};
+
+typedef void (*rx_callback)(void *cookie_in, const struct zsda_cqe *cqe);
+typedef int (*tx_callback)(void *op_in, const struct zsda_queue *queue,
+			   void **op_cookies, const uint16_t new_tail);
+typedef int (*srv_match)(const void *op_in);
+
+struct qp_srv {
+	bool used;
+	struct zsda_queue tx_q;
+	struct zsda_queue rx_q;
+	rx_callback rx_cb;
+	tx_callback tx_cb;
+	srv_match match;
+	struct zsda_common_stat stats;
+	struct rte_mempool *op_cookie_pool;
+	void **op_cookies;
+	uint16_t nb_descriptors;
+};
+
+struct zsda_qp {
+	struct qp_srv srv[ZSDA_MAX_SERVICES];
+
+	uint16_t max_inflights;
+	uint16_t min_enq_burst_threshold;
+	void *mmap_bar_addr;
+};
+
+struct zsda_qp_config {
+	enum zsda_service_type service_type;
+	const struct zsda_qp_hw_data *hw;
+	uint16_t nb_descriptors;
+	uint32_t cookie_size;
+	int socket_id;
+	const char *service_str;
+};
+
+struct comp_head_info {
+	uint32_t head_len;
+	phys_addr_t head_phys_addr;
+};
+
+extern uint8_t zsda_num_used_qps;
+
+struct zsda_qp_hw *
+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+			const enum zsda_service_type service);
+uint16_t zsda_qps_per_service(const struct zsda_pci_device *zsda_pci_dev,
+			      const enum zsda_service_type service);
+
+uint16_t zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev);
+uint16_t zsda_crypto_max_nb_qps(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+
+/* CSR write macro */
+#define ZSDA_CSR_WR(csrAddr, csrOffset, val)                                   \
+	rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+#define ZSDA_CSR_WC_WR(csrAddr, csrOffset, val)                                \
+	rte_write32_wc(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ZSDA_CSR_RD(csrAddr, csrOffset)                                        \
+	rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+/* Program the 64-bit WQ ring base for a hw ring: the value is split
+ * into low/high 32-bit halves written to the per-ring WQ_CSR_LBASE /
+ * WQ_CSR_UBASE registers.  Log strings fixed: "l_basg" -> "l_base",
+ * "h_base" -> "u_base" (matches the variable actually printed).
+ */
+#define ZSDA_CSR_WQ_RING_BASE(csr_base_addr, ring, value)                      \
+	do {                                                                   \
+		uint32_t l_base = 0, u_base = 0;                               \
+		l_base = (uint32_t)(value & 0xFFFFFFFF);                       \
+		u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);    \
+		ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_LBASE,         \
+			    l_base);                                           \
+		ZSDA_LOG(INFO, "l_base - offset:0x%x, value:0x%x",             \
+			 ((ring << 3) + WQ_CSR_LBASE), l_base);                \
+		ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_UBASE,         \
+			    u_base);                                           \
+		ZSDA_LOG(INFO, "u_base - offset:0x%x, value:0x%x",             \
+			 ((ring << 3) + WQ_CSR_UBASE), u_base);                \
+	} while (0)
+
+#define ZSDA_CSR_CQ_RING_BASE(csr_base_addr, ring, value)                      \
+	do {                                                                   \
+		uint32_t l_base = 0, u_base = 0;                               \
+		l_base = (uint32_t)(value & 0xFFFFFFFF);                       \
+		u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);    \
+		ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_LBASE,         \
+			    l_base);                                           \
+		ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_UBASE,         \
+			    u_base);                                           \
+	} while (0)
+
+/* NOTE(review): READ_CSR_WQ_HEAD reads the WQ_TAIL register; confirm
+ * the hw really reports the consumed WQ head at that offset.
+ */
+#define READ_CSR_WQ_HEAD(csr_base_addr, ring)                                  \
+	ZSDA_CSR_RD(csr_base_addr, WQ_TAIL + (ring << 3))
+#define WRITE_CSR_WQ_TAIL(csr_base_addr, ring, value)                          \
+	ZSDA_CSR_WC_WR(csr_base_addr, WQ_TAIL + (ring << 3), value)
+/* Fixed copy-paste error: the CQ head must be read from the CQ_HEAD
+ * register, not WQ_TAIL (WRITE_CSR_CQ_HEAD below already uses CQ_HEAD).
+ */
+#define READ_CSR_CQ_HEAD(csr_base_addr, ring)                                  \
+	ZSDA_CSR_RD(csr_base_addr, CQ_HEAD + (ring << 3))
+#define WRITE_CSR_CQ_HEAD(csr_base_addr, ring, value)                          \
+	ZSDA_CSR_WC_WR(csr_base_addr, CQ_HEAD + (ring << 3), value)
+
+uint16_t zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops);
+uint16_t zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops);
+
+int zsda_queue_pair_setup(uint32_t dev_id, struct zsda_qp **qp_addr,
+			  const uint16_t queue_pair_id,
+			  const struct zsda_qp_config *zsda_qp_conf);
+
+int zsda_queue_pair_release(struct zsda_qp **qp_addr);
+int zsda_fill_sgl(const struct rte_mbuf *buf, uint32_t offset,
+		  struct zsda_sgl *sgl, const phys_addr_t sgl_phy_addr,
+		  uint32_t remain_len, struct comp_head_info *comp_head_info);
+
+int zsda_get_sgl_num(const struct zsda_sgl *sgl);
+int zsda_sgl_opt_addr_lost(struct rte_mbuf *mbuf);
+
+int zsda_common_setup_qp(uint32_t dev_id, struct zsda_qp **qp_addr,
+		    const uint16_t queue_pair_id,
+		    const struct zsda_qp_config *conf);
+
+void zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
+		    struct zsda_common_stat *stats);
+void zsda_stats_reset(void **queue_pairs, const uint32_t nb_queue_pairs);
+
+#endif /* _ZSDA_QP_H_ */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 69437 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v4 4/8] zsda: add zsda compressdev driver and interface
  2024-09-09  8:11 [PATCH v4 2/8] zsda: add support for zsdadev operations Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 3/8] zsda: add support for queue operation Hanxiao Li
@ 2024-09-09  8:11 ` Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 5/8] zsda: modify files for introducing zsda cryptodev Hanxiao Li
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Hanxiao Li @ 2024-09-09  8:11 UTC (permalink / raw)
  To: dev; +Cc: wang.yong19, Hanxiao Li


[-- Attachment #1.1.1: Type: text/plain, Size: 26538 bytes --]

Add zsda compressdev driver and enqueue, dequeue interface.

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 drivers/compress/zsda/zsda_comp.c     | 320 ++++++++++++++++++
 drivers/compress/zsda/zsda_comp.h     |  27 ++
 drivers/compress/zsda/zsda_comp_pmd.c | 453 ++++++++++++++++++++++++++
 drivers/compress/zsda/zsda_comp_pmd.h |  39 +++
 drivers/meson.build                   |   1 +
 5 files changed, 840 insertions(+)
 create mode 100644 drivers/compress/zsda/zsda_comp.c
 create mode 100644 drivers/compress/zsda/zsda_comp.h
 create mode 100644 drivers/compress/zsda/zsda_comp_pmd.c
 create mode 100644 drivers/compress/zsda/zsda_comp_pmd.h

diff --git a/drivers/compress/zsda/zsda_comp.c b/drivers/compress/zsda/zsda_comp.c
new file mode 100644
index 0000000000..0429c02855
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp.c
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "zsda_comp.h"
+
+#include <zlib.h>
+
+#define ZLIB_HEADER_SIZE 2
+#define ZLIB_TRAILER_SIZE 4
+#define GZIP_HEADER_SIZE 10
+#define GZIP_TRAILER_SIZE 8
+#define CHECKSUM_SIZE 4
+
+static uint32_t zsda_read_chksum(uint8_t *data_addr, uint8_t op_code,
+				 uint32_t produced);
+
+/* Return 1 when the op is a stateless compress request this service
+ * can handle, 0 otherwise.
+ */
+int
+zsda_comp_match(const void *op_in)
+{
+	const struct rte_comp_op *op = op_in;
+	const struct zsda_comp_xform *xform = op->private_xform;
+
+	return (op->op_type == RTE_COMP_OP_STATELESS) &&
+	       (xform->type == RTE_COMP_COMPRESS);
+}
+
+/*
+ * Map a (de)compression xform to the hw opcode.  GZIP framing is used
+ * for CRC32 and no-checksum requests, ZLIB framing for ADLER32; any
+ * other combination yields ZSDA_OPC_INVALID.
+ */
+static uint8_t
+get_opcode(const struct zsda_comp_xform *xform)
+{
+	if (xform->type == RTE_COMP_COMPRESS) {
+		if (xform->checksum_type == RTE_COMP_CHECKSUM_NONE ||
+		    xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+			return ZSDA_OPC_COMP_GZIP;
+		else if (xform->checksum_type == RTE_COMP_CHECKSUM_ADLER32)
+			return ZSDA_OPC_COMP_ZLIB;
+	}
+	if (xform->type == RTE_COMP_DECOMPRESS) {
+		if (xform->checksum_type == RTE_COMP_CHECKSUM_CRC32 ||
+		    xform->checksum_type == RTE_COMP_CHECKSUM_NONE)
+			return ZSDA_OPC_DECOMP_GZIP;
+		else if (xform->checksum_type == RTE_COMP_CHECKSUM_ADLER32)
+			return ZSDA_OPC_DECOMP_ZLIB;
+	}
+
+	return ZSDA_OPC_INVALID;
+}
+
+/*
+ * Build a compression work-queue entry at slot new_tail.
+ * Fills the source/destination SGLs from the op's mbufs, records the
+ * op in the per-slot cookie and writes the descriptor in place in the
+ * tx ring.  Returns 0 on success or a negative errno.
+ */
+int
+zsda_build_comp_request(void *op_in, const struct zsda_queue *queue,
+		   void **op_cookies, const uint16_t new_tail)
+{
+	struct rte_comp_op *op = op_in;
+	struct zsda_comp_xform *xform =
+		(struct zsda_comp_xform *)op->private_xform;
+	/* descriptor slot is addressed directly inside the ring memory */
+	struct zsda_wqe_comp *wqe =
+		(struct zsda_wqe_comp *)(queue->base_addr +
+					 (new_tail * queue->msg_size));
+
+	struct zsda_op_cookie *cookie =
+		(struct zsda_op_cookie *)op_cookies[new_tail];
+	struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+	struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+	struct comp_head_info comp_head_info;
+
+	uint8_t opcode;
+	int ret = 0;
+	uint32_t op_offset;
+	uint32_t op_src_len;
+	uint32_t op_dst_len;
+	uint32_t head_len;
+
+	/* in-place operation is not supported */
+	if ((op->m_dst == NULL) || (op->m_dst == op->m_src)) {
+		ZSDA_LOG(ERR, "Failed! m_dst");
+		return -EINVAL;
+	}
+
+	opcode = get_opcode(xform);
+	if (opcode == ZSDA_OPC_INVALID) {
+		ZSDA_LOG(ERR, E_CONFIG);
+		return -EINVAL;
+	}
+
+	cookie->used = true;
+	cookie->sid = new_tail;
+	cookie->op = op;
+
+	/* header length depends on the container format chosen above */
+	if (opcode == ZSDA_OPC_COMP_GZIP)
+		head_len = GZIP_HEADER_SIZE;
+	else if (opcode == ZSDA_OPC_COMP_ZLIB)
+		head_len = ZLIB_HEADER_SIZE;
+	else {
+		ZSDA_LOG(ERR, "Comp, op_code error!");
+		return -EINVAL;
+	}
+
+	comp_head_info.head_len = head_len;
+	comp_head_info.head_phys_addr = cookie->comp_head_phys_addr;
+
+	op_offset = op->src.offset;
+	op_src_len = op->src.length;
+	ret = zsda_fill_sgl(op->m_src, op_offset, sgl_src,
+				   cookie->sgl_src_phys_addr, op_src_len, NULL);
+
+	/* destination must also accommodate the container header */
+	op_offset = op->dst.offset;
+	op_dst_len = op->m_dst->pkt_len - op_offset;
+	op_dst_len += head_len;
+	ret |= zsda_fill_sgl(op->m_dst, op_offset, sgl_dst,
+					cookie->sgl_dst_phys_addr, op_dst_len,
+					&comp_head_info);
+
+	if (ret) {
+		ZSDA_LOG(ERR, E_FUNC);
+		return ret;
+	}
+
+	memset(wqe, 0, sizeof(struct zsda_wqe_comp));
+	wqe->rx_length = op_src_len;
+	wqe->tx_length = op_dst_len;
+	wqe->valid = queue->valid;
+	wqe->op_code = opcode;
+	wqe->sid = cookie->sid;
+	wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+	wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+
+	wqe->rx_addr = cookie->sgl_src_phys_addr;
+	wqe->tx_addr = cookie->sgl_dst_phys_addr;
+
+	return ret;
+}
+
+/* Return 1 when the op is a stateless decompress request this service
+ * can handle, 0 otherwise.
+ */
+int
+zsda_decomp_match(const void *op_in)
+{
+	const struct rte_comp_op *op = op_in;
+	const struct zsda_comp_xform *xform = op->private_xform;
+
+	return (op->op_type == RTE_COMP_OP_STATELESS) &&
+	       (xform->type == RTE_COMP_DECOMPRESS);
+}
+
+/*
+ * Build a decompression work-queue entry at slot new_tail.
+ * A canonical zlib/gzip header is injected in front of the source data
+ * via the cookie's comp_head buffer (comp_head_info), the source and
+ * destination SGLs are filled from the op's mbufs, and the descriptor
+ * is written in place in the tx ring.  Returns 0 or a negative errno.
+ */
+int
+zsda_build_decomp_request(void *op_in, const struct zsda_queue *queue,
+		     void **op_cookies, const uint16_t new_tail)
+{
+	struct rte_comp_op *op = op_in;
+	struct zsda_comp_xform *xform =
+		(struct zsda_comp_xform *)op->private_xform;
+
+	struct zsda_wqe_comp *wqe =
+		(struct zsda_wqe_comp *)(queue->base_addr +
+					 (new_tail * queue->msg_size));
+	struct zsda_op_cookie *cookie =
+		(struct zsda_op_cookie *)op_cookies[new_tail];
+	struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+	struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+	uint8_t opcode;
+	int ret = 0;
+
+	uint32_t op_offset;
+	uint32_t op_src_len;
+	uint32_t op_dst_len;
+
+	uint8_t *head_data;
+	uint16_t head_len;
+	struct comp_head_info comp_head_info;
+	/* fixed container headers prepended to the raw deflate stream */
+	uint8_t head_zlib[ZLIB_HEADER_SIZE] = {0x78, 0xDA};
+	uint8_t head_gzip[GZIP_HEADER_SIZE] = {0x1F, 0x8B, 0x08, 0x00, 0x00,
+					       0x00, 0x00, 0x00, 0x00, 0x03};
+
+	/* in-place operation is not supported */
+	if ((op->m_dst == NULL) || (op->m_dst == op->m_src)) {
+		ZSDA_LOG(ERR, "Failed! m_dst");
+		return -EINVAL;
+	}
+
+	opcode = get_opcode(xform);
+	if (opcode == ZSDA_OPC_INVALID) {
+		ZSDA_LOG(ERR, E_CONFIG);
+		return -EINVAL;
+	}
+
+	cookie->used = true;
+	cookie->sid = new_tail;
+	cookie->op = op;
+
+	if (opcode == ZSDA_OPC_DECOMP_GZIP) {
+		head_data = head_gzip;
+		head_len = GZIP_HEADER_SIZE;
+	} else if (opcode == ZSDA_OPC_DECOMP_ZLIB) {
+		head_data = head_zlib;
+		head_len = ZLIB_HEADER_SIZE;
+	} else {
+		ZSDA_LOG(ERR, "Comp, op_code error!");
+		return -EINVAL;
+	}
+
+	op_offset = op->src.offset;
+	op_src_len = op->src.length;
+	/* the injected header is part of what the hw consumes */
+	op_src_len += head_len;
+	comp_head_info.head_len = head_len;
+	comp_head_info.head_phys_addr = cookie->comp_head_phys_addr;
+	cookie->decomp_no_tail = true;
+	for (int i = 0; i < head_len; i++)
+		cookie->comp_head[i] = head_data[i];
+
+	ret = zsda_fill_sgl(op->m_src, op_offset, sgl_src,
+			    cookie->sgl_src_phys_addr, op_src_len,
+			    &comp_head_info);
+
+	op_offset = op->dst.offset;
+	op_dst_len = op->m_dst->pkt_len - op_offset;
+	ret |= zsda_fill_sgl(op->m_dst, op_offset, sgl_dst,
+			     cookie->sgl_dst_phys_addr, op_dst_len, NULL);
+
+	if (ret) {
+		ZSDA_LOG(ERR, E_FUNC);
+		return ret;
+	}
+
+	memset(wqe, 0, sizeof(struct zsda_wqe_comp));
+
+	wqe->rx_length = op_src_len;
+	wqe->tx_length = op_dst_len;
+	wqe->valid = queue->valid;
+	wqe->op_code = opcode;
+	wqe->sid = cookie->sid;
+	wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+	wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+	wqe->rx_addr = cookie->sgl_src_phys_addr;
+	wqe->tx_addr = cookie->sgl_dst_phys_addr;
+
+	return ret;
+}
+
+/*
+ * Completion callback for (de)compression ops.  Converts the CQE error
+ * bits into the op status, obtains the checksum (read from the trailer
+ * for compression, recomputed with zlib for decompression) and, for
+ * compression, strips the container header/trailer from the reported
+ * produced length and trims the trailer off the mbuf.
+ */
+void
+zsda_comp_callback(void *cookie_in, const struct zsda_cqe *cqe)
+{
+	struct zsda_op_cookie *tmp_cookie = (struct zsda_op_cookie *)cookie_in;
+	struct rte_comp_op *tmp_op = (struct rte_comp_op *)tmp_cookie->op;
+	uint8_t *data_addr =
+		(uint8_t *)tmp_op->m_dst->buf_addr + tmp_op->m_dst->data_off;
+	uint32_t chksum = 0;
+	uint16_t head_len = 0;
+	uint16_t tail_len = 0;
+
+	if (!(CQE_ERR0(cqe->err0) || CQE_ERR1(cqe->err1)))
+		tmp_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+	else {
+		tmp_op->status = RTE_COMP_OP_STATUS_ERROR;
+		return;
+	}
+
+	/* handle chksum */
+	tmp_op->produced = cqe->tx_real_length;
+	if (cqe->op_code == ZSDA_OPC_COMP_ZLIB) {
+		head_len = ZLIB_HEADER_SIZE;
+		tail_len = ZLIB_TRAILER_SIZE;
+		chksum = zsda_read_chksum(data_addr, cqe->op_code,
+					  tmp_op->produced - head_len);
+	} else if (cqe->op_code == ZSDA_OPC_COMP_GZIP) {
+		head_len = GZIP_HEADER_SIZE;
+		tail_len = GZIP_TRAILER_SIZE;
+		chksum = zsda_read_chksum(data_addr, cqe->op_code,
+					  tmp_op->produced - head_len);
+	} else if (cqe->op_code == ZSDA_OPC_DECOMP_ZLIB) {
+		head_len = ZLIB_HEADER_SIZE;
+		tail_len = ZLIB_TRAILER_SIZE;
+		chksum = adler32(0, Z_NULL, 0);
+		chksum = adler32(chksum, data_addr, tmp_op->produced);
+	} else if (cqe->op_code == ZSDA_OPC_DECOMP_GZIP) {
+		head_len = GZIP_HEADER_SIZE;
+		tail_len = GZIP_TRAILER_SIZE;
+		chksum = crc32(0, Z_NULL, 0);
+		chksum = crc32(chksum, data_addr, tmp_op->produced);
+	}
+	tmp_op->output_chksum = chksum;
+
+	if (cqe->op_code == ZSDA_OPC_COMP_ZLIB ||
+	    cqe->op_code == ZSDA_OPC_COMP_GZIP) {
+		/* remove tail data; was hard-coded to GZIP_TRAILER_SIZE,
+		 * which over-trimmed zlib output by 4 bytes
+		 */
+		rte_pktmbuf_trim(tmp_op->m_dst, tail_len);
+		/* report only the payload, without head and tail length */
+		tmp_op->produced = tmp_op->produced - (head_len + tail_len);
+	}
+
+}
+
+/*
+ * Read the checksum stored in the trailer of the produced output.
+ * The zlib trailer bytes are assembled most-significant first
+ * (big-endian adler32), the gzip trailer bytes least-significant first
+ * (little-endian crc32).  @produced is the output length excluding the
+ * container header.
+ */
+static uint32_t
+zsda_read_chksum(uint8_t *data_addr, uint8_t op_code, uint32_t produced)
+{
+	uint8_t *chk_addr;
+	uint32_t chksum = 0;
+	int i = 0;
+
+	if (op_code == ZSDA_OPC_COMP_ZLIB) {
+		chk_addr = data_addr + produced - ZLIB_TRAILER_SIZE;
+		for (i = 0; i < CHECKSUM_SIZE; i++) {
+			chksum = chksum << 8;
+			chksum |= (*(chk_addr + i));
+		}
+	} else if (op_code == ZSDA_OPC_COMP_GZIP) {
+		chk_addr = data_addr + produced - GZIP_TRAILER_SIZE;
+		for (i = 0; i < CHECKSUM_SIZE; i++)
+			chksum |= (*(chk_addr + i) << (i * 8));
+	}
+
+	return chksum;
+}
diff --git a/drivers/compress/zsda/zsda_comp.h b/drivers/compress/zsda/zsda_comp.h
new file mode 100644
index 0000000000..697d3e3564
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_COMP_H_
+#define _ZSDA_COMP_H_
+
+#include <rte_compressdev.h>
+
+#include "zsda_common.h"
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+struct zsda_comp_xform {
+	enum rte_comp_xform_type type;
+	enum rte_comp_checksum_type checksum_type;
+};
+
+int zsda_comp_match(const void *op_in);
+int zsda_build_comp_request(void *op_in, const struct zsda_queue *queue,
+		       void **op_cookies, const uint16_t new_tail);
+int zsda_decomp_match(const void *op_in);
+int zsda_build_decomp_request(void *op_in, const struct zsda_queue *queue,
+			 void **op_cookies, const uint16_t new_tail);
+void zsda_comp_callback(void *cookie_in, const struct zsda_cqe *cqe);
+
+#endif
diff --git a/drivers/compress/zsda/zsda_comp_pmd.c b/drivers/compress/zsda/zsda_comp_pmd.c
new file mode 100644
index 0000000000..cd435927a6
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp_pmd.c
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_malloc.h>
+
+#include "zsda_comp.h"
+#include "zsda_comp_pmd.h"
+
+static const struct rte_compressdev_capabilities zsda_comp_capabilities[] = {
+	{
+		.algo = RTE_COMP_ALGO_DEFLATE,
+		.comp_feature_flags = RTE_COMP_FF_HUFFMAN_DYNAMIC |
+							RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+							RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+							RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+							RTE_COMP_FF_CRC32_CHECKSUM |
+							RTE_COMP_FF_ADLER32_CHECKSUM,
+		.window_size = {.min = 15, .max = 15, .increment = 0},
+	},
+	/* capability lists must be terminated so that
+	 * rte_compressdev_capability_get() knows where to stop
+	 */
+	RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
+
+/* Aggregate per-qp counters into the framework stats structure. */
+static void
+zsda_comp_stats_get(struct rte_compressdev *dev,
+		    struct rte_compressdev_stats *stats)
+{
+	struct zsda_common_stat comm = {0};
+
+	zsda_stats_get(dev->data->queue_pairs, dev->data->nb_queue_pairs,
+		       &comm);
+	stats->enqueued_count = comm.enqueued_count;
+	stats->dequeued_count = comm.dequeued_count;
+	stats->enqueue_err_count = comm.enqueue_err_count;
+	stats->dequeue_err_count = comm.dequeue_err_count;
+}
+
+/* Clear the statistics of every queue pair of the device. */
+static void
+zsda_comp_stats_reset(struct rte_compressdev *dev)
+{
+	zsda_stats_reset(dev->data->queue_pairs, dev->data->nb_queue_pairs);
+}
+
+/* Release one queue pair and clear its slot in dev->data. */
+static int
+zsda_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
+{
+	return zsda_queue_pair_release(
+		(struct zsda_qp **)&(dev->data->queue_pairs[queue_pair_id]));
+}
+
+
+/*
+ * Configure the compression service ring of @qp and wire up its
+ * request-build / completion / match callbacks.  Previously the
+ * callbacks were installed even when zsda_common_setup_qp() failed;
+ * now the error is returned first.
+ */
+static int
+zsda_setup_comp_queue(struct zsda_pci_device *zsda_pci_dev, const uint16_t qp_id,
+		 struct zsda_qp *qp, uint16_t nb_des, int socket_id)
+{
+	enum zsda_service_type type = ZSDA_SERVICE_COMPRESSION;
+	struct zsda_qp_config conf;
+	int ret = 0;
+	struct zsda_qp_hw *qp_hw;
+
+	qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+	conf.hw = qp_hw->data + qp_id;
+	conf.service_type = type;
+	conf.cookie_size = sizeof(struct zsda_op_cookie);
+	conf.nb_descriptors = nb_des;
+	conf.socket_id = socket_id;
+	conf.service_str = "comp";
+
+	ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+	if (ret)
+		return ret;
+
+	qp->srv[type].rx_cb = zsda_comp_callback;
+	qp->srv[type].tx_cb = zsda_build_comp_request;
+	qp->srv[type].match = zsda_comp_match;
+
+	return ret;
+}
+
+/*
+ * Configure the decompression service ring of @qp and wire up its
+ * request-build / completion / match callbacks.  Previously the
+ * callbacks were installed even when zsda_common_setup_qp() failed;
+ * now the error is returned first.
+ */
+static int
+zsda_setup_decomp_queue(struct zsda_pci_device *zsda_pci_dev, const uint16_t qp_id,
+		   struct zsda_qp *qp, uint16_t nb_des, int socket_id)
+{
+	enum zsda_service_type type = ZSDA_SERVICE_DECOMPRESSION;
+	struct zsda_qp_config conf;
+	int ret = 0;
+	struct zsda_qp_hw *qp_hw;
+
+	qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+	conf.hw = qp_hw->data + qp_id;
+	conf.service_type = type;
+	conf.cookie_size = sizeof(struct zsda_op_cookie);
+	conf.nb_descriptors = nb_des;
+	conf.socket_id = socket_id;
+	conf.service_str = "decomp";
+
+	ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+	if (ret)
+		return ret;
+
+	qp->srv[type].rx_cb = zsda_comp_callback;
+	qp->srv[type].tx_cb = zsda_build_decomp_request;
+	qp->srv[type].match = zsda_decomp_match;
+
+	return ret;
+}
+
+/*
+ * Create (or re-create) queue pair @qp_id.  Depending on how many hw
+ * rings each service owns, the qp is configured for compression only,
+ * decompression only, or both.
+ */
+static int
+zsda_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+		   uint32_t max_inflight_ops, int socket_id)
+{
+	int ret = 0;
+	struct zsda_qp *qp_new;
+
+	struct zsda_qp **qp_addr =
+		(struct zsda_qp **)&(dev->data->queue_pairs[qp_id]);
+	struct zsda_comp_dev_private *comp_priv = dev->data->dev_private;
+	struct zsda_pci_device *zsda_pci_dev = comp_priv->zsda_pci_dev;
+	uint16_t num_qps_comp =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);
+	uint16_t num_qps_decomp =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);
+	uint16_t nb_des = max_inflight_ops & 0xffff;
+
+	/* the ring depth is fixed: whatever the caller requested, the qp
+	 * uses NB_DES descriptors (the original no-op ternary
+	 * "(nb_des == NB_DES) ? nb_des : NB_DES" always yielded NB_DES)
+	 */
+	nb_des = NB_DES;
+
+	/* release a pre-existing qp before re-creating it */
+	if (*qp_addr != NULL) {
+		ret = zsda_comp_qp_release(dev, qp_id);
+		if (ret)
+			return ret;
+	}
+
+	qp_new = rte_zmalloc_socket("zsda PMD qp metadata", sizeof(*qp_new),
+				    RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp_new == NULL) {
+		ZSDA_LOG(ERR, E_MALLOC);
+		return -ENOMEM;
+	}
+
+	if (num_qps_comp == MAX_QPS_ON_FUNCTION)
+		ret = zsda_setup_comp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					socket_id);
+	else if (num_qps_decomp == MAX_QPS_ON_FUNCTION)
+		ret = zsda_setup_decomp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					  socket_id);
+	else {
+		ret = zsda_setup_comp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					socket_id);
+		ret |= zsda_setup_decomp_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					  socket_id);
+	}
+
+	if (ret) {
+		rte_free(qp_new);
+		return ret;
+	}
+
+	qp_new->mmap_bar_addr =
+		comp_priv->zsda_pci_dev->pci_dev->mem_resource[0].addr;
+	*qp_addr = qp_new;
+
+	return ret;
+}
+
+/* Size of one private-xform pool element, padded to an 8-byte multiple. */
+static int
+zsda_comp_xform_size(void)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct zsda_comp_xform), 8);
+}
+
+/*
+ * Look up (or create) the mempool holding private xform elements for
+ * this device.  An existing pool of a different size is freed and
+ * recreated.
+ * NOTE(review): freeing a pool that still has outstanding elements
+ * would leave dangling xform pointers - confirm reconfiguration only
+ * happens with no xforms in use.
+ */
+static struct rte_mempool *
+zsda_comp_create_xform_pool(struct zsda_comp_dev_private *comp_dev,
+			    struct rte_compressdev_config *config,
+			    uint32_t num_elements)
+{
+	char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *mp;
+
+	snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE, "%s_xforms",
+		 comp_dev->zsda_pci_dev->name);
+
+	ZSDA_LOG(DEBUG, "xformpool: %s", xform_pool_name);
+	mp = rte_mempool_lookup(xform_pool_name);
+
+	if (mp != NULL) {
+		ZSDA_LOG(DEBUG, "xformpool already created");
+		if (mp->size != num_elements) {
+			ZSDA_LOG(DEBUG, "xformpool wrong size - delete it");
+			rte_mempool_free(mp);
+			mp = NULL;
+			comp_dev->xformpool = NULL;
+		}
+	}
+
+	if (mp == NULL)
+		mp = rte_mempool_create(xform_pool_name, num_elements,
+					zsda_comp_xform_size(), 0, 0, NULL,
+					NULL, NULL, NULL, config->socket_id, 0);
+	if (mp == NULL) {
+		ZSDA_LOG(ERR, E_CREATE);
+		return NULL;
+	}
+
+	return mp;
+}
+
+/*
+ * Allocate a private xform from the device's mempool and record the
+ * operation type and checksum type from @xform.  The unsupported
+ * CRC32+ADLER32 combination is rejected; the element is returned to
+ * the pool on that path (previously it leaked).
+ */
+static int
+zsda_comp_private_xform_create(struct rte_compressdev *dev,
+			       const struct rte_comp_xform *xform,
+			       void **private_xform)
+{
+	struct zsda_comp_dev_private *zsda = dev->data->dev_private;
+
+	if (unlikely(private_xform == NULL)) {
+		ZSDA_LOG(ERR, E_NULL);
+		return -EINVAL;
+	}
+	if (unlikely(zsda->xformpool == NULL)) {
+		ZSDA_LOG(ERR, E_NULL);
+		return -ENOMEM;
+	}
+	if (rte_mempool_get(zsda->xformpool, private_xform)) {
+		ZSDA_LOG(ERR, E_NULL);
+		return -ENOMEM;
+	}
+
+	struct zsda_comp_xform *zsda_xform =
+		(struct zsda_comp_xform *)*private_xform;
+	zsda_xform->type = xform->type;
+
+	if (zsda_xform->type == RTE_COMP_COMPRESS)
+		zsda_xform->checksum_type = xform->compress.chksum;
+	else
+		zsda_xform->checksum_type = xform->decompress.chksum;
+
+	if (zsda_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32_ADLER32) {
+		/* don't leak the mempool element on the error path */
+		rte_mempool_put(zsda->xformpool, *private_xform);
+		*private_xform = NULL;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Return a private xform to its mempool; the element is cleared first
+ * so stale configuration cannot leak into the next allocation.
+ */
+static int
+zsda_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+			     void *private_xform)
+{
+	struct zsda_comp_xform *zsda_xform =
+		(struct zsda_comp_xform *)private_xform;
+
+	if (zsda_xform) {
+		memset(zsda_xform, 0, zsda_comp_xform_size());
+		struct rte_mempool *mp = rte_mempool_from_obj(zsda_xform);
+
+		rte_mempool_put(mp, zsda_xform);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/* Release all queue pairs and free the private-xform mempool. */
+static int
+zsda_comp_dev_close(struct rte_compressdev *dev)
+{
+	uint16_t i;
+	struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+	for (i = 0; i < dev->data->nb_queue_pairs; i++)
+		zsda_comp_qp_release(dev, i);
+
+	rte_mempool_free(comp_dev->xformpool);
+	comp_dev->xformpool = NULL;
+
+	return 0;
+}
+
+/* Device configure: create the xform pool when the application asked
+ * for private xforms, otherwise leave the pool unset.
+ */
+static int
+zsda_comp_dev_config(struct rte_compressdev *dev,
+		     struct rte_compressdev_config *config)
+{
+	struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+	if (config->max_nb_priv_xforms) {
+		comp_dev->xformpool = zsda_comp_create_xform_pool(
+			comp_dev, config, config->max_nb_priv_xforms);
+		if (comp_dev->xformpool == NULL)
+			return -ENOMEM;
+	} else
+		comp_dev->xformpool = NULL;
+
+	return 0;
+}
+
+/* Start the hw queues of the underlying PCI device. */
+static int
+zsda_comp_dev_start(struct rte_compressdev *dev)
+{
+	struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+	int ret = 0;
+
+	ret = zsda_queue_start(comp_dev->zsda_pci_dev->pci_dev);
+
+	if (ret)
+		ZSDA_LOG(ERR, E_START_Q);
+
+	return ret;
+}
+
+/* Stop the hw queues of the underlying PCI device. */
+static void
+zsda_comp_dev_stop(struct rte_compressdev *dev)
+{
+	struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+	zsda_queue_stop(comp_dev->zsda_pci_dev->pci_dev);
+}
+
+/* Report qp count, feature flags and the capability list to the app. */
+static void
+zsda_comp_dev_info_get(struct rte_compressdev *dev,
+		       struct rte_compressdev_info *info)
+{
+	struct zsda_comp_dev_private *comp_dev = dev->data->dev_private;
+
+	if (info != NULL) {
+		info->max_nb_queue_pairs =
+			zsda_comp_max_nb_qps(comp_dev->zsda_pci_dev);
+		info->feature_flags = dev->feature_flags;
+		info->capabilities = comp_dev->zsda_dev_capabilities;
+	}
+}
+
+/* Thin adapter from the compressdev burst API to the common qp path. */
+static uint16_t
+zsda_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
+			       uint16_t nb_ops)
+{
+	return zsda_enqueue_op_burst((struct zsda_qp *)qp, (void **)ops,
+				     nb_ops);
+}
+
+/* Thin adapter from the compressdev burst API to the common qp path. */
+static uint16_t
+zsda_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
+			       uint16_t nb_ops)
+{
+	return zsda_dequeue_op_burst((struct zsda_qp *)qp, (void **)ops,
+				     nb_ops);
+}
+
+/* compressdev operations table exported to the framework */
+static struct rte_compressdev_ops compress_zsda_ops = {
+
+	.dev_configure = zsda_comp_dev_config,
+	.dev_start = zsda_comp_dev_start,
+	.dev_stop = zsda_comp_dev_stop,
+	.dev_close = zsda_comp_dev_close,
+	.dev_infos_get = zsda_comp_dev_info_get,
+
+	.stats_get = zsda_comp_stats_get,
+	.stats_reset = zsda_comp_stats_reset,
+	.queue_pair_setup = zsda_comp_qp_setup,
+	.queue_pair_release = zsda_comp_qp_release,
+
+	.private_xform_create = zsda_comp_private_xform_create,
+	.private_xform_free = zsda_comp_private_xform_free};
+
+/* An rte_driver is needed in the registration of the device with compressdev.
+ * The actual zsda pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the compression part of the pci device.
+ */
+static const char zsda_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_ZSDA_PMD);
+static const struct rte_driver compdev_zsda_driver = {
+	.name = zsda_comp_drv_name, .alias = zsda_comp_drv_name};
+
+/*
+ * Register the compression service of a ZSDA PCI device with the
+ * compressdev framework: creates the compressdev instance, installs
+ * the ops/burst functions and publishes the capability list through a
+ * shared memzone.  Only the primary process performs the creation.
+ */
+int
+zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev)
+{
+	struct zsda_device_info *dev_info =
+		&zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+	struct rte_compressdev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = (int)rte_socket_id(),
+	};
+
+	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+	struct rte_compressdev *compressdev;
+	struct zsda_comp_dev_private *comp_dev;
+	const struct rte_compressdev_capabilities *capabilities;
+	/* NOTE(review): this covers a single capability entry; if the
+	 * capability array grows (or gains a list terminator), use
+	 * sizeof(zsda_comp_capabilities) instead so the whole list is
+	 * copied into the memzone.
+	 */
+	uint16_t capa_size = sizeof(struct rte_compressdev_capabilities);
+
+	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
+		 zsda_pci_dev->name, "comp");
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	dev_info->comp_rte_dev.driver = &compdev_zsda_driver;
+	dev_info->comp_rte_dev.numa_node = dev_info->pci_dev->device.numa_node;
+	dev_info->comp_rte_dev.devargs = NULL;
+
+	compressdev = rte_compressdev_pmd_create(
+		name, &(dev_info->comp_rte_dev),
+		sizeof(struct zsda_comp_dev_private), &init_params);
+
+	if (compressdev == NULL)
+		return -ENODEV;
+
+	compressdev->dev_ops = &compress_zsda_ops;
+
+	compressdev->enqueue_burst = zsda_comp_pmd_enqueue_op_burst;
+	compressdev->dequeue_burst = zsda_comp_pmd_dequeue_op_burst;
+
+	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+	snprintf(capa_memz_name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+		 "ZSDA_COMP_CAPA");
+
+	comp_dev = compressdev->data->dev_private;
+	comp_dev->zsda_pci_dev = zsda_pci_dev;
+	comp_dev->compressdev = compressdev;
+	capabilities = zsda_comp_capabilities;
+
+	/* capability memzone is shared between processes: look it up
+	 * first, reserve it only when it does not exist yet
+	 */
+	comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (comp_dev->capa_mz == NULL) {
+		comp_dev->capa_mz = rte_memzone_reserve(
+			capa_memz_name, capa_size, rte_socket_id(), 0);
+	}
+	if (comp_dev->capa_mz == NULL) {
+		ZSDA_LOG(DEBUG, E_MALLOC);
+		memset(&dev_info->comp_rte_dev, 0,
+		       sizeof(dev_info->comp_rte_dev));
+		rte_compressdev_pmd_destroy(compressdev);
+		return -EFAULT;
+	}
+
+	memcpy(comp_dev->capa_mz->addr, capabilities, capa_size);
+	comp_dev->zsda_dev_capabilities = comp_dev->capa_mz->addr;
+
+	zsda_pci_dev->comp_dev = comp_dev;
+
+	return 0;
+}
+
+/*
+ * Tear down the compression service of a ZSDA PCI device: free the
+ * capability memzone (primary process only), close the compressdev
+ * (releases qps and the xform pool) and destroy the instance.
+ */
+int
+zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev)
+{
+	struct zsda_comp_dev_private *comp_dev;
+
+	if (zsda_pci_dev == NULL)
+		return -ENODEV;
+
+	comp_dev = zsda_pci_dev->comp_dev;
+	if (comp_dev == NULL)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(zsda_pci_dev->comp_dev->capa_mz);
+
+	zsda_comp_dev_close(comp_dev->compressdev);
+
+	rte_compressdev_pmd_destroy(comp_dev->compressdev);
+	zsda_pci_dev->comp_dev = NULL;
+
+	return 0;
+}
diff --git a/drivers/compress/zsda/zsda_comp_pmd.h b/drivers/compress/zsda/zsda_comp_pmd.h
new file mode 100644
index 0000000000..b496cb53a5
--- /dev/null
+++ b/drivers/compress/zsda/zsda_comp_pmd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_COMP_PMD_H_
+#define _ZSDA_COMP_PMD_H_
+
+#include <rte_compressdev_pmd.h>
+
+/**< ZSDA Compression PMD driver name */
+#define COMPRESSDEV_NAME_ZSDA_PMD compress_zsda
+
+/** private data structure for a ZSDA compression device.
+ * This ZSDA device is a device offering only a compression service,
+ * there can be one of these on each zsda_pci_device (VF).
+ */
+struct zsda_comp_dev_private {
+	struct zsda_pci_device *zsda_pci_dev;
+	/**< The zsda pci device hosting the service */
+	struct rte_compressdev *compressdev;
+	/**< The pointer to this compression device structure */
+	const struct rte_compressdev_capabilities *zsda_dev_capabilities;
+	/* ZSDA device compression capabilities */
+	const struct rte_memzone *interim_buff_mz;
+	/**< The device's memory for intermediate buffers */
+	struct rte_mempool *xformpool;
+	/**< The device's pool for zsda_comp_xforms */
+	struct rte_mempool *streampool;
+	/**< The device's pool for zsda_comp_streams */
+	const struct rte_memzone *capa_mz;
+	/* Shared memzone for storing capabilities */
+	uint16_t min_enq_burst_threshold;
+};
+
+int zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_COMP_PMD_H_ */
diff --git a/drivers/meson.build b/drivers/meson.build
index 66931d4241..cdbd3b1c17 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -17,6 +17,7 @@ subdirs = [
         'common/nitrox',  # depends on bus.
         'common/qat',     # depends on bus.
         'common/sfc_efx', # depends on bus.
+        'common/zsda',    # depends on bus.
         'mempool',        # depends on common and bus.
         'dma',            # depends on common and bus.
         'net',            # depends on common, bus, mempool
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 59944 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v4 5/8] zsda: modify files for introducing zsda cryptodev
  2024-09-09  8:11 [PATCH v4 2/8] zsda: add support for zsdadev operations Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 3/8] zsda: add support for queue operation Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 4/8] zsda: add zsda compressdev driver and interface Hanxiao Li
@ 2024-09-09  8:11 ` Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 6/8] zsda: add zsda crypto-pmd Hanxiao Li
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Hanxiao Li @ 2024-09-09  8:11 UTC (permalink / raw)
  To: dev; +Cc: wang.yong19, Hanxiao Li


[-- Attachment #1.1.1: Type: text/plain, Size: 11567 bytes --]

Modify the existing common and build files as required to
integrate the newly introduced ZSDA cryptodev support.

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 MAINTAINERS                       |  3 ++
 drivers/common/zsda/zsda_common.h | 50 +++++++++++++++++++++++
 drivers/common/zsda/zsda_device.c | 42 +++-----------------
 drivers/common/zsda/zsda_device.h | 20 ++++++++--
 drivers/common/zsda/zsda_qp.c     | 66 ++++++++++++++++++++++++++++++-
 lib/cryptodev/rte_crypto_sym.h    |  4 +-
 6 files changed, 142 insertions(+), 43 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index ea245fc61b..9e66c72c45 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1221,6 +1221,9 @@ F: drivers/crypto/virtio/
 F: doc/guides/cryptodevs/virtio.rst
 F: doc/guides/cryptodevs/features/virtio.ini
 
+ZTE Storage Data Accelerator
+M: Hanxiao Li <li.hanxiao@zte.com.cn>
+F: drivers/crypto/zsda/
 
 Compression Drivers
 -------------------
diff --git a/drivers/common/zsda/zsda_common.h b/drivers/common/zsda/zsda_common.h
index 0dbc9b7d3c..d50a152307 100644
--- a/drivers/common/zsda/zsda_common.h
+++ b/drivers/common/zsda/zsda_common.h
@@ -97,17 +97,39 @@
 enum zsda_service_type {
 	ZSDA_SERVICE_COMPRESSION = 0,
 	ZSDA_SERVICE_DECOMPRESSION,
+	ZSDA_SERVICE_SYMMETRIC_ENCRYPT,
+	ZSDA_SERVICE_SYMMETRIC_DECRYPT,
+	ZSDA_SERVICE_HASH_ENCODE = 6,
 	ZSDA_SERVICE_INVALID,
 };
 
 #define ZSDA_MAX_SERVICES (ZSDA_SERVICE_INVALID)
 
+#define ZSDA_OPC_EC_AES_XTS_256 0x0  /* Encry AES-XTS-256 */
+#define ZSDA_OPC_EC_AES_XTS_512 0x01 /* Encry AES-XTS-512 */
+#define ZSDA_OPC_EC_SM4_XTS_256 0x02 /* Encry SM4-XTS-256 */
+#define ZSDA_OPC_DC_AES_XTS_256 0x08 /* Decry AES-XTS-256 */
+#define ZSDA_OPC_DC_AES_XTS_512 0x09 /* Decry AES-XTS-512 */
+#define ZSDA_OPC_DC_SM4_XTS_256 0x0A /* Decry SM4-XTS-256 */
 #define ZSDA_OPC_COMP_GZIP	0x10 /* Encomp deflate-Gzip */
 #define ZSDA_OPC_COMP_ZLIB	0x11 /* Encomp deflate-Zlib */
 #define ZSDA_OPC_DECOMP_GZIP	0x18 /* Decompinfalte-Gzip */
 #define ZSDA_OPC_DECOMP_ZLIB	0x19 /* Decompinfalte-Zlib */
+#define ZSDA_OPC_HASH_SHA1	0x20 /* Hash-SHA1 */
+#define ZSDA_OPC_HASH_SHA2_224	0x21 /* Hash-SHA2-224 */
+#define ZSDA_OPC_HASH_SHA2_256	0x22 /* Hash-SHA2-256 */
+#define ZSDA_OPC_HASH_SHA2_384	0x23 /* Hash-SHA2-384 */
+#define ZSDA_OPC_HASH_SHA2_512	0x24 /* Hash-SHA2-512 */
+#define ZSDA_OPC_HASH_SM3	0x25 /* Hash-SM3 */
 #define ZSDA_OPC_INVALID	0xff
 
+#define ZSDA_DIGEST_SIZE_SHA1	  (20)
+#define ZSDA_DIGEST_SIZE_SHA2_224 (28)
+#define ZSDA_DIGEST_SIZE_SHA2_256 (32)
+#define ZSDA_DIGEST_SIZE_SHA2_384 (48)
+#define ZSDA_DIGEST_SIZE_SHA2_512 (64)
+#define ZSDA_DIGEST_SIZE_SM3	  (32)
+
 #define SET_CYCLE	  0xff
 #define SET_HEAD_INTI	  0x0
 
@@ -237,9 +259,34 @@ struct zsda_op_cookie {
 	uint8_t comp_head[COMP_REMOVE_SPACE_LEN];
 } __rte_packed;
 
+#define ZSDA_CIPHER_KEY_MAX_LEN 64
+struct crypto_cfg {
+	uint8_t slba_L[8];
+	uint8_t key[ZSDA_CIPHER_KEY_MAX_LEN];
+	uint8_t lbads : 4;
+	uint8_t resv1 : 4;
+	uint8_t resv2[7];
+	uint8_t slba_H[8];
+	uint8_t resv3[8];
+} __rte_packed;
+
 struct compress_cfg {
 } __rte_packed;
 
+struct zsda_wqe_crpt {
+	uint8_t valid;
+	uint8_t op_code;
+	uint16_t sid;
+	uint8_t resv[3];
+	uint8_t rx_sgl_type : 4;
+	uint8_t tx_sgl_type : 4;
+	uint64_t rx_addr;
+	uint32_t rx_length;
+	uint64_t tx_addr;
+	uint32_t tx_length;
+	struct crypto_cfg cfg;
+} __rte_packed;
+
 struct zsda_wqe_comp {
 	uint8_t valid;
 	uint8_t op_code;
@@ -281,6 +328,9 @@ struct zsda_common_stat {
 enum zsda_algo_core {
 	ZSDA_CORE_COMP,
 	ZSDA_CORE_DECOMP,
+	ZSDA_CORE_ENCRY,
+	ZSDA_CORE_DECRY,
+	ZSDA_CORE_HASH,
 	ZSDA_CORE_INVALID,
 };
 
diff --git a/drivers/common/zsda/zsda_device.c b/drivers/common/zsda/zsda_device.c
index de8894f5a3..4ddc97e564 100644
--- a/drivers/common/zsda/zsda_device.c
+++ b/drivers/common/zsda/zsda_device.c
@@ -7,6 +7,7 @@
 #include <stdint.h>
 
 #include "zsda_device.h"
+#include "zsda_qp.h"
 
 /* per-process array of device data */
 struct zsda_device_info zsda_devs[RTE_PMD_ZSDA_MAX_PCI_DEVICES];
@@ -306,7 +307,8 @@ zsda_pci_device_release(const struct rte_pci_device *pci_dev)
 		inst = &zsda_devs[zsda_pci_dev->zsda_dev_id];
 
 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			if (zsda_pci_dev->comp_dev != NULL) {
+			if ((zsda_pci_dev->sym_dev != NULL) ||
+			    (zsda_pci_dev->comp_dev != NULL)) {
 				ZSDA_LOG(DEBUG, "ZSDA device %s is busy", name);
 				return -EBUSY;
 			}
@@ -322,47 +324,12 @@ static int
 zsda_pci_dev_destroy(struct zsda_pci_device *zsda_pci_dev,
 		     const struct rte_pci_device *pci_dev)
 {
+	zsda_sym_dev_destroy(zsda_pci_dev);
 	zsda_comp_dev_destroy(zsda_pci_dev);
 
 	return zsda_pci_device_release(pci_dev);
 }
 
-int
-zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
-			 const uint8_t qid, struct qinfo *qcfg)
-{
-	struct zsda_admin_req_qcfg req = {0};
-	struct zsda_admin_resp_qcfg resp = {0};
-	int ret = 0;
-	struct rte_pci_device *pci_dev =
-		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
-
-	if (qid >= MAX_QPS_ON_FUNCTION) {
-		ZSDA_LOG(ERR, "qid beyond limit!");
-		return ZSDA_FAILED;
-	}
-
-	zsda_admin_msg_init(pci_dev);
-	req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
-	req.qid = qid;
-
-	ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
-	if (ret) {
-		ZSDA_LOG(ERR, "Failed! Send msg");
-		return ret;
-	}
-
-	ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
-	if (ret) {
-		ZSDA_LOG(ERR, "Failed! Receive msg");
-		return ret;
-	}
-
-	memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
-
-	return ZSDA_SUCCESS;
-}
-
 static int
 zsda_unmask_flr(const struct zsda_pci_device *zsda_pci_dev)
 {
@@ -432,6 +399,7 @@ zsda_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		return ret;
 	}
 
+	ret |= zsda_sym_dev_create(zsda_pci_dev);
 	ret |= zsda_comp_dev_create(zsda_pci_dev);
 
 	if (ret) {
diff --git a/drivers/common/zsda/zsda_device.h b/drivers/common/zsda/zsda_device.h
index 1b2ad0ce85..07dca183ab 100644
--- a/drivers/common/zsda/zsda_device.h
+++ b/drivers/common/zsda/zsda_device.h
@@ -18,6 +18,13 @@ struct zsda_device_info {
 
 	struct rte_pci_device *pci_dev;
 
+	// struct rte_device sym_rte_dev;
+	struct rte_device sym_rte_dev;
+	/**< This represents the crypto sym subset of this pci device.
+	 * Register with this rather than with the one in
+	 * pci_dev so that its driver can have a crypto-specific name
+	 */
+
 	struct rte_device comp_rte_dev;
 	/**< This represents the compression subset of this pci device.
 	 * Register with this rather than with the one in
@@ -27,6 +34,7 @@ struct zsda_device_info {
 
 extern struct zsda_device_info zsda_devs[];
 
+struct zsda_sym_dev_private;
 struct zsda_comp_dev_private;
 
 struct zsda_qp_hw_data {
@@ -64,6 +72,10 @@ struct zsda_pci_device {
 
 	struct rte_pci_device *pci_dev;
 
+	/* Data relating to symmetric crypto service */
+	struct zsda_sym_dev_private *sym_dev;
+	/**< link back to cryptodev private data */
+
 	/* Data relating to compression service */
 	struct zsda_comp_dev_private *comp_dev;
 	/**< link back to compressdev private data */
@@ -79,7 +91,10 @@ struct zsda_pci_device *
 zsda_get_zsda_dev_from_pci_dev(const struct rte_pci_device *pci_dev);
 
 __rte_weak int
-zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+__rte_weak int
+zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
 
 __rte_weak int
 zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
@@ -87,9 +102,6 @@ zsda_comp_dev_create(struct zsda_pci_device *zsda_pci_dev);
 __rte_weak int
 zsda_comp_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
 
-int zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
-			     const uint8_t qid, struct qinfo *qcfg);
-
 int zsda_queue_start(const struct rte_pci_device *pci_dev);
 int zsda_queue_stop(const struct rte_pci_device *pci_dev);
 int zsda_queue_clear(const struct rte_pci_device *pci_dev);
diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c
index f2dfe43b2e..5c5ac90771 100644
--- a/drivers/common/zsda/zsda_qp.c
+++ b/drivers/common/zsda/zsda_qp.c
@@ -20,8 +20,11 @@ struct ring_size {
 };
 
 struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+	[ZSDA_SERVICE_SYMMETRIC_ENCRYPT] = {128, 16},
+	[ZSDA_SERVICE_SYMMETRIC_DECRYPT] = {128, 16},
 	[ZSDA_SERVICE_COMPRESSION] = {32, 16},
 	[ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+	[ZSDA_SERVICE_HASH_ENCODE] = {32, 16},
 };
 
 static void
@@ -36,6 +39,43 @@ zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,
 			 SET_HEAD_INTI);
 }
 
+static int
+zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+			 const uint8_t qid, struct qinfo *qcfg)
+{
+	struct zsda_admin_req_qcfg req = {0};
+	struct zsda_admin_resp_qcfg resp = {0};
+	int ret = 0;
+	struct rte_pci_device *pci_dev =
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+	if (qid >= MAX_QPS_ON_FUNCTION) {
+		ZSDA_LOG(ERR, "qid beyond limit!");
+		return ZSDA_FAILED;
+	}
+
+	zsda_admin_msg_init(pci_dev);
+	req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
+	req.qid = qid;
+
+	ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Send msg");
+		return ret;
+	}
+
+	ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Receive msg");
+		return ret;
+	}
+
+	memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
+
+	return ZSDA_SUCCESS;
+}
+
+
 int
 zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
 {
@@ -43,7 +83,7 @@ zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
 	uint32_t index;
 	enum zsda_service_type type;
 	struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
-	struct qinfo qcfg;
+	struct qinfo qcfg = {0};
 	int ret = 0;
 
 	for (i = 0; i < zsda_num_used_qps; i++) {
@@ -115,6 +155,30 @@ zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev)
 	return min;
 }
 
+/* Return the number of queue pairs the crypto service may expose.
+ * If any of the three sub-services (encrypt, decrypt, hash) reports
+ * MAX_QPS_ON_FUNCTION, or none reports any qp, the full qp count is
+ * returned; otherwise the minimum across the three sub-services is used
+ * so that each qp can host all of them.
+ */
+uint16_t
+zsda_crypto_max_nb_qps(struct zsda_pci_device *zsda_pci_dev)
+{
+	uint16_t encrypt = zsda_qps_per_service(zsda_pci_dev,
+						ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+	uint16_t decrypt = zsda_qps_per_service(zsda_pci_dev,
+						ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+	uint16_t hash =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+	uint16_t min = 0;
+
+	if ((encrypt == MAX_QPS_ON_FUNCTION) ||
+	    (decrypt == MAX_QPS_ON_FUNCTION) ||
+	    (hash == MAX_QPS_ON_FUNCTION)) {
+		min = MAX_QPS_ON_FUNCTION;
+	} else {
+		min = (encrypt < decrypt) ? encrypt : decrypt;
+		min = (min < hash) ? min : hash;
+	}
+
+	if (min == 0)
+		return MAX_QPS_ON_FUNCTION;
+	return min;
+}
 
 void
 zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index 53b18b9412..b34d041fe0 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -176,8 +176,10 @@ enum rte_crypto_cipher_algorithm {
 	/**< ShangMi 4 (SM4) algorithm in CTR mode */
 	RTE_CRYPTO_CIPHER_SM4_OFB,
 	/**< ShangMi 4 (SM4) algorithm in OFB mode */
-	RTE_CRYPTO_CIPHER_SM4_CFB
+	RTE_CRYPTO_CIPHER_SM4_CFB,
 	/**< ShangMi 4 (SM4) algorithm in CFB mode */
+	RTE_CRYPTO_CIPHER_SM4_XTS
+	/**< ShangMi 4 (SM4) algorithm in XTS mode */
 };
 
 /** Symmetric Cipher Direction */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 23461 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v4 6/8] zsda: add zsda crypto-pmd
  2024-09-09  8:11 [PATCH v4 2/8] zsda: add support for zsdadev operations Hanxiao Li
                   ` (2 preceding siblings ...)
  2024-09-09  8:11 ` [PATCH v4 5/8] zsda: modify files for introducing zsda cryptodev Hanxiao Li
@ 2024-09-09  8:11 ` Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 7/8] zsda: add zsda crypto-sym Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 8/8] zsda: add zsda crypto-session and compile file Hanxiao Li
  5 siblings, 0 replies; 7+ messages in thread
From: Hanxiao Li @ 2024-09-09  8:11 UTC (permalink / raw)
  To: dev; +Cc: wang.yong19, Hanxiao Li


[-- Attachment #1.1.1: Type: text/plain, Size: 18271 bytes --]

Add new files zsda_sym_capabilities.h, zsda_sym_pmd.c and zsda_sym_pmd.h in drivers/crypto/zsda

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 drivers/crypto/zsda/zsda_sym_capabilities.h | 112 +++++
 drivers/crypto/zsda/zsda_sym_pmd.c          | 429 ++++++++++++++++++++
 drivers/crypto/zsda/zsda_sym_pmd.h          |  35 ++
 3 files changed, 576 insertions(+)
 create mode 100644 drivers/crypto/zsda/zsda_sym_capabilities.h
 create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.c
 create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.h

diff --git a/drivers/crypto/zsda/zsda_sym_capabilities.h b/drivers/crypto/zsda/zsda_sym_capabilities.h
new file mode 100644
index 0000000000..dd387b36ad
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_capabilities.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_CAPABILITIES_H_
+#define _ZSDA_SYM_CAPABILITIES_H_
+
+/* Capability table published to the cryptodev framework.
+ * Each digest size is fixed per algorithm, so every digest_size range has
+ * min == max and increment == 0 (the SHA1 entry previously declared
+ * increment = 2, which is meaningless for a fixed-size range).
+ */
+static const struct rte_cryptodev_capabilities zsda_crypto_sym_capabilities[] = {
+	{/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1,
+				.block_size = 64,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 20, .max = 20, .increment = 0},
+				.iv_size = {0} },
+			}	},
+		}
+	},
+	{/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224,
+				.block_size = 64,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 28, .max = 28, .increment = 0},
+				.iv_size = {0} },
+			}	},
+		}
+	},
+	{/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256,
+				.block_size = 64,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 32, .max = 32, .increment = 0},
+				.iv_size = {0} },
+			} },
+		}
+	},
+	{/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384,
+				.block_size = 128,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 48, .max = 48, .increment = 0},
+				.iv_size = {0} },
+			} },
+		}
+	},
+	{/* SHA512 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512,
+				.block_size = 128,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 64, .max = 64, .increment = 0},
+				.iv_size = {0} },
+			} },
+		}
+	},
+	{/* SM3 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{ .auth = {
+				.algo = RTE_CRYPTO_AUTH_SM3,
+				.block_size = 64,
+				.key_size = {.min = 0, .max = 0, .increment = 0},
+				.digest_size = {.min = 32, .max = 32, .increment = 0},
+				.iv_size = {0} },
+			} },
+		}
+	},
+	{/* AES XTS */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{ .cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_XTS,
+				.block_size = 16,
+				.key_size = {.min = 16, .max = 32, .increment = 16},
+				.iv_size = {.min = 16, .max = 16, .increment = 0} },
+			} },
+		}
+	},
+	{/* SM4 XTS */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{ .cipher = {
+				.algo = RTE_CRYPTO_CIPHER_SM4_XTS,
+				.block_size = 16,
+				.key_size = {.min = 32, .max = 32, .increment = 0},
+				.iv_size = {.min = 16, .max = 16, .increment = 0} },
+			} },
+		}
+	}
+};
+#endif /* _ZSDA_SYM_CAPABILITIES_H_ */
+
diff --git a/drivers/crypto/zsda/zsda_sym_pmd.c b/drivers/crypto/zsda/zsda_sym_pmd.c
new file mode 100644
index 0000000000..ac5a63b96e
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_pmd.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_cryptodev.h>
+
+#include "cryptodev_pmd.h"
+#include "zsda_logs.h"
+#include "zsda_sym.h"
+#include "zsda_sym_pmd.h"
+#include "zsda_sym_session.h"
+#include "zsda_sym_capabilities.h"
+
+uint8_t zsda_sym_driver_id;
+
+/* dev_configure op: no device-level configuration is required; always
+ * returns ZSDA_SUCCESS.
+ */
+static int
+zsda_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+		    __rte_unused struct rte_cryptodev_config *config)
+{
+	return ZSDA_SUCCESS;
+}
+
+static int zsda_sym_qp_release(struct rte_cryptodev *dev,
+				uint16_t queue_pair_id);
+
+/* dev_start op: start the hardware queues of the underlying PCI device.
+ * Logs an error and returns the queue-start error code on failure.
+ */
+static int
+zsda_sym_dev_start(struct rte_cryptodev *dev)
+{
+	struct zsda_sym_dev_private *sym_dev = dev->data->dev_private;
+	int ret = 0;
+
+	ret = zsda_queue_start(sym_dev->zsda_pci_dev->pci_dev);
+
+	if (ret)
+		ZSDA_LOG(ERR, E_START_Q);
+	return ret;
+}
+
+/* dev_stop op: stop the hardware queues; any error from zsda_queue_stop()
+ * cannot be reported because dev_stop returns void.
+ */
+static void
+zsda_sym_dev_stop(struct rte_cryptodev *dev)
+{
+	struct zsda_sym_dev_private *sym_dev = dev->data->dev_private;
+
+	zsda_queue_stop(sym_dev->zsda_pci_dev->pci_dev);
+}
+
+/* dev_close op: release every queue pair; individual release errors are
+ * OR-ed together so a single non-zero return flags any failure.
+ */
+static int
+zsda_sym_dev_close(struct rte_cryptodev *dev)
+{
+	int ret = 0;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_queue_pairs; i++)
+		ret |= zsda_sym_qp_release(dev, i);
+
+	return ret;
+}
+
+/* dev_infos_get op: fill in qp count, feature flags and the capability
+ * table. max_nb_sessions = 0 means "no session limit" per the cryptodev
+ * API convention.
+ */
+static void
+zsda_sym_dev_info_get(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_info *info)
+{
+	struct zsda_sym_dev_private *sym_priv = dev->data->dev_private;
+
+	if (info != NULL) {
+		info->max_nb_queue_pairs =
+			zsda_crypto_max_nb_qps(sym_priv->zsda_pci_dev);
+		info->feature_flags = dev->feature_flags;
+		info->capabilities = sym_priv->zsda_dev_capabilities;
+		info->driver_id = zsda_sym_driver_id;
+		info->sym.max_nb_sessions = 0;
+	}
+}
+
+/* stats_get op: aggregate per-qp counters into the cryptodev stats struct.
+ * NOTE(review): assumes the framework never passes stats == NULL — confirm.
+ */
+static void
+zsda_sym_stats_get(struct rte_cryptodev *dev, struct rte_cryptodev_stats *stats)
+{
+	struct zsda_common_stat comm = {0};
+
+	zsda_stats_get(dev->data->queue_pairs, dev->data->nb_queue_pairs,
+		       &comm);
+	stats->enqueued_count = comm.enqueued_count;
+	stats->dequeued_count = comm.dequeued_count;
+	stats->enqueue_err_count = comm.enqueue_err_count;
+	stats->dequeue_err_count = comm.dequeue_err_count;
+}
+
+/* stats_reset op: zero the counters of every queue pair. */
+static void
+zsda_sym_stats_reset(struct rte_cryptodev *dev)
+{
+	zsda_stats_reset(dev->data->queue_pairs, dev->data->nb_queue_pairs);
+}
+
+/* queue_pair_release op: free the qp and clear its slot in
+ * dev->data->queue_pairs (the common helper takes the slot's address).
+ */
+static int
+zsda_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+	ZSDA_LOG(DEBUG, "Release sym qp %u on device %d", queue_pair_id,
+		 dev->data->dev_id);
+
+	return zsda_queue_pair_release(
+		(struct zsda_qp **)&(dev->data->queue_pairs[queue_pair_id]));
+}
+
+/* Configure @qp for the symmetric-encrypt service and hook up the
+ * build/callback/match handlers. conf is zero-initialized so that any
+ * zsda_qp_config field not explicitly set here is not passed to
+ * zsda_common_setup_qp() as stack garbage.
+ */
+static int
+zsda_setup_encrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+		     struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+	enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_ENCRYPT;
+	struct zsda_qp_config conf = {0};
+	int ret = 0;
+	struct zsda_qp_hw *qp_hw;
+
+	qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+	conf.hw = qp_hw->data + qp_id;
+	conf.service_type = type;
+	conf.cookie_size = sizeof(struct zsda_op_cookie);
+	conf.nb_descriptors = nb_des;
+	conf.socket_id = socket_id;
+	conf.service_str = "sym_encrypt";
+
+	ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+	qp->srv[type].rx_cb = zsda_crypto_callback;
+	qp->srv[type].tx_cb = zsda_build_cipher_request;
+	qp->srv[type].match = zsda_encry_match;
+
+	return ret;
+}
+
+/* Configure @qp for the symmetric-decrypt service and hook up the
+ * build/callback/match handlers. conf is zero-initialized so that any
+ * zsda_qp_config field not explicitly set here is not passed to
+ * zsda_common_setup_qp() as stack garbage.
+ */
+static int
+zsda_setup_decrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+		     struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+	enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_DECRYPT;
+	struct zsda_qp_config conf = {0};
+	int ret = 0;
+	struct zsda_qp_hw *qp_hw;
+
+	qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+	conf.hw = qp_hw->data + qp_id;
+	conf.service_type = type;
+	conf.cookie_size = sizeof(struct zsda_op_cookie);
+	conf.nb_descriptors = nb_des;
+	conf.socket_id = socket_id;
+	conf.service_str = "sym_decrypt";
+
+	ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+	qp->srv[type].rx_cb = zsda_crypto_callback;
+	qp->srv[type].tx_cb = zsda_build_cipher_request;
+	qp->srv[type].match = zsda_decry_match;
+
+	return ret;
+}
+
+/* Configure @qp for the hash service and hook up the build/callback/match
+ * handlers. conf is zero-initialized so that any zsda_qp_config field not
+ * explicitly set here is not passed to zsda_common_setup_qp() as stack
+ * garbage.
+ */
+static int
+zsda_setup_hash_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id,
+		 struct zsda_qp *qp, uint32_t nb_des, int socket_id)
+{
+	enum zsda_service_type type = ZSDA_SERVICE_HASH_ENCODE;
+	struct zsda_qp_config conf = {0};
+	int ret = 0;
+	struct zsda_qp_hw *qp_hw;
+
+	qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type);
+	conf.hw = qp_hw->data + qp_id;
+	conf.service_type = type;
+	conf.cookie_size = sizeof(struct zsda_op_cookie);
+	conf.nb_descriptors = nb_des;
+	conf.socket_id = socket_id;
+	conf.service_str = "sym_hash";
+
+	ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, &conf);
+	qp->srv[type].rx_cb = zsda_crypto_callback;
+	qp->srv[type].tx_cb = zsda_build_hash_request;
+	qp->srv[type].match = zsda_hash_match;
+
+	return ret;
+}
+
+/* queue_pair_setup op: (re)allocate qp metadata and bind it to the crypto
+ * sub-services. If one sub-service owns every hw queue on the function,
+ * the qp is bound to that service alone; otherwise all three services are
+ * registered on it.
+ */
+static int
+zsda_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		  const struct rte_cryptodev_qp_conf *qp_conf,
+		  int socket_id)
+{
+	int ret = 0;
+	struct zsda_qp *qp_new;
+
+	struct zsda_qp **qp_addr =
+		(struct zsda_qp **)&(dev->data->queue_pairs[qp_id]);
+	struct zsda_sym_dev_private *sym_priv = dev->data->dev_private;
+	struct zsda_pci_device *zsda_pci_dev = sym_priv->zsda_pci_dev;
+	uint16_t num_qps_encrypt = zsda_qps_per_service(
+		zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+	uint16_t num_qps_decrypt = zsda_qps_per_service(
+		zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+	uint16_t num_qps_hash = zsda_qps_per_service(
+		zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+
+	uint32_t nb_des = qp_conf->nb_descriptors;
+
+	/* The ring depth is fixed by hardware: any requested value other
+	 * than NB_DES is silently overridden.
+	 */
+	if (nb_des != NB_DES)
+		nb_des = NB_DES;
+
+	/* Replace an existing qp in this slot, if any. */
+	if (*qp_addr != NULL) {
+		ret = zsda_sym_qp_release(dev, qp_id);
+		if (ret)
+			return ret;
+	}
+
+	qp_new = rte_zmalloc_socket("zsda PMD qp metadata", sizeof(*qp_new),
+				    RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp_new == NULL) {
+		ZSDA_LOG(ERR, "Failed to alloc mem for qp struct");
+		return -ENOMEM;
+	}
+
+	if (num_qps_encrypt == MAX_QPS_ON_FUNCTION) {
+		ret = zsda_setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					    socket_id);
+	} else if (num_qps_decrypt == MAX_QPS_ON_FUNCTION) {
+		ret = zsda_setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					    socket_id);
+	} else if (num_qps_hash == MAX_QPS_ON_FUNCTION) {
+		ret = zsda_setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					socket_id);
+	} else {
+		ret = zsda_setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					    socket_id);
+		ret |= zsda_setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					    socket_id);
+		ret |= zsda_setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des,
+					socket_id);
+	}
+
+	if (ret) {
+		rte_free(qp_new);
+		return ret;
+	}
+
+	qp_new->mmap_bar_addr =
+		sym_priv->zsda_pci_dev->pci_dev->mem_resource[0].addr;
+	*qp_addr = qp_new;
+
+	return ret;
+}
+
+/* sym_session_get_size op: size of the per-session private data,
+ * rounded up to an 8-byte multiple.
+ */
+static unsigned int
+zsda_sym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct zsda_sym_session), 8);
+}
+
+/* sym_session_configure op: parse the xform chain into the session's
+ * private data. Returns 0 on success, -EINVAL for a NULL session, or the
+ * parameter-parsing error code.
+ */
+static int
+zsda_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+			   struct rte_crypto_sym_xform *xform,
+			   struct rte_cryptodev_sym_session *sess)
+{
+	void *sess_private_data;
+	int ret = 0;
+
+	if (unlikely(sess == NULL)) {
+		ZSDA_LOG(ERR, "Invalid session struct");
+		return -EINVAL;
+	}
+
+	sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
+
+	ret = zsda_crypto_set_session_parameters(
+			sess_private_data, xform);
+
+	if (ret != 0) {
+		ZSDA_LOG(ERR, "Failed configure session parameters");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* sym_session_clear op: intentionally empty.
+ * NOTE(review): the session private data (cipher keys) is not zeroized
+ * here — consider memset of the private area before the session is reused.
+ */
+static void
+zsda_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
+			struct rte_cryptodev_sym_session  *sess __rte_unused)
+{}
+
+/* cryptodev op table wiring the framework entry points to this PMD. */
+static struct rte_cryptodev_ops crypto_zsda_ops = {
+
+	.dev_configure = zsda_sym_dev_config,
+	.dev_start = zsda_sym_dev_start,
+	.dev_stop = zsda_sym_dev_stop,
+	.dev_close = zsda_sym_dev_close,
+	.dev_infos_get = zsda_sym_dev_info_get,
+
+	.stats_get = zsda_sym_stats_get,
+	.stats_reset = zsda_sym_stats_reset,
+	.queue_pair_setup = zsda_sym_qp_setup,
+	.queue_pair_release = zsda_sym_qp_release,
+
+	.sym_session_get_size = zsda_sym_session_get_private_size,
+	.sym_session_configure = zsda_sym_session_configure,
+	.sym_session_clear = zsda_sym_session_clear,
+};
+
+/* enqueue_burst entry point: thin adapter to the common qp enqueue path. */
+static uint16_t
+zsda_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+			      uint16_t nb_ops)
+{
+	return zsda_enqueue_op_burst((struct zsda_qp *)qp, (void **)ops,
+				     nb_ops);
+}
+
+/* dequeue_burst entry point: thin adapter to the common qp dequeue path. */
+static uint16_t
+zsda_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+			      uint16_t nb_ops)
+{
+	return zsda_dequeue_op_burst((struct zsda_qp *)qp, (void **)ops,
+				     nb_ops);
+}
+
+static const char zsda_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_ZSDA_SYM_PMD);
+static const struct rte_driver cryptodev_zsda_sym_driver = {
+	.name = zsda_sym_drv_name, .alias = zsda_sym_drv_name};
+
+/* Create the symmetric-crypto cryptodev for one zsda PCI device.
+ * Primary process only (secondaries return early with success). The
+ * capability table is copied into a memzone named "ZSDA_SYM_CAPA", shared
+ * by all zsda devices via rte_memzone_lookup(). On failure after the
+ * cryptodev was created, the cryptodev is destroyed and sym_rte_dev is
+ * cleared.
+ */
+int
+zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev)
+{
+	int ret = 0;
+	struct zsda_device_info *dev_info =
+		&zsda_devs[zsda_pci_dev->zsda_dev_id];
+
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = (int)rte_socket_id(),
+		.private_data_size = sizeof(struct zsda_sym_dev_private)};
+
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct zsda_sym_dev_private *sym_priv;
+	const struct rte_cryptodev_capabilities *capabilities;
+	uint64_t capa_size;
+
+	init_params.max_nb_queue_pairs = zsda_crypto_max_nb_qps(zsda_pci_dev);
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", zsda_pci_dev->name,
+		 "sym_encrypt");
+	ZSDA_LOG(DEBUG, "Creating ZSDA SYM device %s", name);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return ZSDA_SUCCESS;
+
+	/* Register via the per-service rte_device so the cryptodev gets a
+	 * crypto-specific name distinct from the PCI device's.
+	 */
+	dev_info->sym_rte_dev.driver = &cryptodev_zsda_sym_driver;
+	dev_info->sym_rte_dev.numa_node = dev_info->pci_dev->device.numa_node;
+	dev_info->sym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name, &(dev_info->sym_rte_dev),
+					     &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	dev_info->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = zsda_sym_driver_id;
+
+	cryptodev->dev_ops = &crypto_zsda_ops;
+
+	cryptodev->enqueue_burst = zsda_sym_pmd_enqueue_op_burst;
+	cryptodev->dequeue_burst = zsda_sym_pmd_dequeue_op_burst;
+
+	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+				   RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+				   RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+				   RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+				   RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+				   RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+				   RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+	sym_priv = cryptodev->data->dev_private;
+	sym_priv->zsda_pci_dev = zsda_pci_dev;
+	capabilities = zsda_crypto_sym_capabilities;
+	capa_size = sizeof(zsda_crypto_sym_capabilities);
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN, "ZSDA_SYM_CAPA");
+
+	/* Reuse the shared capability memzone if another device created it. */
+	sym_priv->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (sym_priv->capa_mz == NULL)
+		sym_priv->capa_mz = rte_memzone_reserve(
+			capa_memz_name, capa_size, rte_socket_id(), 0);
+
+	if (sym_priv->capa_mz == NULL) {
+		ZSDA_LOG(ERR, E_MALLOC);
+		ret = -EFAULT;
+		goto error;
+	}
+
+	memcpy(sym_priv->capa_mz->addr, capabilities, capa_size);
+	sym_priv->zsda_dev_capabilities = sym_priv->capa_mz->addr;
+
+	zsda_pci_dev->sym_dev = sym_priv;
+
+	return ZSDA_SUCCESS;
+
+error:
+
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&dev_info->sym_rte_dev, 0, sizeof(dev_info->sym_rte_dev));
+
+	return ret;
+}
+
+/* Tear down the symmetric-crypto cryptodev of one zsda PCI device:
+ * free the shared capability memzone (primary only), destroy the
+ * cryptodev and clear the back-references.
+ * NOTE(review): rte_cryptodev_pmd_get_dev() is called with zsda_dev_id,
+ * which indexes zsda_devs — confirm it is also a valid cryptodev id, as
+ * the two id spaces need not coincide.
+ */
+int
+zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (zsda_pci_dev == NULL)
+		return -ENODEV;
+	if (zsda_pci_dev->sym_dev == NULL)
+		return ZSDA_SUCCESS;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(zsda_pci_dev->sym_dev->capa_mz);
+
+	cryptodev = rte_cryptodev_pmd_get_dev(zsda_pci_dev->zsda_dev_id);
+
+	rte_cryptodev_pmd_destroy(cryptodev);
+	zsda_devs[zsda_pci_dev->zsda_dev_id].sym_rte_dev.name = NULL;
+	zsda_pci_dev->sym_dev = NULL;
+
+	return ZSDA_SUCCESS;
+}
+
+static struct cryptodev_driver zsda_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zsda_crypto_drv, cryptodev_zsda_sym_driver,
+			       zsda_sym_driver_id);
diff --git a/drivers/crypto/zsda/zsda_sym_pmd.h b/drivers/crypto/zsda/zsda_sym_pmd.h
new file mode 100644
index 0000000000..77175fed47
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_pmd.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_PMD_H_
+#define _ZSDA_SYM_PMD_H_
+
+#include "zsda_device.h"
+
+/** ZSDA Symmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_ZSDA_SYM_PMD crypto_zsda
+
+extern uint8_t zsda_sym_driver_id;
+
+/** private data structure for a ZSDA device.
+ * This ZSDA device is a device offering only symmetric crypto service,
+ * there can be one of these on each zsda_pci_device (VF).
+ */
+struct zsda_sym_dev_private {
+	struct zsda_pci_device *zsda_pci_dev;
+	/**< The zsda pci device hosting the service */
+
+	const struct rte_cryptodev_capabilities *zsda_dev_capabilities;
+	/* ZSDA device symmetric crypto capabilities */
+	const struct rte_memzone *capa_mz;
+	/* Shared memzone for storing capabilities */
+	uint16_t min_enq_burst_threshold;
+	uint32_t internal_capabilities; /* see flags ZSDA_SYM_CAP_xxx */
+};
+
+int zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_SYM_PMD_H_ */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 43364 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v4 7/8] zsda: add zsda crypto-sym
  2024-09-09  8:11 [PATCH v4 2/8] zsda: add support for zsdadev operations Hanxiao Li
                   ` (3 preceding siblings ...)
  2024-09-09  8:11 ` [PATCH v4 6/8] zsda: add zsda crypto-pmd Hanxiao Li
@ 2024-09-09  8:11 ` Hanxiao Li
  2024-09-09  8:11 ` [PATCH v4 8/8] zsda: add zsda crypto-session and compile file Hanxiao Li
  5 siblings, 0 replies; 7+ messages in thread
From: Hanxiao Li @ 2024-09-09  8:11 UTC (permalink / raw)
  To: dev; +Cc: wang.yong19, Hanxiao Li


[-- Attachment #1.1.1: Type: text/plain, Size: 10019 bytes --]

Add new files zsda_sym.c and zsda_sym.h in drivers/crypto/zsda

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 drivers/crypto/zsda/zsda_sym.c | 286 +++++++++++++++++++++++++++++++++
 drivers/crypto/zsda/zsda_sym.h |  25 +++
 2 files changed, 311 insertions(+)
 create mode 100644 drivers/crypto/zsda/zsda_sym.c
 create mode 100644 drivers/crypto/zsda/zsda_sym.h

diff --git a/drivers/crypto/zsda/zsda_sym.c b/drivers/crypto/zsda/zsda_sym.c
new file mode 100644
index 0000000000..425cb36643
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym.c
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "cryptodev_pmd.h"
+
+#include "zsda_logs.h"
+#include "zsda_sym.h"
+#include "zsda_sym_pmd.h"
+#include "zsda_sym_session.h"
+
+#define choose_dst_mbuf(mbuf_src, mbuf_dst) ((mbuf_dst) == NULL ? (mbuf_src) : (mbuf_dst))
+#define LBADS_MAX_REMAINDER (16 - 1)
+
+/* Copy @n bytes from @src to @dst with the byte order reversed
+ * (dst[0] receives src[n-1], and so on).
+ */
+void
+zsda_reverse_memcpy(uint8_t *dst, const uint8_t *src, size_t n)
+{
+	while (n--)
+		*dst++ = src[n];
+}
+
+/* Map the session's auth algorithm to the device hash opcode;
+ * ZSDA_OPC_INVALID for anything the engine does not support.
+ */
+static uint8_t
+zsda_get_opcode_hash(struct zsda_sym_session *sess)
+{
+	const enum rte_crypto_auth_algorithm algo = sess->auth.algo;
+
+	if (algo == RTE_CRYPTO_AUTH_SHA1)
+		return ZSDA_OPC_HASH_SHA1;
+	if (algo == RTE_CRYPTO_AUTH_SHA224)
+		return ZSDA_OPC_HASH_SHA2_224;
+	if (algo == RTE_CRYPTO_AUTH_SHA256)
+		return ZSDA_OPC_HASH_SHA2_256;
+	if (algo == RTE_CRYPTO_AUTH_SHA384)
+		return ZSDA_OPC_HASH_SHA2_384;
+	if (algo == RTE_CRYPTO_AUTH_SHA512)
+		return ZSDA_OPC_HASH_SHA2_512;
+	if (algo == RTE_CRYPTO_AUTH_SM3)
+		return ZSDA_OPC_HASH_SM3;
+
+	return ZSDA_OPC_INVALID;
+}
+
+/* Map the session's cipher algorithm, direction and key size to the
+ * device opcode; ZSDA_OPC_INVALID for unsupported combinations.
+ */
+static uint8_t
+zsda_get_opcode_crypto(struct zsda_sym_session *sess)
+{
+	const enum rte_crypto_cipher_algorithm algo = sess->cipher.algo;
+	const int encrypt = (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+	size_t key_len;
+
+	if (!encrypt && sess->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT)
+		return ZSDA_OPC_INVALID;
+
+	key_len = encrypt ? sess->cipher.key_encry.length
+			  : sess->cipher.key_decry.length;
+
+	if (algo == RTE_CRYPTO_CIPHER_SM4_XTS)
+		return encrypt ? ZSDA_OPC_EC_SM4_XTS_256
+			       : ZSDA_OPC_DC_SM4_XTS_256;
+
+	if (algo == RTE_CRYPTO_CIPHER_AES_XTS) {
+		if (key_len == 16)
+			return encrypt ? ZSDA_OPC_EC_AES_XTS_256
+				       : ZSDA_OPC_DC_AES_XTS_256;
+		if (key_len == 32)
+			return encrypt ? ZSDA_OPC_EC_AES_XTS_512
+				       : ZSDA_OPC_DC_AES_XTS_512;
+	}
+
+	return ZSDA_OPC_INVALID;
+}
+
+/* Return 1 when @op_in belongs to a cipher-only session configured for
+ * encryption, 0 otherwise.
+ */
+int
+zsda_encry_match(const void *op_in)
+{
+	const struct rte_crypto_op *op = op_in;
+	const struct zsda_sym_session *sess =
+		(const struct zsda_sym_session *)
+			op->sym->session->driver_priv_data;
+
+	return sess->chain_order == ZSDA_SYM_CHAIN_ONLY_CIPHER &&
+	       sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+}
+
+/* Return 1 when @op_in belongs to a cipher-only session configured for
+ * decryption, 0 otherwise.
+ */
+int
+zsda_decry_match(const void *op_in)
+{
+	const struct rte_crypto_op *op = op_in;
+	const struct zsda_sym_session *sess =
+		(const struct zsda_sym_session *)
+			op->sym->session->driver_priv_data;
+
+	return sess->chain_order == ZSDA_SYM_CHAIN_ONLY_CIPHER &&
+	       sess->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT;
+}
+
+/* Return 1 when @op_in belongs to an auth-only (hash) session. */
+int
+zsda_hash_match(const void *op_in)
+{
+	const struct rte_crypto_op *op = op_in;
+	const struct zsda_sym_session *sess =
+		(const struct zsda_sym_session *)
+			op->sym->session->driver_priv_data;
+
+	return sess->chain_order == ZSDA_SYM_CHAIN_ONLY_AUTH;
+}
+
+/* Validate the cipher payload length against the configured data-unit
+ * (LBADS) size. Returns ZSDA_SUCCESS or ZSDA_FAILED.
+ *
+ * Fix: return ZSDA_SUCCESS instead of a bare 0 for consistency with the
+ * other status-returning helpers in this driver.
+ */
+static int
+zsda_check_len_lbads(uint32_t data_len, uint32_t lbads_size)
+{
+	/* The engine needs at least one 16-byte block of payload. */
+	if (data_len < 16) {
+		ZSDA_LOG(ERR, "data_len wrong!");
+		return ZSDA_FAILED;
+	}
+
+	if (lbads_size != 0) {
+		uint32_t rem = data_len % lbads_size;
+
+		/* A trailing partial data unit is only acceptable when it
+		 * exceeds LBADS_MAX_REMAINDER bytes — presumably a hardware
+		 * restriction; confirm against the device spec.
+		 */
+		if (rem != 0 && rem <= LBADS_MAX_REMAINDER) {
+			ZSDA_LOG(ERR, "data_len wrong!");
+			return ZSDA_FAILED;
+		}
+	}
+
+	return ZSDA_SUCCESS;
+}
+
+/**
+ * Build a cipher (AES/SM4-XTS) work-queue entry for one crypto op.
+ *
+ * Fills the source and destination SGLs from the op's mbufs, then writes
+ * the WQE at slot @new_tail of @queue and records the op in the matching
+ * cookie. Returns 0 on success, ZSDA_FAILED on a bad payload length, or
+ * the SGL-fill error code.
+ *
+ * Fix: use the named ZSDA_SYM_XTS_IV_SLBA_OFF constant instead of a
+ * magic 8 when splitting the IV.
+ */
+int
+zsda_build_cipher_request(void *op_in, const struct zsda_queue *queue,
+			 void **op_cookies, const uint16_t new_tail)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+
+	struct rte_cryptodev_sym_session *session =
+		(struct rte_cryptodev_sym_session *)op->sym->session;
+	struct zsda_sym_session *sess =
+		(struct zsda_sym_session *)session->driver_priv_data;
+
+	/* WQE slot and per-request cookie are both indexed by the tail. */
+	struct zsda_wqe_crpt *wqe =
+		(struct zsda_wqe_crpt *)(queue->base_addr +
+					 (new_tail * queue->msg_size));
+	struct zsda_op_cookie *cookie =
+		(struct zsda_op_cookie *)op_cookies[new_tail];
+	struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+	struct zsda_sgl *sgl_dst = (struct zsda_sgl *)&cookie->sgl_dst;
+	struct rte_mbuf *mbuf;
+
+	int ret = 0;
+	uint32_t op_offset;
+	uint32_t op_src_len;
+	uint32_t op_dst_len;
+	const uint8_t *iv_addr = NULL;
+	uint8_t iv_len = 0;
+
+	ret = zsda_check_len_lbads(op->sym->cipher.data.length,
+				   sess->cipher.dataunit_len);
+	if (ret)
+		return ZSDA_FAILED;
+
+	op_offset = op->sym->cipher.data.offset;
+	op_src_len = op->sym->cipher.data.length;
+	mbuf = op->sym->m_src;
+	ret = zsda_fill_sgl(mbuf, op_offset, sgl_src, cookie->sgl_src_phys_addr,
+			    op_src_len, NULL);
+
+	/* Out-of-place when m_dst is set, otherwise in-place on m_src. */
+	mbuf = choose_dst_mbuf(op->sym->m_src, op->sym->m_dst);
+	op_dst_len = mbuf->pkt_len - op_offset;
+	ret |= zsda_fill_sgl(mbuf, op_offset, sgl_dst,
+			     cookie->sgl_dst_phys_addr, op_dst_len, NULL);
+
+	if (ret) {
+		ZSDA_LOG(ERR, E_FUNC);
+		return ret;
+	}
+
+	cookie->used = true;
+	cookie->sid = new_tail;
+	cookie->op = op;
+
+	memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+	wqe->rx_length = op_src_len;
+	wqe->tx_length = op_dst_len;
+	wqe->valid = queue->valid;
+	wqe->op_code = zsda_get_opcode_crypto(sess);
+	wqe->sid = cookie->sid;
+	wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+	wqe->tx_sgl_type = SGL_ELM_TYPE_LIST;
+	wqe->rx_addr = cookie->sgl_src_phys_addr;
+	wqe->tx_addr = cookie->sgl_dst_phys_addr;
+	wqe->cfg.lbads = sess->cipher.dataunit_len;
+
+	/* The session already holds the key in device byte order. */
+	if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		memcpy((uint8_t *)wqe->cfg.key, sess->cipher.key_encry.data,
+		       ZSDA_CIPHER_KEY_MAX_LEN);
+	else
+		memcpy((uint8_t *)wqe->cfg.key, sess->cipher.key_decry.data,
+		       ZSDA_CIPHER_KEY_MAX_LEN);
+
+	/* Split the IV into the high/low halves of the start LBA fields;
+	 * assumes a 16-byte XTS IV (iv_len / 2 == 8) — TODO confirm.
+	 */
+	iv_addr = (const uint8_t *)rte_crypto_op_ctod_offset(
+			       op, char *, sess->cipher.iv.offset);
+	iv_len = sess->cipher.iv.length;
+	zsda_reverse_memcpy((uint8_t *)wqe->cfg.slba_H, iv_addr, iv_len / 2);
+	zsda_reverse_memcpy((uint8_t *)wqe->cfg.slba_L,
+			    iv_addr + ZSDA_SYM_XTS_IV_SLBA_OFF, iv_len / 2);
+
+	return ret;
+}
+
+/**
+ * Build a hash (SHA-1/SHA-2/SM3) work-queue entry for one crypto op.
+ *
+ * The digest is written by the device directly to the op's digest
+ * physical address, so only the source side needs an SGL. Returns 0 on
+ * success, ZSDA_FAILED on an unsupported algorithm, or the SGL-fill
+ * error code.
+ */
+int
+zsda_build_hash_request(void *op_in, const struct zsda_queue *queue,
+	       void **op_cookies, const uint16_t new_tail)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)op_in;
+
+	struct rte_cryptodev_sym_session *session =
+		(struct rte_cryptodev_sym_session *)op->sym->session;
+	struct zsda_sym_session *sess =
+		(struct zsda_sym_session *)session->driver_priv_data;
+
+	/* WQE slot and per-request cookie are both indexed by the tail. */
+	struct zsda_wqe_crpt *wqe =
+		(struct zsda_wqe_crpt *)(queue->base_addr +
+					 (new_tail * queue->msg_size));
+	struct zsda_op_cookie *cookie =
+		(struct zsda_op_cookie *)op_cookies[new_tail];
+	struct zsda_sgl *sgl_src = (struct zsda_sgl *)&cookie->sgl_src;
+	uint8_t opcode;
+	uint32_t op_offset;
+	uint32_t op_src_len;
+	int ret = 0;
+
+	memset(wqe, 0, sizeof(struct zsda_wqe_crpt));
+	wqe->rx_length = op->sym->auth.data.length;
+	wqe->tx_length = sess->auth.digest_length;
+
+	/* Validate the algorithm before doing any SGL work. */
+	opcode = zsda_get_opcode_hash(sess);
+	if (opcode == ZSDA_OPC_INVALID) {
+		ZSDA_LOG(ERR, E_FUNC);
+		return ZSDA_FAILED;
+	}
+
+	op_offset = op->sym->auth.data.offset;
+	op_src_len = op->sym->auth.data.length;
+	ret = zsda_fill_sgl(op->sym->m_src, op_offset, sgl_src,
+				   cookie->sgl_src_phys_addr, op_src_len, NULL);
+	if (ret) {
+		ZSDA_LOG(ERR, E_FUNC);
+		return ret;
+	}
+
+	cookie->used = true;
+	cookie->sid = new_tail;
+	cookie->op = op;
+	wqe->valid = queue->valid;
+	wqe->op_code = opcode;
+	wqe->sid = cookie->sid;
+	wqe->rx_sgl_type = SGL_ELM_TYPE_LIST;
+	/* Digest side is a single flat buffer, not a scatter list. */
+	wqe->tx_sgl_type = SGL_ELM_TYPE_PHYS_ADDR;
+	wqe->rx_addr = cookie->sgl_src_phys_addr;
+	wqe->tx_addr = op->sym->auth.digest.phys_addr;
+
+	return ret;
+}
+
+void
+zsda_crypto_callback(void *cookie_in, const struct zsda_cqe *cqe)
+{
+	struct zsda_op_cookie *tmp_cookie = (struct zsda_op_cookie *)cookie_in;
+	struct rte_crypto_op *op = (struct rte_crypto_op *)tmp_cookie->op;
+
+	if (!(CQE_ERR0(cqe->err0) || CQE_ERR1(cqe->err1)))
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	else
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+}
diff --git a/drivers/crypto/zsda/zsda_sym.h b/drivers/crypto/zsda/zsda_sym.h
new file mode 100644
index 0000000000..c822fd38f7
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_H_
+#define _ZSDA_SYM_H_
+
+#include "zsda_common.h"
+#include "zsda_qp.h"
+
+/* Build a cipher (XTS) WQE for @op_in at slot @new_tail of @queue. */
+int zsda_build_cipher_request(void *op_in, const struct zsda_queue *queue,
+			 void **op_cookies, const uint16_t new_tail);
+
+/* Build a hash (SHA/SM3) WQE for @op_in at slot @new_tail of @queue. */
+int zsda_build_hash_request(void *op_in, const struct zsda_queue *queue,
+		       void **op_cookies, const uint16_t new_tail);
+
+/* Predicates that route a crypto op to the matching request builder. */
+int zsda_encry_match(const void *op_in);
+int zsda_decry_match(const void *op_in);
+int zsda_hash_match(const void *op_in);
+
+/* Copy @n bytes from @src to @dst with the byte order reversed. */
+void zsda_reverse_memcpy(uint8_t *dst, const uint8_t *src, size_t n);
+
+/* Completion handler: translate a CQE status into the op's status. */
+void zsda_crypto_callback(void *cookie_in, const struct zsda_cqe *cqe);
+
+#endif /* _ZSDA_SYM_H_ */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 21997 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v4 8/8] zsda: add zsda crypto-session and compile file
  2024-09-09  8:11 [PATCH v4 2/8] zsda: add support for zsdadev operations Hanxiao Li
                   ` (4 preceding siblings ...)
  2024-09-09  8:11 ` [PATCH v4 7/8] zsda: add zsda crypto-sym Hanxiao Li
@ 2024-09-09  8:11 ` Hanxiao Li
  5 siblings, 0 replies; 7+ messages in thread
From: Hanxiao Li @ 2024-09-09  8:11 UTC (permalink / raw)
  To: dev; +Cc: wang.yong19, Hanxiao Li


[-- Attachment #1.1.1: Type: text/plain, Size: 21667 bytes --]

Add new file zsda_sym_session.c, zsda_sym_session.h
and modify drivers/common/zsda/meson.build

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 drivers/common/zsda/meson.build        |  17 +-
 drivers/common/zsda/zsda_device.h      |   1 -
 drivers/crypto/zsda/zsda_sym_session.c | 503 +++++++++++++++++++++++++
 drivers/crypto/zsda/zsda_sym_session.h |  82 ++++
 4 files changed, 600 insertions(+), 3 deletions(-)
 create mode 100644 drivers/crypto/zsda/zsda_sym_session.c
 create mode 100644 drivers/crypto/zsda/zsda_sym_session.h

diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
index b12ef17476..57b54201f2 100644
--- a/drivers/common/zsda/meson.build
+++ b/drivers/common/zsda/meson.build
@@ -3,7 +3,7 @@
 
 config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
 
-deps += ['bus_pci', 'compressdev']
+deps += ['bus_pci', 'cryptodev', 'compressdev']
 sources += files(
 		'zsda_common.c',
 		'zsda_logs.c',
@@ -15,7 +15,6 @@ zsda_compress = true
 zsda_compress_path = 'compress/zsda'
 zsda_compress_relpath = '../../' + zsda_compress_path
 includes += include_directories(zsda_compress_relpath)
-
 if zsda_compress
 zlib = dependency('zlib', required: false, method: 'pkg-config')
 	foreach f: ['zsda_comp_pmd.c', 'zsda_comp.c']
@@ -23,3 +22,17 @@ zlib = dependency('zlib', required: false, method: 'pkg-config')
 	endforeach
 	ext_deps += zlib
 endif
+
+
+zsda_crypto = true
+zsda_crypto_path = 'crypto/zsda'
+zsda_crypto_relpath = '../../' + zsda_crypto_path
+if zsda_crypto
+libcrypto = dependency('libcrypto', required: false, method: 'pkg-config')
+	foreach f: ['zsda_sym_pmd.c', 'zsda_sym_session.c', 'zsda_sym.c']
+		sources += files(join_paths(zsda_crypto_relpath, f))
+	endforeach
+	deps += ['security']
+	ext_deps += libcrypto
+	cflags += ['-DBUILD_ZSDA_SYM']
+endif
diff --git a/drivers/common/zsda/zsda_device.h b/drivers/common/zsda/zsda_device.h
index 07dca183ab..51ff741840 100644
--- a/drivers/common/zsda/zsda_device.h
+++ b/drivers/common/zsda/zsda_device.h
@@ -18,7 +18,6 @@ struct zsda_device_info {
 
 	struct rte_pci_device *pci_dev;
 
-	// struct rte_device sym_rte_dev;
 	struct rte_device sym_rte_dev;
 	/**< This represents the crypto sym subset of this pci device.
 	 * Register with this rather than with the one in
diff --git a/drivers/crypto/zsda/zsda_sym_session.c b/drivers/crypto/zsda/zsda_sym_session.c
new file mode 100644
index 0000000000..dbeb569985
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_session.c
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include "cryptodev_pmd.h"
+
+#include "zsda_sym_session.h"
+#include "zsda_logs.h"
+
+/**************** AES KEY EXPANSION ****************/
+/**
+ * AES S-boxes
+ * Sbox table: 8bits input convert to 8bits output
+ **/
+static const unsigned char aes_sbox[256] = {
+	/* 0     1    2      3     4    5     6     7      8    9     A      B
+	 * C     D    E     F
+	 */
+	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
+	0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
+	0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
+	0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
+	0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
+	0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
+	0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
+	0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
+	0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
+	0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
+	0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
+	0xb0, 0x54, 0xbb, 0x16};
+
+/**
+ * The round constant word array, Rcon[i]
+ *
+ * From Wikipedia's article on the Rijndael key schedule @
+ * https://en.wikipedia.org/wiki/Rijndael_key_schedule#Rcon "Only the first some
+ * of these constants are actually used – up to rcon[10] for AES-128 (as 11
+ * round keys are needed), up to rcon[8] for AES-192, up to rcon[7] for AES-256.
+ * rcon[0] is not used in AES algorithm."
+ */
+static const unsigned char Rcon[11] = {0x8d, 0x01, 0x02, 0x04, 0x08, 0x10,
+				       0x20, 0x40, 0x80, 0x1b, 0x36};
+
+#define GET_AES_SBOX_VAL(num) (aes_sbox[(num)])
+
+/**************** SM4 KEY EXPANSION ****************/
+/*
+ * 32-bit integer manipulation macros (big endian)
+ */
+#ifndef GET_ULONG_BE
+#define GET_ULONG_BE(n, b, i)                                                  \
+	{                                                                      \
+		(n) = ((unsigned int)(b)[(i)] << 24) |                         \
+		      ((unsigned int)(b)[(i) + 1] << 16) |                     \
+		      ((unsigned int)(b)[(i) + 2] << 8) |                      \
+		      ((unsigned int)(b)[(i) + 3]);                            \
+	}
+#endif
+
+#ifndef PUT_ULONG_BE
+#define PUT_ULONG_BE(n, b, i)                                                  \
+	{                                                                      \
+		(b)[(i)] = (unsigned char)((n) >> 24);                         \
+		(b)[(i) + 1] = (unsigned char)((n) >> 16);                     \
+		(b)[(i) + 2] = (unsigned char)((n) >> 8);                      \
+		(b)[(i) + 3] = (unsigned char)((n));                           \
+	}
+#endif
+
+/**
+ * Rotate-shift-left macro definition.
+ *
+ **/
+#define SHL(x, n)  (((x)&0xFFFFFFFF) << n)
+#define ROTL(x, n) (SHL((x), n) | ((x) >> (32 - n)))
+
+/**
+ * SM4 S-boxes
+ * Sbox table: 8 bits input convert to 8 bits output
+ **/
+static unsigned char sm4_sbox[16][16] = {
+	{0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2,
+	 0x28, 0xfb, 0x2c, 0x05},
+	{0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26,
+	 0x49, 0x86, 0x06, 0x99},
+	{0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43,
+	 0xed, 0xcf, 0xac, 0x62},
+	{0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa,
+	 0x75, 0x8f, 0x3f, 0xa6},
+	{0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19,
+	 0xe6, 0x85, 0x4f, 0xa8},
+	{0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b,
+	 0x70, 0x56, 0x9d, 0x35},
+	{0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b,
+	 0x01, 0x21, 0x78, 0x87},
+	{0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7,
+	 0xa0, 0xc4, 0xc8, 0x9e},
+	{0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce,
+	 0xf9, 0x61, 0x15, 0xa1},
+	{0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30,
+	 0xf5, 0x8c, 0xb1, 0xe3},
+	{0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab,
+	 0x0d, 0x53, 0x4e, 0x6f},
+	{0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72,
+	 0x6d, 0x6c, 0x5b, 0x51},
+	{0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41,
+	 0x1f, 0x10, 0x5a, 0xd8},
+	{0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12,
+	 0xb8, 0xe5, 0xb4, 0xb0},
+	{0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09,
+	 0xc5, 0x6e, 0xc6, 0x84},
+	{0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e,
+	 0xd7, 0xcb, 0x39, 0x48},
+};
+
+/* System parameter */
+static const unsigned int FK[4] = {0xa3b1bac6, 0x56aa3350, 0x677d9197,
+				   0xb27022dc};
+
+/* fixed parameter */
+static const unsigned int CK[32] = {
+	0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1,
+	0xa8afb6bd, 0xc4cbd2d9, 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+	0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, 0xc0c7ced5, 0xdce3eaf1,
+	0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+	0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41,
+	0x484f565d, 0x646b7279};
+
+/*
+ * private function:
+ * Look up one byte in the SM4 S-box.
+ * args:    [in] inch: 0x00~0xFF (8 bits unsigned value).
+ */
+static unsigned char
+sm4Sbox(unsigned char inch)
+{
+	/* The 16x16 table is contiguous, so index it flat. */
+	const unsigned char *flat = (const unsigned char *)sm4_sbox;
+
+	return flat[inch];
+}
+
+/* private function:
+ * Derive one round-key word from @ka: apply the SM4 S-box to each of
+ * its four bytes, then XOR the result with two fixed left rotations.
+ * return:  sk[i]: i{0,1,2,3,...31}.
+ */
+static unsigned int
+sm4CalciRK(unsigned int ka)
+{
+	unsigned char in[4];
+	unsigned char out[4];
+	unsigned int t = 0;
+
+	PUT_ULONG_BE(ka, in, 0)
+	out[0] = sm4Sbox(in[0]);
+	out[1] = sm4Sbox(in[1]);
+	out[2] = sm4Sbox(in[2]);
+	out[3] = sm4Sbox(in[3]);
+	GET_ULONG_BE(t, out, 0)
+
+	return t ^ ROTL(t, 13) ^ ROTL(t, 23);
+}
+
+/* Expand a 16-byte SM4 key into the 32 round keys @SK, per the
+ * standard SM4 key schedule (FK whitening, CK round constants).
+ */
+static void
+zsda_sm4_key_expansion(unsigned int SK[32], const uint8_t key[16])
+{
+	unsigned int MK[4];
+	unsigned int k[36];
+	unsigned int i = 0;
+
+	/* Load the key big-endian and whiten with the system parameter FK. */
+	GET_ULONG_BE(MK[0], key, 0);
+	GET_ULONG_BE(MK[1], key, 4);
+	GET_ULONG_BE(MK[2], key, 8);
+	GET_ULONG_BE(MK[3], key, 12);
+	k[0] = MK[0] ^ FK[0];
+	k[1] = MK[1] ^ FK[1];
+	k[2] = MK[2] ^ FK[2];
+	k[3] = MK[3] ^ FK[3];
+	/* Each round key depends on the previous three plus a CK constant. */
+	for (; i < 32; i++) {
+		k[i + 4] = k[i] ^
+			   (sm4CalciRK(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]));
+		SK[i] = k[i + 4];
+	}
+}
+
+/* Serialize one 32-bit word into four bytes, big-endian. */
+static void
+u32_to_u8(uint32_t *u_int32_t_data, uint8_t *u8_data)
+{
+	const uint32_t v = *u_int32_t_data;
+
+	u8_data[0] = (uint8_t)(v >> 24);
+	u8_data[1] = (uint8_t)(v >> 16);
+	u8_data[2] = (uint8_t)(v >> 8);
+	u8_data[3] = (uint8_t)v;
+}
+
+/* Standard AES (Rijndael) key expansion: expand @key (key_len bytes)
+ * into (round_num + 1) 16-byte round keys stored in @round_key.
+ *
+ * Fix: use the standard uint8_t instead of the nonstandard BSD/glibc
+ * u_int8_t for the RotWord temporary.
+ */
+static void
+zsda_aes_key_expansion(uint8_t *round_key, uint32_t round_num,
+		       const uint8_t *key, uint32_t key_len)
+{
+	uint32_t i, j, k, nk, nr;
+	uint8_t tempa[4];
+
+	nk = key_len >> 2;	/* key length in 32-bit words */
+	nr = round_num;
+
+	/* The first round key is the key itself. */
+	for (i = 0; i < nk; ++i) {
+		round_key[(i * 4) + 0] = key[(i * 4) + 0];
+		round_key[(i * 4) + 1] = key[(i * 4) + 1];
+		round_key[(i * 4) + 2] = key[(i * 4) + 2];
+		round_key[(i * 4) + 3] = key[(i * 4) + 3];
+	}
+
+	/* All other round keys are found from the previous round keys. */
+	for (i = nk; i < (4 * (nr + 1)); ++i) {
+		k = (i - 1) * 4;
+		tempa[0] = round_key[k + 0];
+		tempa[1] = round_key[k + 1];
+		tempa[2] = round_key[k + 2];
+		tempa[3] = round_key[k + 3];
+
+		if ((nk != 0) && ((i % nk) == 0)) {
+			/* This function shifts the 4 bytes in a word to the
+			 * left once. [a0,a1,a2,a3] becomes [a1,a2,a3,a0]
+			 * Function RotWord()
+			 */
+			{
+				const uint8_t u8tmp = tempa[0];
+
+				tempa[0] = tempa[1];
+				tempa[1] = tempa[2];
+				tempa[2] = tempa[3];
+				tempa[3] = u8tmp;
+			}
+
+			/* SubWord() is a function that takes a four-byte input
+			 * word and applies the S-box to each of the four bytes
+			 * to produce an output word. Function Subword()
+			 */
+			{
+				tempa[0] = GET_AES_SBOX_VAL(tempa[0]);
+				tempa[1] = GET_AES_SBOX_VAL(tempa[1]);
+				tempa[2] = GET_AES_SBOX_VAL(tempa[2]);
+				tempa[3] = GET_AES_SBOX_VAL(tempa[3]);
+			}
+
+			tempa[0] = tempa[0] ^ Rcon[i / nk];
+		}
+
+		/* AES-256 (nk == 8) applies an extra SubWord mid-cycle. */
+		if (nk == 8) {
+			if ((i % nk) == 4) {
+				/* Function Subword() */
+				{
+					tempa[0] = GET_AES_SBOX_VAL(tempa[0]);
+					tempa[1] = GET_AES_SBOX_VAL(tempa[1]);
+					tempa[2] = GET_AES_SBOX_VAL(tempa[2]);
+					tempa[3] = GET_AES_SBOX_VAL(tempa[3]);
+				}
+			}
+		}
+
+		j = i * 4;
+		k = (i - nk) * 4;
+		round_key[j + 0] = round_key[k + 0] ^ tempa[0];
+		round_key[j + 1] = round_key[k + 1] ^ tempa[1];
+		round_key[j + 2] = round_key[k + 2] ^ tempa[2];
+		round_key[j + 3] = round_key[k + 3] ^ tempa[3];
+	}
+}
+
+/* Lay out the decryption key material in @key (64 bytes) in the byte
+ * order the device expects: the XTS tweak key (key2, taken from
+ * key1_ptr + skey_len) followed by the derived decryption key1.
+ * For AES the last round key(s) of the expanded schedule serve as the
+ * decryption key; for SM4 the round keys are emitted in reverse order.
+ * NOTE(review): offsets follow the ZSDA_SYM_XTS_*_OFF layout — confirm
+ * against the device programming guide.
+ */
+static void
+zsda_decry_set_key(uint8_t key[64], const uint8_t *key1_ptr, uint8_t skey_len,
+	      enum rte_crypto_cipher_algorithm algo)
+{
+	uint8_t round_num;
+	uint8_t dec_key1[ZSDA_AES_MAX_KEY_BYTE_LEN] = {0};
+	uint8_t aes_round_key[ZSDA_AES_MAX_EXP_BYTE_SIZE] = {0};
+	uint32_t sm4_round_key[ZSDA_SM4_MAX_EXP_DWORD_SIZE] = {0};
+
+	switch (algo) {
+	case RTE_CRYPTO_CIPHER_AES_XTS:
+		/* 10 rounds for a 16-byte half-key, 14 for 32 bytes. */
+		round_num = (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN)
+				    ? ZSDA_AES256_ROUND_NUM
+				    : ZSDA_AES512_ROUND_NUM;
+		zsda_aes_key_expansion(aes_round_key, round_num, key1_ptr,
+				       skey_len);
+		/* The last round key is the first 16 decryption-key bytes. */
+		rte_memcpy(dec_key1,
+			   ((uint8_t *)aes_round_key + (16 * round_num)), 16);
+
+		/* For a 32-byte half-key the next-to-last round key supplies
+		 * the remaining 16 bytes (bounds-checked against the
+		 * expanded-schedule buffer).
+		 */
+		if (skey_len == ZSDA_SYM_XTS_512_SKEY_LEN &&
+			(16 * round_num) <= ZSDA_AES_MAX_EXP_BYTE_SIZE) {
+			for (int i = 0; i < 16; i++) {
+				dec_key1[i + 16] =
+					aes_round_key[(16 * (round_num - 1)) + i];
+			}
+		}
+		break;
+	case RTE_CRYPTO_CIPHER_SM4_XTS:
+		/* Emit the last four SM4 round keys, highest index first. */
+		zsda_sm4_key_expansion(sm4_round_key, key1_ptr);
+		for (size_t i = 0; i < 4; i++)
+			u32_to_u8((uint32_t *)sm4_round_key +
+					  ZSDA_SM4_MAX_EXP_DWORD_SIZE - 1 - i,
+				  dec_key1 + (4 * i));
+		break;
+	default:
+		ZSDA_LOG(ERR, "unknown cipher algo!");
+		return;
+	}
+
+	/* Write key2 then dec_key1, byte-reversed, at the layout offsets. */
+	if (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN) {
+		zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_256_KEY2_OFF,
+			       key1_ptr + skey_len, skey_len);
+		zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_256_KEY1_OFF,
+			       dec_key1, skey_len);
+	} else {
+		zsda_reverse_memcpy(key, key1_ptr + skey_len, skey_len);
+		zsda_reverse_memcpy((uint8_t *)key + ZSDA_SYM_XTS_512_KEY1_OFF,
+			       dec_key1, skey_len);
+	}
+}
+
+/* Translate a data-unit length into the device's LBADS code;
+ * ZSDA_AES_LBADS_INDICATE_INVALID marks an unsupported length.
+ *
+ * Fix: log with %u — dataunit_len is unsigned (was %d).
+ */
+static uint8_t
+zsda_sym_lbads(uint32_t dataunit_len)
+{
+	switch (dataunit_len) {
+	case ZSDA_AES_LBADS_0:
+		return ZSDA_AES_LBADS_INDICATE_0;
+	case ZSDA_AES_LBADS_512:
+		return ZSDA_AES_LBADS_INDICATE_512;
+	case ZSDA_AES_LBADS_4096:
+		return ZSDA_AES_LBADS_INDICATE_4096;
+	case ZSDA_AES_LBADS_8192:
+		return ZSDA_AES_LBADS_INDICATE_8192;
+	default:
+		ZSDA_LOG(ERR, "dataunit_len should be 0/512/4096/8192 - %u.",
+			 dataunit_len);
+		return ZSDA_AES_LBADS_INDICATE_INVALID;
+	}
+}
+
+/* Populate the cipher half of the session from @cipher_xform.
+ *
+ * The XTS key halves are stored in the byte order the device expects
+ * (key2 then key1, byte-reversed); for decryption the derived key is
+ * computed by zsda_decry_set_key(). Returns 0 on success, -EINVAL on a
+ * bad key length or data-unit length.
+ *
+ * Fix: compare against the named ZSDA_AES_LBADS_INDICATE_INVALID
+ * constant instead of the magic 0xff.
+ */
+static int
+zsda_set_session_cipher(struct zsda_sym_session *sess,
+				   struct rte_crypto_cipher_xform *cipher_xform)
+{
+	uint8_t skey_len = 0;
+	const uint8_t *key1_ptr = NULL;
+
+	if (cipher_xform->key.length > ZSDA_CIPHER_KEY_MAX_LEN) {
+		ZSDA_LOG(ERR, "key length not supported");
+		return -EINVAL;
+	}
+
+	sess->chain_order = ZSDA_SYM_CHAIN_ONLY_CIPHER;
+	sess->cipher.iv.offset = cipher_xform->iv.offset;
+	sess->cipher.iv.length = cipher_xform->iv.length;
+	sess->cipher.op = cipher_xform->op;
+	sess->cipher.algo = cipher_xform->algo;
+	sess->cipher.dataunit_len = cipher_xform->dataunit_len;
+	sess->cipher.lbads = zsda_sym_lbads(cipher_xform->dataunit_len);
+	if (sess->cipher.lbads == ZSDA_AES_LBADS_INDICATE_INVALID) {
+		ZSDA_LOG(ERR, "dataunit_len wrong!");
+		return -EINVAL;
+	}
+
+	/* An XTS key is two equal halves: key1 (data) and key2 (tweak). */
+	skey_len = (cipher_xform->key.length / 2) & 0xff;
+
+	/* key set */
+	if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		sess->cipher.key_encry.length = cipher_xform->key.length;
+		if (skey_len == ZSDA_SYM_XTS_256_SKEY_LEN) {
+			zsda_reverse_memcpy((uint8_t *)sess->cipher.key_encry.data +
+					       ZSDA_SYM_XTS_256_KEY2_OFF,
+				       (cipher_xform->key.data + skey_len),
+				       skey_len);
+			zsda_reverse_memcpy(((uint8_t *)sess->cipher.key_encry.data +
+					ZSDA_SYM_XTS_256_KEY1_OFF),
+				       cipher_xform->key.data, skey_len);
+		} else
+			zsda_reverse_memcpy((uint8_t *)sess->cipher.key_encry.data,
+				       cipher_xform->key.data,
+				       cipher_xform->key.length);
+	} else if (sess->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		sess->cipher.key_decry.length = cipher_xform->key.length;
+		key1_ptr = cipher_xform->key.data;
+		zsda_decry_set_key(sess->cipher.key_decry.data, key1_ptr, skey_len,
+			      sess->cipher.algo);
+	}
+
+	return 0;
+}
+
+/* Record the auth parameters; an auth xform alone means hash-only. */
+static void
+zsda_set_session_auth(struct zsda_sym_session *sess,
+				 struct rte_crypto_auth_xform *xform)
+{
+	sess->chain_order = ZSDA_SYM_CHAIN_ONLY_AUTH;
+	sess->auth.algo = xform->algo;
+	sess->auth.op = xform->op;
+	sess->auth.digest_length = xform->digest_length;
+}
+
+/* Return the first AUTH transform in the chain, or NULL.
+ *
+ * Fix: the original do-while dereferenced @xform before checking it,
+ * crashing on a NULL chain — which zsda_crypto_get_chain_order()
+ * explicitly treats as a legal input.
+ */
+static struct rte_crypto_auth_xform *
+zsda_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+	while (xform != NULL) {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return &xform->auth;
+
+		xform = xform->next;
+	}
+
+	return NULL;
+}
+
+/* Return the first CIPHER transform in the chain, or NULL.
+ *
+ * Fix: the original do-while dereferenced @xform before checking it,
+ * crashing on a NULL chain — which zsda_crypto_get_chain_order()
+ * explicitly treats as a legal input.
+ */
+static struct rte_crypto_cipher_xform *
+zsda_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+	while (xform != NULL) {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return &xform->cipher;
+
+		xform = xform->next;
+	}
+
+	return NULL;
+}
+
+/** Classify an xform chain: cipher-only, auth-only, or a chain. */
+static enum zsda_sym_chain_order
+zsda_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+	if (xform == NULL)
+		return ZSDA_SYM_CHAIN_NOT_SUPPORTED;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return ZSDA_SYM_CHAIN_ONLY_AUTH;
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return ZSDA_SYM_CHAIN_AUTH_CIPHER;
+		return ZSDA_SYM_CHAIN_NOT_SUPPORTED;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return ZSDA_SYM_CHAIN_ONLY_CIPHER;
+		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return ZSDA_SYM_CHAIN_CIPHER_AUTH;
+	}
+
+	return ZSDA_SYM_CHAIN_NOT_SUPPORTED;
+}
+
+/* Parse the xform chain and populate the private session structure.
+ * Returns 0 on success, -EINVAL on an unsupported chain or bad cipher
+ * parameters.
+ *
+ * Fix: propagate the return value of zsda_set_session_cipher() — it
+ * was previously discarded, so a bad key or data-unit length silently
+ * produced a "successful" but unusable session.
+ */
+int
+zsda_crypto_set_session_parameters(void *sess_priv,
+			 struct rte_crypto_sym_xform *xform)
+{
+	struct zsda_sym_session *sess = (struct zsda_sym_session *) sess_priv;
+	struct rte_crypto_cipher_xform *cipher_xform =
+			zsda_get_cipher_xform(xform);
+	struct rte_crypto_auth_xform *auth_xform =
+			zsda_get_auth_xform(xform);
+
+	int ret = 0;
+
+	sess->chain_order = zsda_crypto_get_chain_order(xform);
+	switch (sess->chain_order) {
+	case ZSDA_SYM_CHAIN_ONLY_CIPHER:
+		ret = zsda_set_session_cipher(sess, cipher_xform);
+		break;
+	case ZSDA_SYM_CHAIN_ONLY_AUTH:
+		zsda_set_session_auth(sess, auth_xform);
+		break;
+
+	default:
+		ZSDA_LOG(ERR, "Invalid chain order");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
diff --git a/drivers/crypto/zsda/zsda_sym_session.h b/drivers/crypto/zsda/zsda_sym_session.h
new file mode 100644
index 0000000000..1797e46cb3
--- /dev/null
+++ b/drivers/crypto/zsda/zsda_sym_session.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_SYM_SESSION_H_
+#define _ZSDA_SYM_SESSION_H_
+
+#include "zsda_sym.h"
+
+#define ZSDA_SYM_XTS_IV_SLBA_OFF  (8)
+#define ZSDA_SYM_XTS_256_SKEY_LEN (16)
+#define ZSDA_SYM_XTS_512_SKEY_LEN (32)
+#define ZSDA_SYM_XTS_256_KEY2_OFF (16)
+#define ZSDA_SYM_XTS_256_KEY1_OFF (48)
+#define ZSDA_SYM_XTS_512_KEY1_OFF (32)
+#define ZSDA_SYM_MIN_SRC_LEN_HASH (16)
+
+#define ZSDA_AES256_ROUND_NUM	    (10)
+#define ZSDA_AES512_ROUND_NUM	    (14)
+#define ZSDA_AES_MAX_EXP_BYTE_SIZE  (240)
+#define ZSDA_AES_MAX_KEY_BYTE_LEN   (32)
+#define ZSDA_SM4_MAX_EXP_DWORD_SIZE (32)
+
+#define ZSDA_AES_LBADS_0	  (0)
+#define ZSDA_AES_LBADS_512	  (512)
+#define ZSDA_AES_LBADS_4096	  (4096)
+#define ZSDA_AES_LBADS_8192	  (8192)
+
+#define ZSDA_AES_LBADS_INDICATE_0       (0x0)
+#define ZSDA_AES_LBADS_INDICATE_512     (0x9)
+#define ZSDA_AES_LBADS_INDICATE_4096    (0xC)
+#define ZSDA_AES_LBADS_INDICATE_8192    (0xD)
+#define ZSDA_AES_LBADS_INDICATE_INVALID (0xff)
+
+enum zsda_sym_chain_order {
+	ZSDA_SYM_CHAIN_ONLY_CIPHER,
+	ZSDA_SYM_CHAIN_ONLY_AUTH,
+	ZSDA_SYM_CHAIN_CIPHER_AUTH,
+	ZSDA_SYM_CHAIN_AUTH_CIPHER,
+	ZSDA_SYM_CHAIN_NOT_SUPPORTED
+};
+
+struct __rte_cache_aligned zsda_sym_session {
+	enum zsda_sym_chain_order chain_order;
+	/* Which service (cipher-only / auth-only / chain) this session runs */
+
+	/* Cipher Parameters */
+	struct {
+		enum rte_crypto_cipher_operation op;
+		enum rte_crypto_cipher_algorithm algo;
+		/* Keys are stored pre-arranged in the device's byte order */
+		struct {
+			uint8_t data[ZSDA_CIPHER_KEY_MAX_LEN];
+			size_t length;
+		} key_encry;
+		struct {
+			uint8_t data[ZSDA_CIPHER_KEY_MAX_LEN];
+			size_t length;
+		} key_decry;
+		/* IV location inside the rte_crypto_op private area */
+		struct {
+			uint32_t offset;
+			size_t length;
+		} iv;
+
+		uint32_t dataunit_len;
+		uint8_t lbads; /* device LBADS code derived from dataunit_len */
+	} cipher;
+
+	struct {
+		enum rte_crypto_auth_operation op;
+		/* Auth operation */
+		enum rte_crypto_auth_algorithm algo;
+		/* Auth algorithm */
+		uint16_t digest_length;
+	} auth;
+
+	bool cipher_first;
+	/* NOTE(review): not set anywhere in this patch — confirm intended use */
+};
+
+
+int zsda_crypto_set_session_parameters(void *sess_priv,
+				       struct rte_crypto_sym_xform *xform);
+
+#endif /* _ZSDA_SYM_SESSION_H_ */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 54190 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2024-09-09  8:13 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-09-09  8:11 [PATCH v4 2/8] zsda: add support for zsdadev operations Hanxiao Li
2024-09-09  8:11 ` [PATCH v4 3/8] zsda: add support for queue operation Hanxiao Li
2024-09-09  8:11 ` [PATCH v4 4/8] zsda: add zsda compressdev driver and interface Hanxiao Li
2024-09-09  8:11 ` [PATCH v4 5/8] zsda: modify files for introducing zsda cryptodev Hanxiao Li
2024-09-09  8:11 ` [PATCH v4 6/8] zsda: add zsda crypto-pmd Hanxiao Li
2024-09-09  8:11 ` [PATCH v4 7/8] zsda: add zsda crypto-sym Hanxiao Li
2024-09-09  8:11 ` [PATCH v4 8/8] zsda: add zsda crypto-session and compile file Hanxiao Li

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).