DPDK patches and discussions
 help / color / mirror / Atom feed
From: Hanxiao Li <li.hanxiao@zte.com.cn>
To: dev@dpdk.org
Cc: Hanxiao Li <li.hanxiao@zte.com.cn>
Subject: [PATCH v11 05/12] common/zsda: configure zsda queue base functions
Date: Thu, 17 Oct 2024 17:22:02 +0800	[thread overview]
Message-ID: <20241017092310.1863628-6-li.hanxiao@zte.com.cn> (raw)
In-Reply-To: <20241017092310.1863628-1-li.hanxiao@zte.com.cn>


[-- Attachment #1.1.1: Type: text/plain, Size: 25525 bytes --]

Add support for zsdadev queue interfaces,
including queue start, stop, create and remove operations.

Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn>
---
 drivers/common/zsda/meson.build |   1 +
 drivers/common/zsda/zsda_qp.c   | 715 ++++++++++++++++++++++++++++++++
 drivers/common/zsda/zsda_qp.h   | 146 +++++++
 3 files changed, 862 insertions(+)
 create mode 100644 drivers/common/zsda/zsda_qp.c
 create mode 100644 drivers/common/zsda/zsda_qp.h

diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
index 44f2375d8d..4d1eec8867 100644
--- a/drivers/common/zsda/meson.build
+++ b/drivers/common/zsda/meson.build
@@ -12,4 +12,5 @@ sources += files(
 		'zsda_common.c',
 		'zsda_logs.c',
 		'zsda_device.c',
+		'zsda_qp.c',
 		)
diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c
new file mode 100644
index 0000000000..f1fe6c5817
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.c
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+
+#include <rte_malloc.h>
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+#define RING_DIR_TX 0
+#define RING_DIR_RX 1
+
+/* Per-service sizes (bytes) of one descriptor on the TX (request) and
+ * RX (response) hardware rings.
+ */
+struct ring_size {
+	uint16_t tx_msg_size;
+	uint16_t rx_msg_size;
+};
+/* Number of hardware queue pairs in use on this function; set during
+ * zsda_queue_init() and read by the start/stop/clear loops below.
+ */
+uint8_t zsda_num_used_qps;
+
+/* Descriptor sizes per service, indexed by enum zsda_service_type. */
+struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+	[ZSDA_SERVICE_SYMMETRIC_ENCRYPT] = {128, 16},
+	[ZSDA_SERVICE_SYMMETRIC_DECRYPT] = {128, 16},
+	[ZSDA_SERVICE_COMPRESSION] = {32, 16},
+	[ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+	[ZSDA_SERVICE_HASH_ENCODE] = {32, 16},
+};
+
+static void
+zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,
+			 const uint8_t qid)
+{
+	struct rte_pci_device *pci_dev =
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+	ZSDA_CSR_WRITE32(mmio_base + IO_DB_INITIAL_CONFIG + (qid * 4),
+			 SET_HEAD_INTI);
+}
+
+static int
+zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+			 const uint8_t qid, struct qinfo *qcfg)
+{
+	struct zsda_admin_req_qcfg req = {0};
+	struct zsda_admin_resp_qcfg resp = {0};
+	int ret = 0;
+	struct rte_pci_device *pci_dev =
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+	if (qid >= MAX_QPS_ON_FUNCTION) {
+		ZSDA_LOG(ERR, "qid beyond limit!");
+		return ZSDA_FAILED;
+	}
+
+	zsda_admin_msg_init(pci_dev);
+	req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
+	req.qid = qid;
+
+	ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Send msg");
+		return ret;
+	}
+
+	ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Receive msg");
+		return ret;
+	}
+
+	memcpy(qcfg, &resp.qcfg, sizeof(*qcfg));
+
+	return ZSDA_SUCCESS;
+}
+
+
+int
+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
+{
+	uint8_t i;
+	uint32_t index;
+	enum zsda_service_type type;
+	struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
+	struct qinfo qcfg = {0};
+	int ret = 0;
+
+	for (i = 0; i < zsda_num_used_qps; i++) {
+		zsda_set_queue_head_tail(zsda_pci_dev, i);
+		ret = zsda_get_queue_cfg_by_id(zsda_pci_dev, i, &qcfg);
+		type = qcfg.q_type;
+		if (ret) {
+			ZSDA_LOG(ERR, "get queue cfg!");
+			return ret;
+		}
+		if (type >= ZSDA_SERVICE_INVALID)
+			continue;
+
+		index = zsda_pci_dev->zsda_qp_hw_num[type];
+		zsda_hw_qps[type].data[index].used = true;
+		zsda_hw_qps[type].data[index].tx_ring_num = i;
+		zsda_hw_qps[type].data[index].rx_ring_num = i;
+		zsda_hw_qps[type].data[index].tx_msg_size =
+			zsda_qp_hw_ring_size[type].tx_msg_size;
+		zsda_hw_qps[type].data[index].rx_msg_size =
+			zsda_qp_hw_ring_size[type].rx_msg_size;
+
+		zsda_pci_dev->zsda_qp_hw_num[type]++;
+	}
+
+	return ret;
+}
+
+
+static uint8_t
+zsda_get_num_used_qps(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t num_used_qps;
+
+	num_used_qps = ZSDA_CSR_READ8(mmio_base + 0);
+
+	return num_used_qps;
+}
+
+static int
+zsda_check_write(uint8_t *addr, const uint32_t dst_value)
+{
+	int times = ZSDA_TIME_NUM;
+	uint32_t val;
+
+	val = ZSDA_CSR_READ32(addr);
+
+	while ((val != dst_value) && times--) {
+		val = ZSDA_CSR_READ32(addr);
+		rte_delay_us_sleep(ZSDA_TIME_SLEEP_US);
+	}
+	if (val == dst_value)
+		return ZSDA_SUCCESS;
+	else
+		return ZSDA_FAILED;
+}
+
+
+static int
+zsda_admin_q_start(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret = 0;
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, 0);
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+	ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+
+	return ret;
+}
+
+static int
+zsda_admin_q_stop(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret = 0;
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP_RESP, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP, ZSDA_Q_STOP);
+
+	ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_STOP_RESP,
+			       ZSDA_RESP_VALID);
+
+	if (ret)
+		ZSDA_LOG(INFO, "Failed! zsda_admin q stop");
+
+	return ret;
+}
+
+static int
+zsda_admin_q_clear(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret = 0;
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR_RESP, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR, ZSDA_RESP_VALID);
+
+	ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_CLR_RESP,
+			       ZSDA_RESP_VALID);
+
+	if (ret)
+		ZSDA_LOG(INFO, "Failed! zsda_admin q clear");
+
+	return ret;
+}
+
+
+static int
+zsda_queue_start_single(uint8_t *mmio_base, const uint8_t id)
+{
+	uint8_t *addr_start = mmio_base + ZSDA_IO_Q_START + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_start, ZSDA_Q_START);
+	return zsda_check_write(addr_start, ZSDA_Q_START);
+}
+
+
+static int
+zsda_queue_stop_single(uint8_t *mmio_base, const uint8_t id)
+{
+	int ret = 0;
+	uint8_t *addr_stop = mmio_base + ZSDA_IO_Q_STOP + (4 * id);
+	uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_STOP_RESP + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(addr_stop, ZSDA_Q_STOP);
+
+	ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+
+	return ret;
+}
+
+static int
+zsda_queue_clear_single(uint8_t *mmio_base, const uint8_t id)
+{
+	int ret = 0;
+	uint8_t *addr_clear = mmio_base + ZSDA_IO_Q_CLR + (4 * id);
+	uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_CLR_RESP + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_VALID);
+	ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+	ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_INVALID);
+
+	return ret;
+}
+
+int
+zsda_queue_start(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t id;
+	int ret = 0;
+
+	for (id = 0; id < zsda_num_used_qps; id++)
+		ret |= zsda_queue_start_single(mmio_base, id);
+
+	return ret;
+}
+
+int
+zsda_queue_stop(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t id;
+	int ret = 0;
+
+	for (id = 0; id < zsda_num_used_qps; id++)
+		ret |= zsda_queue_stop_single(mmio_base, id);
+
+	return ret;
+}
+
+static int
+zsda_queue_clear(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t id;
+	int ret = 0;
+
+	for (id = 0; id < zsda_num_used_qps; id++)
+		ret |= zsda_queue_clear_single(mmio_base, id);
+
+	return ret;
+}
+
+static uint16_t
+zsda_qps_per_service(const struct zsda_pci_device *zsda_pci_dev,
+		     const enum zsda_service_type service)
+{
+	uint16_t qp_hw_num = 0;
+
+	if (service < ZSDA_SERVICE_INVALID)
+		qp_hw_num = zsda_pci_dev->zsda_qp_hw_num[service];
+	return qp_hw_num;
+}
+
+struct zsda_num_qps zsda_nb_qps;
+static void
+zsda_get_nb_qps(const struct zsda_pci_device *zsda_pci_dev)
+{
+	zsda_nb_qps.encomp =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);
+	zsda_nb_qps.decomp =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);
+	zsda_nb_qps.encrypt =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_ENCRYPT);
+	zsda_nb_qps.decrypt =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_SYMMETRIC_DECRYPT);
+	zsda_nb_qps.hash =
+		zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_HASH_ENCODE);
+}
+
+/*
+ * One-time queue bring-up for a device: discover how many hardware queue
+ * pairs are in use, start the admin queue, clear all I/O queues, then read
+ * back each queue's configuration and record the per-service counts.
+ * Returns 0 on success or the first failing step's error code.
+ */
+int
+zsda_queue_init(struct zsda_pci_device *zsda_pci_dev)
+{
+	int ret = 0;
+
+	zsda_num_used_qps = zsda_get_num_used_qps(zsda_pci_dev->pci_dev);
+	/* NOTE(review): the register apparently holds the highest used queue
+	 * index, hence the +1 to turn it into a count — confirm against the
+	 * hardware spec.
+	 */
+	zsda_num_used_qps++;
+
+	ret = zsda_admin_q_start(zsda_pci_dev->pci_dev);
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! admin q start");
+		return ret;
+	}
+
+	/* Clear the I/O queues before querying their configuration. */
+	ret = zsda_queue_clear(zsda_pci_dev->pci_dev);
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! used zsda_io q clear");
+		return ret;
+	}
+
+	ret = zsda_get_queue_cfg(zsda_pci_dev);
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! zsda_get_queue_cfg");
+		return ret;
+	}
+
+	/* Publish the per-service counts for the PMDs. */
+	zsda_get_nb_qps(zsda_pci_dev);
+
+	return ret;
+}
+
+
+int
+zsda_queue_remove(struct zsda_pci_device *zsda_pci_dev)
+{
+	int ret = 0;
+
+	if (zsda_admin_q_clear(zsda_pci_dev->pci_dev) == ZSDA_FAILED)
+		ZSDA_LOG(ERR, "Failed! q clear");
+
+	if (zsda_admin_q_stop(zsda_pci_dev->pci_dev) == ZSDA_FAILED)
+		ZSDA_LOG(ERR, "Failed! q stop");
+
+	return ret;
+}
+
+struct zsda_qp_hw *
+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+			const enum zsda_service_type service)
+{
+	struct zsda_qp_hw *qp_hw = NULL;
+
+	if (service < ZSDA_SERVICE_INVALID)
+		qp_hw = &(zsda_pci_dev->zsda_hw_qps[service]);
+
+	return qp_hw;
+}
+
+static const struct rte_memzone *
+zsda_queue_dma_zone_reserve(const char *queue_name, const unsigned int queue_size,
+		       const unsigned int socket_id)
+{
+	const struct rte_memzone *mz;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != 0) {
+		if (((size_t)queue_size <= mz->len) &&
+		    ((socket_id == (SOCKET_ID_ANY & 0xffff)) ||
+		     (socket_id == (mz->socket_id & 0xffff)))) {
+			ZSDA_LOG(DEBUG,
+				 "re-use memzone already allocated for %s",
+				 queue_name);
+			return mz;
+		}
+		ZSDA_LOG(ERR, E_MALLOC);
+		return NULL;
+	}
+
+	mz = rte_memzone_reserve_aligned(queue_name, queue_size,
+					   (int)(socket_id & 0xfff),
+					   RTE_MEMZONE_IOVA_CONTIG, queue_size);
+
+	return mz;
+}
+
+static int
+zsda_queue_create(const uint32_t dev_id, struct zsda_queue *queue,
+		  const struct zsda_qp_config *qp_conf, const uint8_t dir)
+{
+	void *io_addr;
+	const struct rte_memzone *qp_mz;
+	struct qinfo qcfg = {0};
+
+	uint16_t desc_size = ((dir == RING_DIR_TX) ? qp_conf->hw->tx_msg_size
+						   : qp_conf->hw->rx_msg_size);
+	unsigned int queue_size_bytes = qp_conf->nb_descriptors * desc_size;
+
+	queue->hw_queue_number =
+		((dir == RING_DIR_TX) ? qp_conf->hw->tx_ring_num
+				      : qp_conf->hw->rx_ring_num);
+
+	struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+	struct zsda_pci_device *zsda_dev =
+		(struct zsda_pci_device *)zsda_devs[dev_id].mz->addr;
+
+	zsda_get_queue_cfg_by_id(zsda_dev, queue->hw_queue_number, &qcfg);
+
+	if (dir == RING_DIR_TX)
+		snprintf(queue->memz_name, sizeof(queue->memz_name),
+			 "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,
+			 qp_conf->service_str, "qptxmem",
+			 queue->hw_queue_number);
+	else
+		snprintf(queue->memz_name, sizeof(queue->memz_name),
+			 "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,
+			 qp_conf->service_str, "qprxmem",
+			 queue->hw_queue_number);
+
+	qp_mz = zsda_queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+				       rte_socket_id());
+	if (qp_mz == NULL) {
+		ZSDA_LOG(ERR, E_MALLOC);
+		return -ENOMEM;
+	}
+
+	queue->base_addr = qp_mz->addr;
+	queue->base_phys_addr = qp_mz->iova;
+	queue->modulo_mask = MAX_NUM_OPS;
+	queue->msg_size = desc_size;
+
+	queue->head = (dir == RING_DIR_TX) ? qcfg.wq_head : qcfg.cq_head;
+	queue->tail = (dir == RING_DIR_TX) ? qcfg.wq_tail : qcfg.cq_tail;
+
+	if ((queue->head == 0) && (queue->tail == 0))
+		qcfg.cycle += 1;
+
+	queue->valid = qcfg.cycle & (ZSDA_MAX_CYCLE - 1);
+	queue->queue_size = ZSDA_MAX_DESC;
+	queue->cycle_size = ZSDA_MAX_CYCLE;
+	queue->io_addr = pci_dev->mem_resource[0].addr;
+
+	memset(queue->base_addr, 0x0, queue_size_bytes);
+	io_addr = pci_dev->mem_resource[0].addr;
+
+	if (dir == RING_DIR_TX)
+		ZSDA_CSR_WQ_RING_BASE(io_addr, queue->hw_queue_number,
+				      queue->base_phys_addr);
+	else
+		ZSDA_CSR_CQ_RING_BASE(io_addr, queue->hw_queue_number,
+				      queue->base_phys_addr);
+
+	return 0;
+}
+
+static void
+zsda_queue_delete(const struct zsda_queue *queue)
+{
+	const struct rte_memzone *mz;
+	int status;
+
+	if (queue == NULL) {
+		ZSDA_LOG(DEBUG, "Invalid queue");
+		return;
+	}
+
+	mz = rte_memzone_lookup(queue->memz_name);
+	if (mz != NULL) {
+		memset(queue->base_addr, 0x0,
+		       (uint16_t)(queue->queue_size * queue->msg_size));
+		status = rte_memzone_free(mz);
+		if (status != 0)
+			ZSDA_LOG(ERR, E_FREE);
+	} else
+		ZSDA_LOG(DEBUG, "queue %s doesn't exist", queue->memz_name);
+}
+
+void
+zsda_stats_reset(void **queue_pairs, const uint32_t nb_queue_pairs)
+{
+	enum zsda_service_type type;
+	uint32_t i;
+	struct zsda_qp *qp;
+
+	if (queue_pairs == NULL) {
+		ZSDA_LOG(ERR, E_NULL);
+		return;
+	}
+
+	for (i = 0; i < nb_queue_pairs; i++) {
+		qp = queue_pairs[i];
+
+		if (qp == NULL) {
+			ZSDA_LOG(ERR, E_NULL);
+			break;
+		}
+		for (type = 0; type < ZSDA_MAX_SERVICES; type++) {
+			if (qp->srv[type].used)
+				memset(&(qp->srv[type].stats), 0,
+				       sizeof(struct zsda_common_stat));
+		}
+	}
+}
+
+void
+zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
+	      struct zsda_common_stat *stats)
+{
+	enum zsda_service_type type;
+	uint32_t i;
+	struct zsda_qp *qp;
+
+	if ((stats == NULL) || (queue_pairs == NULL)) {
+		ZSDA_LOG(ERR, E_NULL);
+		return;
+	}
+
+	for (i = 0; i < nb_queue_pairs; i++) {
+		qp = queue_pairs[i];
+
+		if (qp == NULL) {
+			ZSDA_LOG(ERR, E_NULL);
+			break;
+		}
+
+		for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+			if (qp->srv[type].used) {
+				stats->enqueued_count +=
+					qp->srv[type].stats.enqueued_count;
+				stats->dequeued_count +=
+					qp->srv[type].stats.dequeued_count;
+				stats->enqueue_err_count +=
+					qp->srv[type].stats.enqueue_err_count;
+				stats->dequeue_err_count +=
+					qp->srv[type].stats.dequeue_err_count;
+			}
+		}
+	}
+}
+
+
+static int
+zsda_cookie_init(const uint32_t dev_id, struct zsda_qp **qp_addr,
+	    const uint16_t queue_pair_id,
+	    const struct zsda_qp_config *zsda_qp_conf)
+{
+	struct zsda_qp *qp = *qp_addr;
+	struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+	char op_cookie_pool_name[RTE_RING_NAMESIZE];
+	uint32_t i;
+	enum zsda_service_type type = zsda_qp_conf->service_type;
+
+	if (zsda_qp_conf->nb_descriptors != ZSDA_MAX_DESC)
+		ZSDA_LOG(ERR, "Can't create qp for %u descriptors",
+			 zsda_qp_conf->nb_descriptors);
+
+	qp->srv[type].nb_descriptors = zsda_qp_conf->nb_descriptors;
+
+	qp->srv[type].op_cookies = rte_zmalloc_socket(
+		"zsda PMD op cookie pointer",
+		zsda_qp_conf->nb_descriptors *
+			sizeof(*qp->srv[type].op_cookies),
+		RTE_CACHE_LINE_SIZE, zsda_qp_conf->socket_id);
+
+	if (qp->srv[type].op_cookies == NULL) {
+		ZSDA_LOG(ERR, E_MALLOC);
+		return -ENOMEM;
+	}
+
+	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s%d_cks_%s_qp%hu",
+		 pci_dev->driver->driver.name, dev_id,
+		 zsda_qp_conf->service_str, queue_pair_id);
+
+	qp->srv[type].op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+	if (qp->srv[type].op_cookie_pool == NULL)
+		qp->srv[type].op_cookie_pool = rte_mempool_create(
+			op_cookie_pool_name, qp->srv[type].nb_descriptors,
+			zsda_qp_conf->cookie_size, 64, 0, NULL, NULL, NULL,
+			NULL, (int)(rte_socket_id() & 0xfff), 0);
+	if (!qp->srv[type].op_cookie_pool) {
+		ZSDA_LOG(ERR, E_CREATE);
+		goto exit;
+	}
+
+	for (i = 0; i < qp->srv[type].nb_descriptors; i++) {
+		if (rte_mempool_get(qp->srv[type].op_cookie_pool,
+				    &qp->srv[type].op_cookies[i])) {
+			ZSDA_LOG(ERR, "ZSDA PMD Cannot get op_cookie");
+			goto exit;
+		}
+		memset(qp->srv[type].op_cookies[i], 0,
+		       zsda_qp_conf->cookie_size);
+	}
+	return 0;
+
+exit:
+	if (qp->srv[type].op_cookie_pool)
+		rte_mempool_free(qp->srv[type].op_cookie_pool);
+	rte_free(qp->srv[type].op_cookies);
+
+	return -EFAULT;
+}
+
+static int
+zsda_queue_pair_setup(const uint32_t dev_id, struct zsda_qp **qp_addr,
+		      const uint16_t queue_pair_id,
+		      const struct zsda_qp_config *zsda_qp_conf)
+{
+	struct zsda_qp *qp = *qp_addr;
+	struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;
+	int ret = 0;
+	enum zsda_service_type type = zsda_qp_conf->service_type;
+
+	if (type >= ZSDA_SERVICE_INVALID) {
+		ZSDA_LOG(ERR, "Failed! service type");
+		return -EINVAL;
+	}
+
+	if (pci_dev->mem_resource[0].addr == NULL) {
+		ZSDA_LOG(ERR, E_NULL);
+		return -EINVAL;
+	}
+
+	if (zsda_queue_create(dev_id, &(qp->srv[type].tx_q), zsda_qp_conf,
+			      RING_DIR_TX) != 0) {
+		ZSDA_LOG(ERR, E_CREATE);
+		return -EFAULT;
+	}
+
+	if (zsda_queue_create(dev_id, &(qp->srv[type].rx_q), zsda_qp_conf,
+			      RING_DIR_RX) != 0) {
+		ZSDA_LOG(ERR, E_CREATE);
+		zsda_queue_delete(&(qp->srv[type].tx_q));
+		return -EFAULT;
+	}
+
+	ret = zsda_cookie_init(dev_id, qp_addr, queue_pair_id, zsda_qp_conf);
+	if (ret) {
+		zsda_queue_delete(&(qp->srv[type].tx_q));
+		zsda_queue_delete(&(qp->srv[type].rx_q));
+		qp->srv[type].used = false;
+	}
+	qp->srv[type].used = true;
+	return ret;
+}
+
+int
+zsda_queue_pair_release(struct zsda_qp **qp_addr)
+{
+	struct zsda_qp *qp = *qp_addr;
+	uint32_t i;
+	enum zsda_service_type type;
+
+	if (qp == NULL) {
+		ZSDA_LOG(DEBUG, "qp already freed");
+		return 0;
+	}
+
+	for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {
+		if (!qp->srv[type].used)
+			continue;
+
+		zsda_queue_delete(&(qp->srv[type].tx_q));
+		zsda_queue_delete(&(qp->srv[type].rx_q));
+		qp->srv[type].used = false;
+		for (i = 0; i < qp->srv[type].nb_descriptors; i++)
+			rte_mempool_put(qp->srv[type].op_cookie_pool,
+					qp->srv[type].op_cookies[i]);
+
+		if (qp->srv[type].op_cookie_pool)
+			rte_mempool_free(qp->srv[type].op_cookie_pool);
+
+		rte_free(qp->srv[type].op_cookies);
+	}
+
+	rte_free(qp);
+	*qp_addr = NULL;
+
+	return 0;
+}
+
+int
+zsda_common_setup_qp(uint32_t zsda_dev_id, struct zsda_qp **qp_addr,
+		const uint16_t queue_pair_id, const struct zsda_qp_config *conf)
+{
+	uint32_t i;
+	int ret = 0;
+	struct zsda_qp *qp;
+	rte_iova_t cookie_phys_addr;
+
+	ret = zsda_queue_pair_setup(zsda_dev_id, qp_addr, queue_pair_id, conf);
+	if (ret)
+		return ret;
+
+	qp = (struct zsda_qp *)*qp_addr;
+
+	for (i = 0; i < qp->srv[conf->service_type].nb_descriptors; i++) {
+		struct zsda_op_cookie *cookie =
+			qp->srv[conf->service_type].op_cookies[i];
+		cookie_phys_addr = rte_mempool_virt2iova(cookie);
+
+		cookie->comp_head_phys_addr = cookie_phys_addr +
+			offsetof(struct zsda_op_cookie, comp_head);
+
+		cookie->sgl_src_phys_addr = cookie_phys_addr +
+			offsetof(struct zsda_op_cookie, sgl_src);
+
+		cookie->sgl_dst_phys_addr = cookie_phys_addr +
+			offsetof(struct zsda_op_cookie, sgl_dst);
+	}
+	return ret;
+}
diff --git a/drivers/common/zsda/zsda_qp.h b/drivers/common/zsda/zsda_qp.h
new file mode 100644
index 0000000000..a9f0d38ba5
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_QP_H_
+#define _ZSDA_QP_H_
+
+/* BAR0 offsets of the per-ring CSRs; each ring's registers sit 8 bytes
+ * apart (ring << 3) from these bases — see the accessor macros below.
+ */
+#define WQ_CSR_LBASE 0x1000	/* WQ (request ring) base address, low 32 bits */
+#define WQ_CSR_UBASE 0x1004	/* WQ base address, high 32 bits */
+#define CQ_CSR_LBASE 0x1400	/* CQ (completion ring) base, low 32 bits */
+#define CQ_CSR_UBASE 0x1404	/* CQ base, high 32 bits */
+#define WQ_TAIL	     0x1800	/* WQ tail doorbell */
+#define CQ_HEAD	     0x1804	/* CQ head doorbell */
+
+/* CSR write macro */
+#define ZSDA_CSR_WR(csrAddr, csrOffset, val)                                   \
+	rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+/* Write-combining variant for doorbell updates. */
+#define ZSDA_CSR_WC_WR(csrAddr, csrOffset, val)                                \
+	rte_write32_wc(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ZSDA_CSR_RD(csrAddr, csrOffset)                                        \
+	rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
/* Program the 64-bit WQ (request ring) base address for @ring: the low and
 * high 32-bit halves go to WQ_CSR_LBASE/WQ_CSR_UBASE respectively.
 * Fix: the first debug log said "l_basg" — typo for "l_base".
 */
#define ZSDA_CSR_WQ_RING_BASE(csr_base_addr, ring, value)                      \
	do {                                                                   \
		uint32_t l_base = 0, u_base = 0;                               \
		l_base = (uint32_t)(value & 0xFFFFFFFF);                       \
		u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);    \
		ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_LBASE,         \
			    l_base);                                           \
		ZSDA_LOG(INFO, "l_base - offset:0x%x, value:0x%x",             \
			 ((ring << 3) + WQ_CSR_LBASE), l_base);                \
		ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_UBASE,         \
			    u_base);                                           \
		ZSDA_LOG(INFO, "h_base - offset:0x%x, value:0x%x",             \
			 ((ring << 3) + WQ_CSR_UBASE), u_base);                \
	} while (0)
+
+/* Program the 64-bit CQ (completion ring) base address for @ring: the low
+ * and high 32-bit halves go to CQ_CSR_LBASE/CQ_CSR_UBASE respectively.
+ */
+#define ZSDA_CSR_CQ_RING_BASE(csr_base_addr, ring, value)                      \
+	do {                                                                   \
+		uint32_t l_base = 0, u_base = 0;                               \
+		l_base = (uint32_t)(value & 0xFFFFFFFF);                       \
+		u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);    \
+		ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_LBASE,         \
+			    l_base);                                           \
+		ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_UBASE,         \
+			    u_base);                                           \
+	} while (0)
+
/* Doorbell/head accessors; per-ring register stride is 8 bytes. */
#define READ_CSR_WQ_HEAD(csr_base_addr, ring)                                  \
	ZSDA_CSR_RD(csr_base_addr, WQ_TAIL + (ring << 3))
#define WRITE_CSR_WQ_TAIL(csr_base_addr, ring, value)                          \
	ZSDA_CSR_WC_WR(csr_base_addr, WQ_TAIL + (ring << 3), value)
/* Fix: READ_CSR_CQ_HEAD previously read from WQ_TAIL (apparent copy/paste);
 * it now reads CQ_HEAD, the register WRITE_CSR_CQ_HEAD writes.
 * NOTE(review): confirm against the hardware spec — and whether
 * READ_CSR_WQ_HEAD reading WQ_TAIL above is itself intentional.
 */
#define READ_CSR_CQ_HEAD(csr_base_addr, ring)                                  \
	ZSDA_CSR_RD(csr_base_addr, CQ_HEAD + (ring << 3))
#define WRITE_CSR_CQ_HEAD(csr_base_addr, ring, value)                          \
	ZSDA_CSR_WC_WR(csr_base_addr, CQ_HEAD + (ring << 3), value)
+
+/**
+ * Ring state for one hardware queue (either a WQ or a CQ).
+ */
+struct zsda_queue {
+	char memz_name[RTE_MEMZONE_NAMESIZE]; /* name of the backing memzone */
+	uint8_t *io_addr;	   /* BAR0 base, used for doorbell writes */
+	uint8_t *base_addr;	   /* Base address */
+	rte_iova_t base_phys_addr; /* Queue physical address */
+	uint16_t head;		   /* Shadow copy of the head */
+	uint16_t tail;		   /* Shadow copy of the tail */
+	uint16_t modulo_mask;	   /* set to MAX_NUM_OPS at queue create */
+	uint16_t msg_size;	   /* descriptor size in bytes */
+	uint16_t queue_size;	   /* set to ZSDA_MAX_DESC at queue create */
+	uint16_t cycle_size;	   /* set to ZSDA_MAX_CYCLE at queue create */
+	uint16_t pushed_wqe;
+
+	uint8_t hw_queue_number;   /* ring index within the device */
+	uint32_t csr_head; /* last written head value */
+	uint32_t csr_tail; /* last written tail value */
+
+	uint8_t valid;		   /* cycle/valid flag seeded from qcfg.cycle */
+	uint16_t sid;
+};
+
+/* Completion callback: invoked with one CQE and the op cookie it belongs to. */
+typedef int (*rx_callback)(void *cookie_in, struct zsda_cqe *cqe);
+/* Enqueue callback: invoked with an op, the target queue, the cookie array
+ * and the new tail index.
+ */
+typedef int (*tx_callback)(void *op_in, const struct zsda_queue *queue,
+			   void **op_cookies, const uint16_t new_tail);
+/* Service matcher: nonzero when this service should handle @op_in. */
+typedef int (*srv_match)(const void *op_in);
+
+/* Per-service slice of a queue pair: TX/RX rings, datapath callbacks,
+ * statistics, and the cookie pool tracking in-flight operations.
+ */
+struct qp_srv {
+	bool used;
+	struct zsda_queue tx_q;
+	struct zsda_queue rx_q;
+	rx_callback rx_cb;
+	tx_callback tx_cb;
+	srv_match match;
+	struct zsda_common_stat stats;
+	struct rte_mempool *op_cookie_pool;
+	void **op_cookies;
+	uint16_t nb_descriptors;
+};
+
+/* A queue pair holds one qp_srv slot per service type. */
+struct zsda_qp {
+	struct qp_srv srv[ZSDA_MAX_SERVICES];
+};
+
+/* Parameters for creating one service's queue pair. */
+struct zsda_qp_config {
+	enum zsda_service_type service_type;
+	const struct zsda_qp_hw_data *hw;  /* ring numbers and msg sizes */
+	uint16_t nb_descriptors;           /* must equal ZSDA_MAX_DESC */
+	uint32_t cookie_size;              /* per-op cookie size in bytes */
+	int socket_id;                     /* NUMA node for allocations */
+	const char *service_str;           /* service name used in object names */
+};
+
+/* Queue-pair counts per service, filled by zsda_queue_init(). */
+struct zsda_num_qps {
+	uint16_t encomp;	/* compression */
+	uint16_t decomp;	/* decompression */
+	uint16_t encrypt;	/* symmetric encrypt */
+	uint16_t decrypt;	/* symmetric decrypt */
+	uint16_t hash;		/* hash encode */
+};
+
+/* Filled during zsda_queue_init(); consumed by the service PMDs. */
+extern struct zsda_num_qps zsda_nb_qps;
+
+/* Start/stop all in-use I/O queues of @pci_dev; 0 means every queue
+ * succeeded.
+ */
+int zsda_queue_start(const struct rte_pci_device *pci_dev);
+int zsda_queue_stop(const struct rte_pci_device *pci_dev);
+
+/* One-time queue discovery and bring-up for a device. */
+int zsda_queue_init(struct zsda_pci_device *zsda_pci_dev);
+
+/* Hardware queue table for @service, or NULL for an invalid service. */
+struct zsda_qp_hw *
+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,
+			const enum zsda_service_type service);
+
+/* Query each in-use queue's configuration from the device. */
+int zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+
+/* Free all rings/cookies of a queue pair and NULL *qp_addr. */
+int zsda_queue_pair_release(struct zsda_qp **qp_addr);
+
+/* Create a service's queue pair and initialize its op cookies. */
+int zsda_common_setup_qp(uint32_t dev_id, struct zsda_qp **qp_addr,
+		    const uint16_t queue_pair_id,
+		    const struct zsda_qp_config *conf);
+
+/* Accumulate / reset statistics across @nb_queue_pairs queue pairs. */
+void zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,
+		    struct zsda_common_stat *stats);
+void zsda_stats_reset(void **queue_pairs, const uint32_t nb_queue_pairs);
+
+#endif /* _ZSDA_QP_H_ */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 62166 bytes --]

  parent reply	other threads:[~2024-10-17  9:28 UTC|newest]

Thread overview: 95+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-07-01  8:27 [PATCH] zsda:introduce zsda drivers and examples lhx
2024-07-02  8:52 ` David Marchand
2024-07-02 13:11 ` [EXTERNAL] " Akhil Goyal
2024-09-09  8:08 ` [PATCH v4 1/8] zsda: Introduce zsda device drivers Hanxiao Li
2024-09-10  9:15 ` [PATCH v5 " Hanxiao Li
2024-09-10  9:18   ` [PATCH v5 2/8] zsda: add support for zsdadev operations Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 3/8] zsda: add support for queue operation Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 4/8] zsda: add zsda compressdev driver and interface Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 5/8] zsda: modify files for introducing zsda cryptodev Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 6/8] zsda: add zsda crypto-pmd Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 7/8] zsda: add zsda crypto-sym Hanxiao Li
2024-09-10  9:18     ` [PATCH v5 8/8] zsda: add zsda crypto-session and compile file Hanxiao Li
2024-09-11  7:52   ` [PATCH v6 1/8] zsda: Introduce zsda device drivers Hanxiao Li
2024-09-11  7:54     ` [PATCH v6 2/8] zsda: add support for zsdadev operations Hanxiao Li
2024-09-11  7:54       ` [PATCH v6 3/8] zsda: add support for queue operation Hanxiao Li
2024-09-11 16:01         ` Stephen Hemminger
2024-09-11  7:54       ` [PATCH v6 4/8] zsda: add zsda compressdev driver and interface Hanxiao Li
2024-09-11  7:54       ` [PATCH v6 5/8] zsda: modify files for introducing zsda cryptodev Hanxiao Li
2024-09-17 18:22         ` [EXTERNAL] " Akhil Goyal
2024-09-11  7:54       ` [PATCH v6 6/8] zsda: add zsda crypto-pmd Hanxiao Li
2024-09-17 18:25         ` [EXTERNAL] " Akhil Goyal
2024-09-11  7:54       ` [PATCH v6 7/8] zsda: add zsda crypto-sym Hanxiao Li
2024-09-11  7:54       ` [PATCH v6 8/8] zsda: add zsda crypto-session and compile file Hanxiao Li
2024-09-17 18:33         ` [EXTERNAL] " Akhil Goyal
2024-09-17 18:13     ` [EXTERNAL] [PATCH v6 1/8] zsda: Introduce zsda device drivers Akhil Goyal
2024-09-27 12:44     ` [PATCH v7 0/8] drivers/zsda: introduce zsda drivers Hanxiao Li
2024-09-27 13:01     ` [PATCH v7 1/8] common/zsda: add common function and log macro Hanxiao Li
2024-09-27 13:09       ` [PATCH v7 0/8] drivers/zsda: introduce zsda drivers Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 2/8] common/zsda: configure device Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 3/8] common/zsda: configure queues Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 4/8] compress/zsda: configure drivers of compressdev Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 5/8] crypto/zsda: configure drivers, sessions, capabilities of cryptodev Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 6/8] lib/cryptodev: add sm4 xts for crypto Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 7/8] app/test: add sm4-xts test Hanxiao Li
2024-09-27 13:09         ` [PATCH v7 8/8] doc/guides: add documents and release notes for two drivers Hanxiao Li
2024-09-29  7:35       ` Hanxiao Li
2024-10-01  7:31         ` [EXTERNAL] " Akhil Goyal
2024-09-29 14:58       ` [PATCH v8 0/8] drivers/zsda: introduce zsda drivers Hanxiao Li
2024-09-29 14:58         ` [PATCH v8 1/8] common/zsda: add common function and log macro Hanxiao Li
2024-09-29 15:04           ` [PATCH v8 2/8] common/zsda: configure device Hanxiao Li
2024-09-29 15:05             ` [PATCH v8 3/8] common/zsda: configure queues Hanxiao Li
2024-10-01  7:39               ` [EXTERNAL] " Akhil Goyal
2024-09-29 15:05             ` [PATCH v8 4/8] compress/zsda: configure drivers of compressdev Hanxiao Li
2024-10-01  7:38               ` [EXTERNAL] " Akhil Goyal
2024-09-29 15:05             ` [PATCH v8 5/8] crypto/zsda: configure drivers, sessions, capabilities of cryptodev Hanxiao Li
2024-10-01  7:35               ` [EXTERNAL] " Akhil Goyal
2024-09-29 15:05             ` [PATCH v8 6/8] lib/cryptodev: add sm4 xts for crypto Hanxiao Li
2024-10-01  7:28               ` [EXTERNAL] " Akhil Goyal
2024-10-09 21:09                 ` Akhil Goyal
2024-09-29 15:05             ` [PATCH v8 7/8] app/test: add sm4-xts test Hanxiao Li
2024-09-29 15:05             ` [PATCH v8 8/8] doc/guides: add documents and release notes for two drivers Hanxiao Li
2024-09-29 19:41               ` Stephen Hemminger
2024-10-01  7:43             ` [EXTERNAL] [PATCH v8 2/8] common/zsda: configure device Akhil Goyal
2024-10-11  1:50           ` [PATCH v9 00/12] drivers/zsda: introduce zsda drivers Hanxiao Li
2024-10-11  1:50             ` [PATCH v9 01/12] zsda: add zsdadev driver documents Hanxiao Li
2024-10-11  1:54               ` [PATCH v9 02/12] config: add zsda device number Hanxiao Li
2024-10-11  1:56               ` [PATCH v9 03/12] common/zsda: add some common functions Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 04/12] common/zsda: configure zsda device Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 05/12] common/zsda: configure zsda queue base functions Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 06/12] common/zsda: configure zsda queue enqueue functions Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 07/12] common/zsda: configure zsda queue dequeue functions Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 08/12] compress/zsda: add zsda compress driver Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 09/12] compress/zsda: add zsda compress PMD Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 10/12] crypto/zsda: add crypto sessions configuration Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 11/12] crypto/zsda: add zsda crypto driver Hanxiao Li
2024-10-11  1:56                 ` [PATCH v9 12/12] crypto/zsda: add zsda crypto PMD Hanxiao Li
2024-10-12 19:03                   ` Stephen Hemminger
2024-10-16  8:33               ` [PATCH v10 00/12] drivers/zsda: introduce zsda drivers Hanxiao Li
2024-10-16  8:33                 ` [PATCH v10 01/12] zsda: add zsdadev driver documents Hanxiao Li
2024-10-16  8:37                   ` [PATCH v10 02/12] config: add zsda device number Hanxiao Li
2024-10-16  8:38                   ` [PATCH v10 03/12] common/zsda: add some common functions Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 04/12] common/zsda: configure zsda device Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 05/12] common/zsda: configure zsda queue base functions Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 06/12] common/zsda: configure zsda queue enqueue functions Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 07/12] common/zsda: configure zsda queue dequeue functions Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 08/12] compress/zsda: add zsda compress driver Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 09/12] compress/zsda: add zsda compress PMD Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 10/12] crypto/zsda: add crypto sessions configuration Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 11/12] crypto/zsda: add zsda crypto driver Hanxiao Li
2024-10-16  8:38                     ` [PATCH v10 12/12] crypto/zsda: add zsda crypto PMD Hanxiao Li
2024-10-17  9:21                   ` [PATCH v11 00/12] drivers/zsda: introduce zsda drivers Hanxiao Li
2024-10-17  9:21                     ` [PATCH v11 01/12] zsda: add zsdadev driver documents Hanxiao Li
2024-10-17  9:21                     ` [PATCH v11 02/12] config: add zsda device number Hanxiao Li
2024-10-17  9:22                     ` [PATCH v11 03/12] common/zsda: add some common functions Hanxiao Li
2024-10-17  9:22                     ` [PATCH v11 04/12] common/zsda: configure zsda device Hanxiao Li
2024-10-17  9:22                     ` Hanxiao Li [this message]
2024-10-17 19:30                       ` [PATCH v11 05/12] common/zsda: configure zsda queue base functions Stephen Hemminger
2024-10-17  9:22                     ` [PATCH v11 06/12] common/zsda: configure zsda queue enqueue functions Hanxiao Li
2024-10-17  9:22                     ` [PATCH v11 07/12] common/zsda: configure zsda queue dequeue functions Hanxiao Li
2024-10-17  9:22                     ` [PATCH v11 08/12] compress/zsda: add zsda compress driver Hanxiao Li
2024-10-17  9:22                     ` [PATCH v11 09/12] compress/zsda: add zsda compress PMD Hanxiao Li
2024-10-17  9:22                     ` [PATCH v11 10/12] crypto/zsda: add crypto sessions configuration Hanxiao Li
2024-10-17  9:22                     ` [PATCH v11 11/12] crypto/zsda: add zsda crypto driver Hanxiao Li
2024-10-17  9:22                     ` [PATCH v11 12/12] crypto/zsda: add zsda crypto PMD Hanxiao Li
2024-10-17 19:24                     ` [PATCH v11 00/12] drivers/zsda: introduce zsda drivers Stephen Hemminger

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241017092310.1863628-6-li.hanxiao@zte.com.cn \
    --to=li.hanxiao@zte.com.cn \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).