DPDK patches and discussions
 help / color / mirror / Atom feed
From: Yong Zhang <zhang.yong25@zte.com.cn>
To: thomas@monjalon.net, stephen@networkplumber.org, dev@dpdk.org
Cc: zhang.yong25@zte.com.cn, wang.yong19@zte.com.cn,
	li.min10@zte.com.cn, ran.ming@zte.com.cn
Subject: [v5,2/5] raw/gdtc: add support for queue setup operation
Date: Thu, 14 Nov 2024 17:20:10 +0800	[thread overview]
Message-ID: <20241114092047.3514280-3-zhang.yong25@zte.com.cn> (raw)
In-Reply-To: <20241114092047.3514280-1-zhang.yong25@zte.com.cn>


[-- Attachment #1.1.1: Type: text/plain, Size: 9685 bytes --]

Add queue initialization and release interfaces.

Signed-off-by: Yong Zhang <zhang.yong25@zte.com.cn>
---
 drivers/raw/gdtc/gdtc_rawdev.c | 244 +++++++++++++++++++++++++++++++++
 drivers/raw/gdtc/gdtc_rawdev.h |  19 +++
 2 files changed, 263 insertions(+)

diff --git a/drivers/raw/gdtc/gdtc_rawdev.c b/drivers/raw/gdtc/gdtc_rawdev.c
index 6f20ecdad6..c3e59fcdab 100644
--- a/drivers/raw/gdtc/gdtc_rawdev.c
+++ b/drivers/raw/gdtc/gdtc_rawdev.c
@@ -28,10 +28,42 @@
 
 #include "gdtc_rawdev.h"
 
+/*
+ * User field layout (one 16-bit half of the EXT_ADDR register):
+ * ep_id-bit[15:12] vfunc_num-bit[11:4] func_num-bit[3:1] vfunc_active-bit0
+ * host ep_id:5~8   zf ep_id:9
+ */
+#define ZXDH_GDMA_ZF_USER                       0x9000      /* ep4 pf0 */
+#define ZXDH_GDMA_PF_NUM_SHIFT                  1
+#define ZXDH_GDMA_VF_NUM_SHIFT                  4
+#define ZXDH_GDMA_EP_ID_SHIFT                   12
+#define ZXDH_GDMA_VF_EN                         1
+#define ZXDH_GDMA_EPID_OFFSET                   5
+
 /* Register offset */
 #define ZXDH_GDMA_BASE_OFFSET                   0x100000
+#define ZXDH_GDMA_EXT_ADDR_OFFSET               0x218
+#define ZXDH_GDMA_CONTROL_OFFSET                0x230
+#define ZXDH_GDMA_TC_CNT_OFFSET                 0x23c
+#define ZXDH_GDMA_LLI_USER_OFFSET               0x228
+
+/* Shift an unsigned literal: (1 << 31) on a signed int is undefined behavior */
+#define ZXDH_GDMA_CHAN_FORCE_CLOSE              (1u << 31)
+
+/* TC count & Error interrupt status register */
+#define ZXDH_GDMA_SRC_LLI_ERR                   (1u << 16)
+#define ZXDH_GDMA_SRC_DATA_ERR                  (1u << 17)
+#define ZXDH_GDMA_DST_ADDR_ERR                  (1u << 18)
+#define ZXDH_GDMA_ERR_STATUS                    (1u << 19)
+#define ZXDH_GDMA_ERR_INTR_ENABLE               (1u << 20)
+#define ZXDH_GDMA_TC_CNT_CLEAN                  (1u)
+
 #define ZXDH_GDMA_CHAN_SHIFT                    0x80
+#define LOW32_MASK                              0xffffffff
+#define LOW16_MASK                              0xffff
+
+static int zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id);
+static int zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id);
+
 char zxdh_gdma_driver_name[] = "rawdev_zxdh_gdma";
 char dev_name[] = "zxdh_gdma";
 
@@ -41,9 +73,221 @@ zxdh_gdma_rawdev_get_priv(const struct rte_rawdev *rawdev)
 	return rawdev->dev_private;
 }
 
+/* Translate a channel index into its per-queue state; NULL if out of range. */
+static inline struct zxdh_gdma_queue *
+zxdh_gdma_get_queue(struct rte_rawdev *dev, uint16_t queue_id)
+{
+	struct zxdh_gdma_rawdev *gdmadev = NULL;
+
+	if (queue_id >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
+		ZXDH_PMD_LOG(ERR, "queue id %d is invalid", queue_id);
+		return NULL;
+	}
+
+	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+	return gdmadev->vqs + queue_id;
+}
+
+/*
+ * Write a 32-bit register of the given queue's channel.  Channel N's
+ * registers live at (base_addr + offset + N * ZXDH_GDMA_CHAN_SHIFT).
+ * The store goes through a volatile pointer so the compiler cannot elide
+ * or reorder the MMIO access; consider rte_write32() if full barrier
+ * semantics against DMA are required.
+ */
+static void
+zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset, uint32_t val)
+{
+	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+	uint32_t addr = 0;
+
+	addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
+	*(volatile uint32_t *)(gdmadev->base_addr + addr) = val;
+}
+
+/*
+ * Rawdev queue_setup op: pick the first idle hardware channel (the caller's
+ * queue_id is only a hint and is ignored), initialize it, and program the
+ * source/destination "user" routing word.  Returns the allocated channel id
+ * on success, a negative errno on failure.
+ */
+static int
+zxdh_gdma_rawdev_queue_setup(struct rte_rawdev *dev,
+				uint16_t queue_id,
+				rte_rawdev_obj_t queue_conf,
+				size_t conf_size)
+{
+	struct zxdh_gdma_rawdev *gdmadev = NULL;
+	struct zxdh_gdma_queue *queue = NULL;
+	struct zxdh_gdma_queue_config *qconfig = NULL;
+	struct zxdh_gdma_rbp *rbp = NULL;
+	uint16_t i = 0;
+	uint8_t is_txq = 0;
+	uint32_t src_user = 0;
+	uint32_t dst_user = 0;
+	int ret = 0;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if ((queue_conf == NULL) || (conf_size != sizeof(struct zxdh_gdma_queue_config)))
+		return -EINVAL;
+
+	qconfig = (struct zxdh_gdma_queue_config *)queue_conf;
+	rbp = qconfig->rbp;
+	if (rbp == NULL) {
+		ZXDH_PMD_LOG(ERR, "Failed to setup queue, rbp is NULL");
+		return -EINVAL;
+	}
+
+	/* Validate the direction and build the user fields BEFORE claiming a
+	 * channel, so an invalid config leaves no hardware or memory state
+	 * behind (previously the channel leaked on the -EINVAL path).
+	 */
+	if ((rbp->srbp != 0) && (rbp->drbp == 0)) {
+		/* rx queue: remote source, local ZF destination */
+		is_txq = 0;
+		dst_user = ZXDH_GDMA_ZF_USER;
+		src_user = ((rbp->spfid << ZXDH_GDMA_PF_NUM_SHIFT) |
+			((rbp->sportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
+
+		/* vf ids are 1-based in the config, 0-based in the register */
+		if (rbp->svfid != 0)
+			src_user |= (ZXDH_GDMA_VF_EN |
+						 ((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+	} else if ((rbp->srbp == 0) && (rbp->drbp != 0)) {
+		/* tx queue: local ZF source, remote destination */
+		is_txq = 1;
+		src_user = ZXDH_GDMA_ZF_USER;
+		dst_user = ((rbp->dpfid << ZXDH_GDMA_PF_NUM_SHIFT) |
+			((rbp->dportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
+
+		if (rbp->dvfid != 0)
+			dst_user |= (ZXDH_GDMA_VF_EN |
+						 ((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+	} else {
+		ZXDH_PMD_LOG(ERR, "Failed to setup queue, srbp/drbp is invalid");
+		return -EINVAL;
+	}
+
+	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
+		if (gdmadev->vqs[i].enable == 0)
+			break;
+	}
+	if (i >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
+		ZXDH_PMD_LOG(ERR, "Failed to setup queue, no avail queues");
+		return -EBUSY;
+	}
+	queue_id = i;
+	ret = zxdh_gdma_queue_init(dev, queue_id);
+	if (ret != 0) {
+		ZXDH_PMD_LOG(ERR, "Failed to init queue");
+		return ret;
+	}
+	queue = &(gdmadev->vqs[queue_id]);
+	queue->is_txq = is_txq;
+
+	if (is_txq == 0)
+		ZXDH_PMD_LOG(DEBUG, "rxq->qidx:%d setup src_user(ep:%d pf:%d vf:%d) success",
+					queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,
+					(uint8_t)rbp->svfid);
+	else
+		ZXDH_PMD_LOG(DEBUG, "txq->qidx:%d setup dst_user(ep:%d pf:%d vf:%d) success",
+					queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,
+					(uint8_t)rbp->dvfid);
+
+	/* setup queue user info: low 16 bits = source, high 16 bits = destination */
+	queue->user = (src_user & LOW16_MASK) | (dst_user << 16);
+
+	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_EXT_ADDR_OFFSET, queue->user);
+	gdmadev->used_num++;
+
+	return queue_id;
+}
+
 static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
+	/* only queue setup is wired up in this patch; further ops are added later in the series */
+	.queue_setup = zxdh_gdma_rawdev_queue_setup,
 };
 
+/*
+ * Initialize one hardware channel: allocate the software job ring and the
+ * IOVA-contiguous descriptor ring, then force-close and re-arm the channel.
+ * Returns 0 on success, negative errno on failure.  Error paths clean up
+ * locally instead of calling zxdh_gdma_queue_free(), which would decrement
+ * gdmadev->used_num that queue_setup has not incremented yet (underflow).
+ */
+static int
+zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)
+{
+	char name[RTE_MEMZONE_NAMESIZE];
+	struct zxdh_gdma_queue *queue = NULL;
+	const struct rte_memzone *mz = NULL;
+	uint32_t size = 0;
+
+	queue = zxdh_gdma_get_queue(dev, queue_id);
+	if (queue == NULL)
+		return -EINVAL;
+
+	queue->enable = 1;
+	queue->vq_id  = queue_id;
+	queue->flag   = 0;
+	queue->tc_cnt = 0;
+
+	/* Init sw_ring
+	 * NOTE(review): queue->queue_size is read here but never assigned in
+	 * this patch -- confirm it is set elsewhere (e.g. device configure).
+	 */
+	queue->sw_ring.job = rte_calloc(NULL, queue->queue_size, sizeof(struct zxdh_gdma_job *), 0);
+	if (queue->sw_ring.job == NULL) {
+		ZXDH_PMD_LOG(ERR, "can not allocate sw_ring");
+		queue->enable = 0;
+		return -ENOMEM;
+	}
+
+	/* Cache up to size-1 job in the ring to prevent overwriting hardware prefetching */
+	queue->sw_ring.free_cnt = queue->queue_size - 1;
+	queue->sw_ring.deq_cnt  = 0;
+	queue->sw_ring.pend_cnt = 0;
+	queue->sw_ring.enq_idx  = 0;
+	queue->sw_ring.deq_idx  = 0;
+	queue->sw_ring.used_idx = 0;
+
+	/* Init ring: reuse an existing memzone on EEXIST (e.g. after restart) */
+	snprintf(name, RTE_MEMZONE_NAMESIZE, "gdma_vq%d_ring", queue_id);
+	size = ZXDH_GDMA_RING_SIZE * sizeof(struct zxdh_gdma_buff_desc);
+	mz = rte_memzone_reserve_aligned(name, size, rte_socket_id(),
+							RTE_MEMZONE_IOVA_CONTIG, size);
+	if (mz == NULL) {
+		if (rte_errno == EEXIST)
+			mz = rte_memzone_lookup(name);
+		if (mz == NULL) {
+			ZXDH_PMD_LOG(ERR, "can not allocate ring %s", name);
+			rte_free(queue->sw_ring.job);
+			queue->sw_ring.job = NULL;
+			queue->enable = 0;
+			return -ENOMEM;
+		}
+	}
+	memset(mz->addr, 0, size);
+	queue->ring.ring_mz   = mz;
+	queue->ring.desc      = (struct zxdh_gdma_buff_desc *)(mz->addr);
+	queue->ring.ring_mem  = mz->iova;
+	queue->ring.avail_idx = 0;
+	ZXDH_PMD_LOG(INFO, "queue%u ring phy addr:0x%"PRIx64" virt addr:%p",
+						queue_id, mz->iova, mz->addr);
+
+	/* Initialize the hardware channel: close it, clear TC count and error
+	 * status with error interrupts enabled, then program the LLI user word.
+	 */
+	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET,
+		ZXDH_GDMA_CHAN_FORCE_CLOSE);
+	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET,
+		ZXDH_GDMA_ERR_INTR_ENABLE | ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_TC_CNT_CLEAN);
+	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_USER_OFFSET,
+		ZXDH_GDMA_ZF_USER);
+
+	return 0;
+}
+
+/*
+ * Release a queue: force-close the hardware channel and free the software
+ * ring and descriptor memzone.  Freed pointers are cleared so a repeated
+ * call cannot double-free, and used_num is guarded against underflow.
+ */
+static int
+zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id)
+{
+	struct zxdh_gdma_rawdev *gdmadev = NULL;
+	struct zxdh_gdma_queue *queue = NULL;
+	uint32_t val = 0;
+
+	queue = zxdh_gdma_get_queue(dev, queue_id);
+	if (queue == NULL)
+		return -EINVAL;
+
+	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+	/* queue_setup bumps used_num only after a fully successful setup, so
+	 * don't decrement below zero when freeing a half-initialized queue
+	 */
+	if (gdmadev->used_num > 0)
+		gdmadev->used_num--;
+
+	/* disable gdma channel */
+	val = ZXDH_GDMA_CHAN_FORCE_CLOSE;
+	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
+
+	queue->enable           = 0;
+	queue->is_txq           = 0;
+	queue->flag             = 0;
+	queue->user             = 0;
+	queue->tc_cnt           = 0;
+	queue->ring.avail_idx   = 0;
+	queue->sw_ring.free_cnt = 0;
+	queue->sw_ring.deq_cnt  = 0;
+	queue->sw_ring.pend_cnt = 0;
+	queue->sw_ring.enq_idx  = 0;
+	queue->sw_ring.deq_idx  = 0;
+	queue->sw_ring.used_idx = 0;
+	rte_free(queue->sw_ring.job);
+	queue->sw_ring.job = NULL;
+	rte_memzone_free(queue->ring.ring_mz);
+	queue->ring.ring_mz = NULL;
+
+	return 0;
+}
+
 static int
 zxdh_gdma_map_resource(struct rte_pci_device *dev)
 {
diff --git a/drivers/raw/gdtc/gdtc_rawdev.h b/drivers/raw/gdtc/gdtc_rawdev.h
index 9f943c49c6..29b169d079 100644
--- a/drivers/raw/gdtc/gdtc_rawdev.h
+++ b/drivers/raw/gdtc/gdtc_rawdev.h
@@ -100,4 +100,23 @@ struct zxdh_gdma_rawdev {
 	struct zxdh_gdma_queue vqs[ZXDH_GDMA_TOTAL_CHAN_NUM];
 };
 
+/* Route-by-port (RBP) attributes: exactly one of srbp/drbp must be set
+ * (srbp -> rx queue, drbp -> tx queue, per queue_setup in gdtc_rawdev.c)
+ */
+struct zxdh_gdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	uint32_t dportid:3;
+	uint32_t dpfid:3;
+	uint32_t dvfid:8;
+	uint32_t drbp:1;  /* use route-by-port for the destination */
+	uint32_t sportid:3;
+	uint32_t spfid:3;
+	uint32_t svfid:8;
+	uint32_t srbp:1;  /* use route-by-port for the source */
+};
+
+/* Configuration object passed to the queue_setup rawdev op */
+struct zxdh_gdma_queue_config {
+	uint32_t lcore_id;
+	uint32_t flags;
+	struct zxdh_gdma_rbp *rbp;  /* dereferenced by queue_setup; must not be NULL */
+};
+
 #endif /* __GDTC_RAWDEV_H__ */
-- 
2.43.0

[-- Attachment #1.1.2: Type: text/html , Size: 23378 bytes --]

  reply	other threads:[~2024-11-14  9:27 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-29 13:45 [v4,1/5] raw/gdtc: introduce gdtc raw device driver Yong Zhang
2024-10-29 13:45 ` [v4,2/5] raw/gdtc: add support for queue setup operation Yong Zhang
2024-11-12  5:05   ` Stephen Hemminger
2024-10-29 13:45 ` [v4,3/5] raw/gdtc: add support for standard rawdev operations Yong Zhang
2024-10-29 13:45 ` [v4,4/5] raw/gdtc: add support for enqueue operation Yong Zhang
2024-10-29 13:45 ` [v4,5/5] raw/gdtc: add support for dequeue operation Yong Zhang
2024-11-04  2:15 ` Re:[PATCH] raw/gdtc: introduce gdtc raw device driver Yong Zhang
2024-11-12  4:12   ` [PATCH] " Thomas Monjalon
2024-11-12  5:13     ` Stephen Hemminger
2024-11-14  9:36     ` zhang.yong25
2024-11-12  5:08 ` [v4,1/5] " Stephen Hemminger
2024-11-12  5:08 ` Stephen Hemminger
2024-11-13  9:22   ` zhang.yong25
2024-11-13 14:59     ` Stephen Hemminger
2024-11-12  5:09 ` Stephen Hemminger
2024-11-12  5:48 ` Stephen Hemminger
2024-11-14  9:20 ` [v5,1/5] " Yong Zhang
2024-11-14  9:20   ` Yong Zhang [this message]
2024-11-14  9:20   ` [v5,3/5] raw/gdtc: add support for standard rawdev operations Yong Zhang
2024-11-14  9:20   ` [v5,4/5] raw/gdtc: add support for enqueue operation Yong Zhang
2024-11-14  9:20   ` [v5,5/5] raw/gdtc: add support for dequeue operation Yong Zhang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241114092047.3514280-3-zhang.yong25@zte.com.cn \
    --to=zhang.yong25@zte.com.cn \
    --cc=dev@dpdk.org \
    --cc=li.min10@zte.com.cn \
    --cc=ran.ming@zte.com.cn \
    --cc=stephen@networkplumber.org \
    --cc=thomas@monjalon.net \
    --cc=wang.yong19@zte.com.cn \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).