DPDK patches and discussions
 help / color / mirror / Atom feed
From: Ziyang Xuan <xuanziyang2@huawei.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@intel.com>, <cloud.wangxiaoyun@huawei.com>,
	<zhouguoyang@huawei.com>, <shahar.belkar@huawei.com>,
	<luoxianjun@huawei.com>, Ziyang Xuan <xuanziyang2@huawei.com>
Subject: [dpdk-dev] [PATCH v5 05/15] net/hinic/base: add eq mechanism function code
Date: Wed, 19 Jun 2019 23:56:05 +0800	[thread overview]
Message-ID: <0b2dd974286b662bd665b6e798cf288e76be5c4b.1560958308.git.xuanziyang2@huawei.com> (raw)
In-Reply-To: <cover.1560958308.git.xuanziyang2@huawei.com>

Event queues (eqs) include aeqs and ceqs. The PMD supports aeqs only.
An aeq is a queue used for management asynchronous messages and for
management command response messages. This patch introduces the data
structures, initialization, and related interfaces for the aeq.

Signed-off-by: Ziyang Xuan <xuanziyang2@huawei.com>
---
 drivers/net/hinic/base/hinic_pmd_eqs.c | 609 +++++++++++++++++++++++++
 drivers/net/hinic/base/hinic_pmd_eqs.h | 101 ++++
 2 files changed, 710 insertions(+)
 create mode 100644 drivers/net/hinic/base/hinic_pmd_eqs.c
 create mode 100644 drivers/net/hinic/base/hinic_pmd_eqs.h

diff --git a/drivers/net/hinic/base/hinic_pmd_eqs.c b/drivers/net/hinic/base/hinic_pmd_eqs.c
new file mode 100644
index 000000000..8d216cfcf
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_eqs.c
@@ -0,0 +1,609 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_eqs.h"
+
+#define AEQ_CTRL_0_INTR_IDX_SHIFT		0
+#define AEQ_CTRL_0_DMA_ATTR_SHIFT		12
+#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT		20
+#define AEQ_CTRL_0_INTR_MODE_SHIFT		31
+
+#define AEQ_CTRL_0_INTR_IDX_MASK		0x3FFU
+#define AEQ_CTRL_0_DMA_ATTR_MASK		0x3FU
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK		0x3U
+#define AEQ_CTRL_0_INTR_MODE_MASK		0x1U
+
+#define AEQ_CTRL_0_SET(val, member)		\
+				(((val) & AEQ_CTRL_0_##member##_MASK) << \
+				AEQ_CTRL_0_##member##_SHIFT)
+
+#define AEQ_CTRL_0_CLEAR(val, member)		\
+				((val) & (~(AEQ_CTRL_0_##member##_MASK \
+					<< AEQ_CTRL_0_##member##_SHIFT)))
+
+#define AEQ_CTRL_1_LEN_SHIFT			0
+#define AEQ_CTRL_1_ELEM_SIZE_SHIFT		24
+#define AEQ_CTRL_1_PAGE_SIZE_SHIFT		28
+
+#define AEQ_CTRL_1_LEN_MASK			0x1FFFFFU
+#define AEQ_CTRL_1_ELEM_SIZE_MASK		0x3U
+#define AEQ_CTRL_1_PAGE_SIZE_MASK		0xFU
+
+#define AEQ_CTRL_1_SET(val, member)		\
+				(((val) & AEQ_CTRL_1_##member##_MASK) << \
+				AEQ_CTRL_1_##member##_SHIFT)
+
+#define AEQ_CTRL_1_CLEAR(val, member)		\
+				((val) & (~(AEQ_CTRL_1_##member##_MASK \
+					<< AEQ_CTRL_1_##member##_SHIFT)))
+
+#define CEQ_CTRL_0_INTR_IDX_SHIFT		0
+#define CEQ_CTRL_0_DMA_ATTR_SHIFT		12
+#define CEQ_CTRL_0_LIMIT_KICK_SHIFT		20
+#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT		24
+#define CEQ_CTRL_0_INTR_MODE_SHIFT		31
+
+#define CEQ_CTRL_0_INTR_IDX_MASK		0x3FFU
+#define CEQ_CTRL_0_DMA_ATTR_MASK		0x3FU
+#define CEQ_CTRL_0_LIMIT_KICK_MASK		0xFU
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK		0x3U
+#define CEQ_CTRL_0_INTR_MODE_MASK		0x1U
+
+#define CEQ_CTRL_0_SET(val, member)		\
+				(((val) & CEQ_CTRL_0_##member##_MASK) << \
+					CEQ_CTRL_0_##member##_SHIFT)
+
+#define CEQ_CTRL_1_LEN_SHIFT			0
+#define CEQ_CTRL_1_PAGE_SIZE_SHIFT		28
+
+#define CEQ_CTRL_1_LEN_MASK			0x1FFFFFU
+#define CEQ_CTRL_1_PAGE_SIZE_MASK		0xFU
+
+#define CEQ_CTRL_1_SET(val, member)		\
+				(((val) & CEQ_CTRL_1_##member##_MASK) << \
+					CEQ_CTRL_1_##member##_SHIFT)
+
+#define EQ_CONS_IDX_CONS_IDX_SHIFT		0
+#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT		24
+#define EQ_CONS_IDX_INT_ARMED_SHIFT		31
+
+#define EQ_CONS_IDX_CONS_IDX_MASK		0x1FFFFFU
+#define EQ_CONS_IDX_XOR_CHKSUM_MASK		0xFU
+#define EQ_CONS_IDX_INT_ARMED_MASK		0x1U
+
+#define EQ_CONS_IDX_SET(val, member)		\
+				(((val) & EQ_CONS_IDX_##member##_MASK) << \
+				EQ_CONS_IDX_##member##_SHIFT)
+
+#define EQ_CONS_IDX_CLEAR(val, member)		\
+				((val) & (~(EQ_CONS_IDX_##member##_MASK \
+					<< EQ_CONS_IDX_##member##_SHIFT)))
+
+#define EQ_WRAPPED(eq)			((u32)(eq)->wrapped << EQ_VALID_SHIFT)
+
+#define EQ_CONS_IDX(eq)		((eq)->cons_idx | \
+				((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))
+
+#define EQ_CONS_IDX_REG_ADDR(eq)	(((eq)->type == HINIC_AEQ) ? \
+				HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) :\
+				HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
+
+#define EQ_PROD_IDX_REG_ADDR(eq)	(((eq)->type == HINIC_AEQ) ? \
+				HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) :\
+				HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
+
+#define GET_EQ_NUM_PAGES(eq, size)		\
+		((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) \
+		/ (size)))
+
+#define GET_EQ_NUM_ELEMS(eq, pg_size)	((pg_size) / (u32)(eq)->elem_size)
+
+#define PAGE_IN_4K(page_size)		((page_size) >> 12)
+#define EQ_SET_HW_PAGE_SIZE_VAL(eq) ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))
+
+#define ELEMENT_SIZE_IN_32B(eq)		(((eq)->elem_size) >> 5)
+#define EQ_SET_HW_ELEM_SIZE_VAL(eq)	((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))
+
+#define AEQ_DMA_ATTR_DEFAULT			0
+#define CEQ_DMA_ATTR_DEFAULT			0
+
+#define CEQ_LMT_KICK_DEFAULT			0
+
+#define EQ_WRAPPED_SHIFT			20
+
+#define	EQ_VALID_SHIFT				31
+
+#define aeq_to_aeqs(eq) \
+		container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
+
+/* Compute the 4-bit XOR checksum over the eight nibbles of val */
+static u8 eq_cons_idx_checksum_set(u32 val)
+{
+	u8 checksum = 0;
+
+	/* XOR-folding remaining zero nibbles is a no-op, so the loop
+	 * may stop as soon as val is exhausted
+	 */
+	while (val != 0) {
+		checksum ^= (u8)(val & 0xF);
+		val >>= 4;
+	}
+
+	return (checksum & 0xF);
+}
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: The event queue to update the cons idx for
+ * @arm_state: HINIC_EQ_ARMED to request an interrupt on the next eq
+ *             element, HINIC_EQ_NOT_ARMED otherwise (honored for aeq0 only)
+ **/
+static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
+{
+	u32 eq_cons_idx, eq_wrap_ci, val;
+	u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+	/* consumer index with the wrapped flag folded in at bit 20 */
+	eq_wrap_ci = EQ_CONS_IDX(eq);
+
+	/* Read Modify Write */
+	val = hinic_hwif_read_reg(eq->hwdev->hwif, addr);
+
+	/* clear the three fields this function owns before re-setting them */
+	val = EQ_CONS_IDX_CLEAR(val, CONS_IDX) &
+		EQ_CONS_IDX_CLEAR(val, INT_ARMED) &
+		EQ_CONS_IDX_CLEAR(val, XOR_CHKSUM);
+
+	/* Only aeq0 uses the int_arm mode so the pmd driver can receive
+	 * async events and mailbox data; other queues are never armed
+	 */
+	if (eq->q_id == 0)
+		eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+			EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+	else
+		eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+			EQ_CONS_IDX_SET(HINIC_EQ_NOT_ARMED, INT_ARMED);
+
+	val |= eq_cons_idx;
+
+	/* nibble-XOR checksum over the full register value, placed last
+	 * so it covers every other field
+	 */
+	val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+	hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * eq_update_ci - update the cons idx of event queue
+ * @eq: the event queue to update the cons idx for
+ *
+ * Writes the current consumer index back to hw with the interrupt
+ * re-armed (effective for aeq0 only, see set_eq_cons_idx).
+ **/
+void eq_update_ci(struct hinic_eq *eq)
+{
+	set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+}
+
+/* request body for HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP */
+struct hinic_ceq_ctrl_reg {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16 func_id;
+	u16 q_id;
+	u32 ctrl0;
+	u32 ctrl1;
+};
+
+/**
+ * set_ceq_ctrl_reg - program a ceq's ctrl0/ctrl1 registers through the
+ * mgmt cpu (ceq registers are not written directly over the bar)
+ * @hwdev: the pointer to the private hardware device object
+ * @q_id: ceq id
+ * @ctrl0: value for the ctrl0 register
+ * @ctrl1: value for the ctrl1 register
+ * Return: 0 - Success, Negative - failure
+ **/
+static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id,
+			    u32 ctrl0, u32 ctrl1)
+{
+	struct hinic_ceq_ctrl_reg ceq_ctrl;
+	u16 in_size = sizeof(ceq_ctrl);
+
+	memset(&ceq_ctrl, 0, in_size);
+	/* mgmt cpu response is expected on aeq1 (resp_aeq_num) */
+	ceq_ctrl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	ceq_ctrl.func_id = hinic_global_func_id(hwdev);
+	ceq_ctrl.q_id = q_id;
+	ceq_ctrl.ctrl0 = ctrl0;
+	ceq_ctrl.ctrl1 = ctrl1;
+
+	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP,
+				     &ceq_ctrl, in_size, NULL, NULL, 0);
+}
+
+/**
+ * set_eq_ctrls - setting eq's ctrls registers
+ * @eq: the event queue for setting
+ * Return: 0 - Success, Negative - failure
+ *
+ * Aeq control registers are written directly over the bar; ceq control
+ * registers must be programmed by the mgmt cpu (set_ceq_ctrl_reg).
+ **/
+static int set_eq_ctrls(struct hinic_eq *eq)
+{
+	enum hinic_eq_type type = eq->type;
+	struct hinic_hwif *hwif = eq->hwdev->hwif;
+	struct irq_info *eq_irq = &eq->eq_irq;
+	u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
+	u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);
+	int ret = 0;
+
+	if (type == HINIC_AEQ) {
+		/* set ctrl0 (read-modify-write: clear our fields first) */
+		addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
+
+		val = hinic_hwif_read_reg(hwif, addr);
+
+		val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
+			AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
+			AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
+			AEQ_CTRL_0_CLEAR(val, INTR_MODE);
+
+		ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+			AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR)	|
+			AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)	|
+			AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+
+		val |= ctrl0;
+
+		hinic_hwif_write_reg(hwif, addr, val);
+
+		/* set ctrl1: queue length, element size and page size,
+		 * with size fields encoded as log2 values for hw
+		 */
+		addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
+
+		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+		elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
+
+		ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN)		|
+			AEQ_CTRL_1_SET(elem_size, ELEM_SIZE)	|
+			AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+		hinic_hwif_write_reg(hwif, addr, ctrl1);
+	} else {
+		ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
+			CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR)	|
+			CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |
+			CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)	|
+			CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);
+
+		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
+
+		ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) |
+			CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+
+		/* set ceq ctrl reg through mgmt cpu */
+		ret = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1);
+	}
+
+	return ret;
+}
+
+/**
+ * ceq_elements_init - Initialize all the elements in the ceq
+ * @eq: the event queue
+ * @init_val: value to init with it the elements
+ *
+ * Each 4-byte ceq element is stored big-endian; init_val carries the
+ * wrapped/valid bit (see EQ_WRAPPED). NOTE(review): the commit message
+ * states the pmd supports aeq only - confirm this path is reachable.
+ **/
+static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+	u16 i;
+	u32 *ceqe;
+
+	for (i = 0; i < eq->eq_len; i++) {
+		ceqe = GET_CEQ_ELEM(eq, i);
+		*(ceqe) = cpu_to_be32(init_val);
+	}
+
+	rte_wmb();	/* Write the init values */
+}
+
+/**
+ * aeq_elements_init - initialize all the elements in the aeq
+ * @eq: the event queue
+ * @init_val: value to init with it the elements
+ *
+ * Stores init_val (carrying the wrapped/valid bit) big-endian into the
+ * descriptor word of every aeq element, then fences the writes.
+ **/
+static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
+{
+	u16 idx;
+	struct hinic_aeq_elem *elem;
+	u32 desc_val = cpu_to_be32(init_val);
+
+	for (idx = 0; idx < eq->eq_len; idx++) {
+		elem = GET_AEQ_ELEM(eq, idx);
+		elem->desc = desc_val;
+	}
+
+	rte_wmb();	/* Write the init values */
+}
+
+/**
+ * alloc_eq_pages - allocate the pages for the queue
+ * @eq: the event queue
+ * Return: 0 - Success, Negative - failure
+ *
+ * Allocates the per-page dma/virtual address arrays, allocates every
+ * queue page, programs each page's physical address into hw and
+ * initializes all elements with the current wrapped bit.
+ **/
+static int alloc_eq_pages(struct hinic_eq *eq)
+{
+	struct hinic_hwif *hwif = eq->hwdev->hwif;
+	u32 init_val;
+	u64 dma_addr_size, virt_addr_size;
+	u16 pg_num, i;
+	int err;
+
+	dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
+	virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);
+
+	eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
+	if (!eq->dma_addr) {
+		PMD_DRV_LOG(ERR, "Allocate dma addr array failed");
+		return -ENOMEM;
+	}
+
+	eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
+	if (!eq->virt_addr) {
+		PMD_DRV_LOG(ERR, "Allocate virt addr array failed");
+		err = -ENOMEM;
+		goto virt_addr_alloc_err;
+	}
+
+	for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
+		eq->virt_addr[pg_num] =
+			(u8 *)dma_zalloc_coherent_aligned(eq->hwdev,
+					eq->page_size, &eq->dma_addr[pg_num],
+					GFP_KERNEL);
+		if (!eq->virt_addr[pg_num]) {
+			err = -ENOMEM;
+			goto dma_alloc_err;
+		}
+
+		/* tell hw where this queue page lives (64-bit address
+		 * split across the hi/lo registers)
+		 */
+		hinic_hwif_write_reg(hwif,
+				     HINIC_EQ_HI_PHYS_ADDR_REG(eq->type,
+				     eq->q_id, pg_num),
+				     upper_32_bits(eq->dma_addr[pg_num]));
+
+		hinic_hwif_write_reg(hwif,
+				     HINIC_EQ_LO_PHYS_ADDR_REG(eq->type,
+				     eq->q_id, pg_num),
+				     lower_32_bits(eq->dma_addr[pg_num]));
+	}
+
+	init_val = EQ_WRAPPED(eq);
+
+	if (eq->type == HINIC_AEQ)
+		aeq_elements_init(eq, init_val);
+	else
+		ceq_elements_init(eq, init_val);
+
+	return 0;
+
+dma_alloc_err:
+	for (i = 0; i < pg_num; i++)
+		dma_free_coherent(eq->hwdev, eq->page_size,
+				  eq->virt_addr[i], eq->dma_addr[i]);
+
+	/* fix: the virt addr array itself was leaked on this path */
+	kfree(eq->virt_addr);
+
+virt_addr_alloc_err:
+	kfree(eq->dma_addr);
+	return err;
+}
+
+/**
+ * free_eq_pages - free the pages of the queue
+ * @eq: the event queue
+ *
+ * Frees every coherent queue page and then the per-page address
+ * arrays allocated by alloc_eq_pages.
+ **/
+static void free_eq_pages(struct hinic_eq *eq)
+{
+	struct hinic_hwdev *hwdev = eq->hwdev;
+	u16 pg_num;
+
+	for (pg_num = 0; pg_num < eq->num_pages; pg_num++)
+		dma_free_coherent(hwdev, eq->page_size,
+				  eq->virt_addr[pg_num],
+				  eq->dma_addr[pg_num]);
+
+	kfree(eq->virt_addr);
+	kfree(eq->dma_addr);
+}
+
+#define MSIX_ENTRY_IDX_0 (0)
+
+/**
+ * init_eq - initialize eq
+ * @eq:	the event queue
+ * @hwdev: the pointer to the private hardware device object
+ * @q_id: Queue id number
+ * @q_len: the number of EQ elements
+ * @type: the type of the event queue, ceq or aeq
+ * @page_size: the page size of the event queue
+ * @entry: msix entry associated with the event queue (unused: the pmd
+ *         always uses msix entry 0, see below)
+ * Return: 0 - Success, Negative - failure
+ **/
+static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
+		   u16 q_len, enum hinic_eq_type type, u32 page_size,
+		   __rte_unused struct irq_info *entry)
+{
+	int err = 0;
+
+	eq->hwdev = hwdev;
+	eq->q_id = q_id;
+	eq->type = type;
+	eq->page_size = page_size;
+	eq->eq_len = q_len;
+
+	/* clear eq_len to force eqe drop in hardware */
+	if (eq->type == HINIC_AEQ) {
+		hinic_hwif_write_reg(eq->hwdev->hwif,
+				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+	} else {
+		err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Set ceq control registers ctrl0[0] ctrl1[0] failed");
+			return err;
+		}
+	}
+
+	eq->cons_idx = 0;
+	eq->wrapped = 0;
+
+	eq->elem_size = (type == HINIC_AEQ) ?
+			HINIC_AEQE_SIZE : HINIC_CEQE_SIZE;
+	eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
+	eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size);
+
+	/* GET_EQ_ELEMENT indexes pages with a mask, so the per-page
+	 * element count must be a power of 2
+	 */
+	if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
+		PMD_DRV_LOG(ERR, "Number element in eq page is not power of 2");
+		return -EINVAL;
+	}
+
+	if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
+		PMD_DRV_LOG(ERR, "Too many pages for eq, num_pages: %d",
+			eq->num_pages);
+		return -EINVAL;
+	}
+
+	err = alloc_eq_pages(eq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Allocate pages for eq failed");
+		return err;
+	}
+
+	/* pmd always uses MSIX_ENTRY_IDX_0; @entry is ignored */
+	eq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0;
+
+	err = set_eq_ctrls(eq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Init eq control registers failed");
+		goto init_eq_ctrls_err;
+	}
+
+	/* reset prod idx, then publish cons idx with interrupt armed */
+	hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+	set_eq_cons_idx(eq, HINIC_EQ_ARMED);
+
+	if (eq->q_id == 0)
+		hinic_set_msix_state(hwdev, 0, HINIC_MSIX_ENABLE);
+
+	eq->poll_retry_nr = HINIC_RETRY_NUM;
+
+	return 0;
+
+init_eq_ctrls_err:
+	free_eq_pages(eq);
+
+	return err;
+}
+
+/**
+ * remove_eq - remove eq
+ * @eq:	the event queue
+ *
+ * Disables the queue in hw (and its msix interrupt for aeq0), then
+ * releases the queue pages. Mirrors init_eq in reverse order.
+ **/
+static void remove_eq(struct hinic_eq *eq)
+{
+	struct irq_info *entry = &eq->eq_irq;
+
+	if (eq->type == HINIC_AEQ) {
+		if (eq->q_id == 0)
+			hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+					     HINIC_MSIX_DISABLE);
+
+		/* clear eq_len to avoid hw access host memory */
+		hinic_hwif_write_reg(eq->hwdev->hwif,
+				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+	} else {
+		/* best effort: ignore mgmt cpu failures during teardown */
+		(void)set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+	}
+
+	/* update cons_idx to avoid invalid interrupt */
+	eq->cons_idx = (u16)hinic_hwif_read_reg(eq->hwdev->hwif,
+						EQ_PROD_IDX_REG_ADDR(eq));
+	set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);
+
+	free_eq_pages(eq);
+}
+
+/**
+ * hinic_aeqs_init - init all the aeqs
+ * @hwdev: the pointer to the private hardware device object
+ * @num_aeqs: number of aeq
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ *
+ * On failure, all queues initialized so far are removed and
+ * hwdev->aeqs is reset to NULL.
+ **/
+static int
+hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
+		struct irq_info *msix_entries)
+{
+	struct hinic_aeqs *aeqs;
+	int err;
+	u16 i, q_id;
+
+	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+	if (!aeqs)
+		return -ENOMEM;
+
+	hwdev->aeqs = aeqs;
+	aeqs->hwdev = hwdev;
+	aeqs->num_aeqs = num_aeqs;
+
+	for (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) {
+		err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
+			      HINIC_DEFAULT_AEQ_LEN, HINIC_AEQ,
+			      HINIC_EQ_PAGE_SIZE, &msix_entries[q_id]);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Init aeq %d failed", q_id);
+			goto init_aeq_err;
+		}
+	}
+
+	return 0;
+
+init_aeq_err:
+	for (i = 0; i < q_id; i++)
+		remove_eq(&aeqs->aeq[i]);
+
+	kfree(aeqs);
+	/* fix: do not leave hwdev->aeqs dangling after the free */
+	hwdev->aeqs = NULL;
+
+	return err;
+}
+
+/**
+ * hinic_aeqs_free - free all the aeqs
+ * @hwdev: the pointer to the private hardware device object
+ *
+ * NOTE(review): the original comment claimed the pmd uses aeq[1~3]
+ * with aeq[0] reserved for the kernel, but HINIC_AEQN_START is 0, so
+ * every queue in [HINIC_AEQN_START, num_aeqs) is removed - confirm
+ * which is intended.
+ **/
+static void hinic_aeqs_free(struct hinic_hwdev *hwdev)
+{
+	struct hinic_aeqs *aeqs = hwdev->aeqs;
+	u16 q_id;
+
+	for (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs; q_id++)
+		remove_eq(&aeqs->aeq[q_id]);
+
+	kfree(aeqs);
+}
+
+/**
+ * hinic_dump_aeq_info - log the hw ci/pi registers of every aeq
+ * @hwdev: the pointer to the private hardware device object
+ *
+ * Diagnostic helper; logs at ERR level so the dump is visible when
+ * called from failure paths.
+ **/
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
+{
+	struct hinic_eq *eq;
+	u32 addr, ci, pi;
+	u16 q_id;	/* fix: u16 to match num_aeqs, avoids signed/unsigned compare */
+
+	for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
+		eq = &hwdev->aeqs->aeq[q_id];
+		addr = EQ_CONS_IDX_REG_ADDR(eq);
+		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
+		addr = EQ_PROD_IDX_REG_ADDR(eq);
+		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
+		PMD_DRV_LOG(ERR, "aeq id: %d, ci: 0x%x, pi: 0x%x",
+			q_id, ci, pi);
+	}
+}
+
+/**
+ * hinic_comm_aeqs_init - allocate and initialize the pmd's aeqs
+ * @hwdev: the pointer to the private hardware device object
+ * Return: HINIC_OK - Success, other - failure
+ **/
+int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev)
+{
+	int rc;
+	u16 num_aeqs;
+	struct irq_info aeq_irqs[HINIC_MAX_AEQS];
+
+	num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
+	/* NOTE(review): the message says "Warning" but this path is
+	 * treated as a fatal error (returns HINIC_ERROR) - reword?
+	 */
+	if (num_aeqs < HINIC_MAX_AEQS) {
+		PMD_DRV_LOG(ERR, "Warning: PMD need %d AEQs, Chip have %d",
+			HINIC_MAX_AEQS, num_aeqs);
+		return HINIC_ERROR;
+	}
+
+	/* irq info is zeroed; init_eq ignores it and uses msix entry 0 */
+	memset(aeq_irqs, 0, sizeof(aeq_irqs));
+	rc = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
+	if (rc != HINIC_OK)
+		PMD_DRV_LOG(ERR, "Initialize aeqs failed, rc: %d", rc);
+
+	return rc;
+}
+
+/**
+ * hinic_comm_aeqs_free - public wrapper releasing all aeqs
+ * @hwdev: the pointer to the private hardware device object
+ **/
+void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev)
+{
+	hinic_aeqs_free(hwdev);
+}
diff --git a/drivers/net/hinic/base/hinic_pmd_eqs.h b/drivers/net/hinic/base/hinic_pmd_eqs.h
new file mode 100644
index 000000000..fdb98544d
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_eqs.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_EQS_H_
+#define _HINIC_PMD_EQS_H_
+
+#define HINIC_EQ_PAGE_SIZE		0x00001000
+
+#define HINIC_AEQN_START		0
+#define HINIC_MAX_AEQS			4
+
+#define HINIC_EQ_MAX_PAGES		8
+
+#define HINIC_AEQE_SIZE			64
+#define HINIC_CEQE_SIZE			4
+
+#define HINIC_AEQE_DESC_SIZE		4
+#define HINIC_AEQE_DATA_SIZE		\
+			(HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
+
+#define HINIC_DEFAULT_AEQ_LEN		64
+
+#define HINIC_RECV_NEXT_AEQE		HINIC_ERROR
+#define HINIC_RECV_DONE			HINIC_OK
+
+#define GET_EQ_ELEMENT(eq, idx)		\
+		(((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \
+		(((u32)(idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
+
+#define GET_AEQ_ELEM(eq, idx)		\
+			((struct hinic_aeq_elem *)GET_EQ_ELEMENT((eq), (idx)))
+
+#define GET_CEQ_ELEM(eq, idx)	((u32 *)GET_EQ_ELEMENT((eq), (idx)))
+
+/* interrupt generation mode programmed into the eq ctrl0 register */
+enum hinic_eq_intr_mode {
+	HINIC_INTR_MODE_ARMED,	/* interrupt only while re-armed */
+	HINIC_INTR_MODE_ALWAYS,
+};
+
+/* arm_state argument for set_eq_cons_idx (INT_ARMED field) */
+enum hinic_eq_ci_arm_state {
+	HINIC_EQ_NOT_ARMED,
+	HINIC_EQ_ARMED,
+};
+
+/* event source carried in the aeq element descriptor; values are part
+ * of the hw/mgmt interface - do not renumber
+ */
+enum hinic_aeq_type {
+	HINIC_HW_INTER_INT = 0,
+	HINIC_MBX_FROM_FUNC = 1,
+	HINIC_MSG_FROM_MGMT_CPU = 2,
+	HINIC_API_RSP = 3,
+	HINIC_API_CHAIN_STS = 4,
+	HINIC_MBX_SEND_RSLT = 5,
+	HINIC_MAX_AEQ_EVENTS
+};
+
+#define HINIC_RETRY_NUM	(10)
+
+/* run-time state of one event queue (aeq or ceq) */
+struct hinic_eq {
+	struct hinic_hwdev		*hwdev;
+	u16				q_id;	/* index within its type */
+	enum hinic_eq_type		type;	/* aeq/ceq discriminator */
+	u32				page_size;
+	u16				eq_len;	/* number of elements */
+
+	u16				cons_idx;	/* next element to consume */
+	u16				wrapped;	/* folded into hw ci at EQ_WRAPPED_SHIFT */
+
+	u16				elem_size;	/* bytes per element */
+	u16				num_pages;
+	u32				num_elem_in_pg;	/* must be a power of 2 */
+
+	struct irq_info			eq_irq;
+
+	dma_addr_t			*dma_addr;	/* per-page dma addresses */
+	u8				**virt_addr;	/* per-page virtual addresses */
+
+	u16				poll_retry_nr;	/* set to HINIC_RETRY_NUM */
+};
+
+/* aeq element: 60 bytes of payload followed by the 4-byte descriptor */
+struct hinic_aeq_elem {
+	u8	aeqe_data[HINIC_AEQE_DATA_SIZE];
+	u32	desc;	/* big-endian; initialized with the wrapped bit */
+};
+
+/* container for all aeqs owned by one hw device */
+struct hinic_aeqs {
+	struct hinic_hwdev	*hwdev;
+	u16			poll_retry_nr;
+
+	struct hinic_eq		aeq[HINIC_MAX_AEQS];
+	u16			num_aeqs;	/* valid entries in aeq[] */
+};
+
+void eq_update_ci(struct hinic_eq *eq);
+
+void hinic_dump_aeq_info(struct hinic_hwdev *hwdev);
+
+int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev);
+
+void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev);
+
+#endif /* _HINIC_PMD_EQS_H_ */
-- 
2.18.0


  parent reply	other threads:[~2019-06-19 16:13 UTC|newest]

Thread overview: 29+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-06-19 15:45 [dpdk-dev] [PATCH v5 00/15] A new net PMD - hinic Ziyang Xuan
2019-06-19 15:46 ` [dpdk-dev] [PATCH v5 01/15] net/hinic/base: add HW registers definition Ziyang Xuan
2019-06-19 15:50 ` [dpdk-dev] [PATCH v5 02/15] net/hinic/base: add HW interfaces of bar operation Ziyang Xuan
2019-06-19 15:47   ` Ziyang Xuan
2019-06-19 15:54 ` [dpdk-dev] [PATCH v5 03/15] net/hinic/base: add api command channel code Ziyang Xuan
2019-06-19 15:55 ` [dpdk-dev] [PATCH v5 04/15] net/hinic/base: add support for cmdq mechanism Ziyang Xuan
2019-06-19 15:56 ` Ziyang Xuan [this message]
2019-06-19 15:57 ` [dpdk-dev] [PATCH v5 06/15] net/hinic/base: add mgmt module function code Ziyang Xuan
2019-06-19 16:02 ` [dpdk-dev] [PATCH v5 07/15] net/hinic/base: add code about hardware operation Ziyang Xuan
2019-06-19 15:58   ` Ziyang Xuan
2019-06-19 16:04 ` [dpdk-dev] [PATCH v5 08/15] net/hinic/base: add nic business configurations Ziyang Xuan
2019-06-19 16:05 ` [dpdk-dev] [PATCH v5 09/15] net/hinic/base: add context and work queue support Ziyang Xuan
2019-06-19 16:08 ` [dpdk-dev] [PATCH v5 10/15] net/hinic: add various headers Ziyang Xuan
2019-06-26 11:54   ` Ferruh Yigit
2019-06-19 16:09 ` [dpdk-dev] [PATCH v5 11/15] net/hinic: add hinic PMD build and doc files Ziyang Xuan
2019-06-19 16:13   ` Ziyang Xuan
2019-06-26 11:55   ` Ferruh Yigit
2019-06-19 16:16 ` [dpdk-dev] [PATCH v5 12/15] net/hinic: add device initailization Ziyang Xuan
2019-06-19 16:14   ` Ziyang Xuan
2019-06-19 16:30   ` Ziyang Xuan
2019-06-19 16:18 ` [dpdk-dev] [PATCH v5 13/15] net/hinic: add start stop close queue ops Ziyang Xuan
2019-06-19 16:32   ` Ziyang Xuan
2019-06-19 16:20 ` [dpdk-dev] [PATCH v5 14/15] net/hinic: add tx/rx package burst Ziyang Xuan
2019-06-19 16:25   ` Ziyang Xuan
2019-06-26 11:54   ` Ferruh Yigit
2019-06-26 15:58     ` [dpdk-dev] 答复: " Xuanziyang (William, Chip Application Design Logic and Hardware Development Dept IT_Products & Solutions)
2019-06-26 16:05       ` Ferruh Yigit
2019-06-19 16:23 ` [dpdk-dev] [PATCH v5 15/15] net/hinic: add rss stats promisc ops Ziyang Xuan
2019-06-26 11:56 ` [dpdk-dev] [PATCH v5 00/15] A new net PMD - hinic Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=0b2dd974286b662bd665b6e798cf288e76be5c4b.1560958308.git.xuanziyang2@huawei.com \
    --to=xuanziyang2@huawei.com \
    --cc=cloud.wangxiaoyun@huawei.com \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@intel.com \
    --cc=luoxianjun@huawei.com \
    --cc=shahar.belkar@huawei.com \
    --cc=zhouguoyang@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).