DPDK patches and discussions
From: <wanry@3snic.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@amd.com>, Renyong Wan <wanry@3snic.com>,
	Steven Song <steven.song@3snic.com>
Subject: [PATCH v5 05/32] net/sssnic: add event queue
Date: Mon, 4 Sep 2023 12:56:31 +0800	[thread overview]
Message-ID: <20230904045658.238185-6-wanry@3snic.com> (raw)
In-Reply-To: <20230904045658.238185-1-wanry@3snic.com>

From: Renyong Wan <wanry@3snic.com>

The event queue is intended for receiving events from hardware as well
as mailbox response messages.
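
A minimal usage sketch from the caller's point of view (only the
functions added by this patch are used; error handling is trimmed and
handler registration is out of scope here):

	ret = sssnic_eventq_all_init(hw);
	if (ret != 0)
		return ret;
	/* Poll event queue 0 for up to 100 ms and dispatch pending events. */
	ret = sssnic_eventq_flush(hw, 0, 100);
	/* On teardown, disable and free all event queues. */
	sssnic_eventq_all_shutdown(hw);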

Signed-off-by: Steven Song <steven.song@3snic.com>
Signed-off-by: Renyong Wan <wanry@3snic.com>
---
v4:
* Fixed dereferencing type-punned pointer.
* Fixed coding style issue of COMPLEX_MACRO.
---
 drivers/net/sssnic/base/meson.build     |   1 +
 drivers/net/sssnic/base/sssnic_eventq.c | 432 ++++++++++++++++++++++++
 drivers/net/sssnic/base/sssnic_eventq.h |  84 +++++
 drivers/net/sssnic/base/sssnic_hw.c     |   9 +-
 drivers/net/sssnic/base/sssnic_hw.h     |   5 +
 drivers/net/sssnic/base/sssnic_reg.h    |  51 +++
 drivers/net/sssnic/sssnic_ethdev.c      |   1 +
 7 files changed, 582 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/sssnic/base/sssnic_eventq.c
 create mode 100644 drivers/net/sssnic/base/sssnic_eventq.h

diff --git a/drivers/net/sssnic/base/meson.build b/drivers/net/sssnic/base/meson.build
index 3e64112c72..7758faa482 100644
--- a/drivers/net/sssnic/base/meson.build
+++ b/drivers/net/sssnic/base/meson.build
@@ -3,6 +3,7 @@
 
 sources = [
         'sssnic_hw.c',
+        'sssnic_eventq.c'
 ]
 
 c_args = cflags
diff --git a/drivers/net/sssnic/base/sssnic_eventq.c b/drivers/net/sssnic/base/sssnic_eventq.c
new file mode 100644
index 0000000000..a74b74f756
--- /dev/null
+++ b/drivers/net/sssnic/base/sssnic_eventq.c
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2022 Shenzhen 3SNIC Information Technology Co., Ltd.
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_bus_pci.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <ethdev_pci.h>
+#include <ethdev_driver.h>
+
+#include "../sssnic_log.h"
+#include "sssnic_hw.h"
+#include "sssnic_reg.h"
+#include "sssnic_eventq.h"
+
+#define SSSNIC_EVENTQ_DEF_DEPTH 64
+#define SSSNIC_EVENTQ_NUM_PAGES 4
+#define SSSNIC_EVENTQ_MAX_PAGE_SZ 0x400000
+#define SSSNIC_EVENTQ_MIN_PAGE_SZ 0x1000
+
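+/* Address of the idx-th event entry within a queue page. */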
+#define SSSNIC_EVENT_ADDR(base_addr, event_sz, idx)                            \
+	((struct sssnic_event *)(((uint8_t *)(base_addr)) + ((idx) * (event_sz))))
+
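+/* Return the event entry at the current consumer index. */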
+static inline struct sssnic_event *
+sssnic_eventq_peek(struct sssnic_eventq *eq)
+{
+	uint16_t page = eq->ci / eq->page_len;
+	uint16_t idx = eq->ci % eq->page_len;
+
+	return SSSNIC_EVENT_ADDR(eq->pages[page]->addr, eq->entry_size, idx);
+}
+
+static inline void
+sssnic_eventq_reg_write(struct sssnic_eventq *eq, uint32_t reg, uint32_t val)
+{
+	sssnic_cfg_reg_write(eq->hw, reg, val);
+}
+
+static inline uint32_t
+sssnic_eventq_reg_read(struct sssnic_eventq *eq, uint32_t reg)
+{
+	return sssnic_cfg_reg_read(eq->hw, reg);
+}
+
+static inline void
+sssnic_eventq_reg_write64(struct sssnic_eventq *eq, uint32_t reg, uint64_t val)
+{
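+	/* Write the high 32 bits first, then the low 32 bits. */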
+	sssnic_cfg_reg_write(eq->hw, reg, (uint32_t)((val >> 16) >> 16));
+	sssnic_cfg_reg_write(eq->hw, reg + sizeof(uint32_t), (uint32_t)val);
+}
+
+/* All eventq registers must be selected by queue id before being accessed. */
+static inline void
+sssnic_eventq_reg_select(struct sssnic_eventq *eq)
+{
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_IDX_SEL_REG, eq->qid);
+}
+
+static const struct rte_memzone *
+sssnic_eventq_page_alloc(struct sssnic_eventq *eq, int page_idx)
+{
+	const struct rte_memzone *mz = NULL;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+
+	snprintf(mz_name, sizeof(mz_name), "sssnic%u_eq%d_page%d",
+		SSSNIC_ETH_PORT_ID(eq->hw), eq->qid, page_idx);
+	mz = rte_memzone_reserve_aligned(mz_name, eq->page_size, SOCKET_ID_ANY,
+		RTE_MEMZONE_IOVA_CONTIG, eq->page_size);
+	return mz;
+}
+
+static uint32_t
+sssnic_eventq_page_size_calc(uint32_t depth, uint32_t entry_size)
+{
+	uint32_t pages = SSSNIC_EVENTQ_NUM_PAGES;
+	uint32_t size;
+
+	size = RTE_ALIGN(depth * entry_size, SSSNIC_EVENTQ_MIN_PAGE_SZ);
+	if (size <= pages * SSSNIC_EVENTQ_MIN_PAGE_SZ) {
+		/* use minimum page size */
+		return SSSNIC_EVENTQ_MIN_PAGE_SZ;
+	}
+
+	/* Work out how many minimum-size pages each of the larger pages covers */
+	size = RTE_ALIGN(size / pages, SSSNIC_EVENTQ_MIN_PAGE_SZ);
+	pages = rte_fls_u32(size / SSSNIC_EVENTQ_MIN_PAGE_SZ);
+
+	return SSSNIC_EVENTQ_MIN_PAGE_SZ * pages;
+}
+
+static int
+sssnic_eventq_pages_setup(struct sssnic_eventq *eq)
+{
+	const struct rte_memzone *mz;
+	struct sssnic_event *ev;
+	int i, j;
+
+	eq->pages = rte_zmalloc(NULL,
+		eq->num_pages * sizeof(struct rte_memzone *), 1);
+	if (eq->pages == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc memory for pages");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < eq->num_pages; i++) {
+		mz = sssnic_eventq_page_alloc(eq, i);
+		if (mz == NULL) {
+			PMD_DRV_LOG(ERR,
+				"Could not alloc DMA memory for eventq page %d",
+				i);
+			goto alloc_dma_fail;
+		}
+		/* init eventq entries */
+		for (j = 0; j < eq->page_len; j++) {
+			ev = SSSNIC_EVENT_ADDR(mz->addr, eq->entry_size, j);
+			ev->desc.u32 = 0;
+		}
+		eq->pages[i] = mz;
+		sssnic_eventq_reg_write64(eq,
+			SSSNIC_EVENTQ_PAGE_ADDR_REG + i * sizeof(uint64_t),
+			mz->iova);
+	}
+
+	return 0;
+
+alloc_dma_fail:
+	while (i--)
+		rte_memzone_free(eq->pages[i]);
+	rte_free(eq->pages);
+	return -ENOMEM;
+}
+
+static void
+sssnic_eventq_pages_cleanup(struct sssnic_eventq *eq)
+{
+	int i;
+
+	if (eq->pages == NULL)
+		return;
+	for (i = 0; i < eq->num_pages; i++)
+		rte_memzone_free(eq->pages[i]);
+	rte_free(eq->pages);
+	eq->pages = NULL;
+}
+
+static void
+sssnic_eventq_ctrl_setup(struct sssnic_eventq *eq)
+{
+	struct sssnic_hw *hw = eq->hw;
+	struct sssnic_eventq_ctrl0_reg ctrl_0;
+	struct sssnic_eventq_ctrl1_reg ctrl_1;
+
+	ctrl_0.u32 = sssnic_eventq_reg_read(eq, SSSNIC_EVENTQ_CTRL0_REG);
+	ctrl_0.intr_idx = eq->msix_entry;
+	ctrl_0.dma_attr = SSSNIC_REG_EVENTQ_DEF_DMA_ATTR;
+	ctrl_0.pci_idx = hw->attr.pci_idx;
+	ctrl_0.intr_mode = SSSNIC_REG_EVENTQ_INTR_MODE_0;
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CTRL0_REG, ctrl_0.u32);
+
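+	/* page_size is encoded as log2 of 4KB units,
+	 * entry_size as log2 of 32-byte units.
+	 */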
+	ctrl_1.page_size = rte_log2_u32(eq->page_size >> 12);
+	ctrl_1.depth = eq->depth;
+	ctrl_1.entry_size = rte_log2_u32(eq->entry_size >> 5);
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CTRL1_REG, ctrl_1.u32);
+}
+
+/* Synchronize the current software CI to hardware.
+ * @informed: indicates whether events are to be signalled by interrupt.
+ *            0: not to be informed
+ *            1: informed by interrupt
+ */
+static void
+sssnic_eventq_ci_update(struct sssnic_eventq *eq, int informed)
+{
+	struct sssnic_eventq_ci_ctrl_reg reg;
+
+	reg.u32 = 0;
+	if (eq->qid == 0)
+		reg.informed = !!informed;
+	reg.qid = eq->qid;
+	reg.ci = eq->ci_wrapped;
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CI_CTRL_REG, reg.u32);
+}
+
+static int
+sssnic_eventq_init(struct sssnic_hw *hw, struct sssnic_eventq *eq, uint16_t qid)
+{
+	int ret;
+
+	if (hw == NULL || eq == NULL) {
+		PMD_DRV_LOG(ERR,
+			"Bad parameter for event queue initialization.");
+		return -EINVAL;
+	}
+
+	eq->hw = hw;
+	eq->msix_entry = 0; /* event queues use MSI-X entry 0 in the PMD */
+	eq->qid = qid;
+	eq->depth = SSSNIC_EVENTQ_DEF_DEPTH;
+	eq->entry_size = SSSNIC_EVENT_SIZE;
+	eq->page_size = sssnic_eventq_page_size_calc(eq->depth, eq->entry_size);
+	eq->page_len = eq->page_size / eq->entry_size;
+	if (eq->page_len & (eq->page_len - 1)) {
+		PMD_DRV_LOG(ERR, "Invalid page length: %d, must be power of 2",
+			eq->page_len);
+		return -EINVAL;
+	}
+	eq->num_pages = RTE_ALIGN((eq->depth * eq->entry_size), eq->page_size) /
+			eq->page_size;
+	if (eq->num_pages > SSSNIC_EVENTQ_NUM_PAGES) {
+		PMD_DRV_LOG(ERR,
+			"Invalid number of pages: %d, can't be more than %d pages.",
+			eq->num_pages, SSSNIC_EVENTQ_NUM_PAGES);
+		return -EINVAL;
+	}
+
+	/* select the eventq whose registers are to be accessed */
+	sssnic_eventq_reg_select(eq);
+	rte_wmb();
+	/* clear entries in eventq */
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CTRL1_REG, 0);
+	rte_wmb();
+	/* reset pi to 0 */
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_PROD_IDX_REG, 0);
+
+	ret = sssnic_eventq_pages_setup(eq);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to setup eventq pages!");
+		return ret;
+	}
+	sssnic_eventq_ctrl_setup(eq);
+	sssnic_eventq_ci_update(eq, 1);
+	if (qid == 0)
+		sssnic_msix_state_set(eq->hw, 0, SSSNIC_MSIX_ENABLE);
+
+	PMD_DRV_LOG(DEBUG,
+		"eventq %u: q_depth=%u, entry_size=%u, num_pages=%u, page_size=%u, page_len=%u",
+		qid, eq->depth, eq->entry_size, eq->num_pages, eq->page_size,
+		eq->page_len);
+
+	return 0;
+}
+
+static void
+sssnic_eventq_shutdown(struct sssnic_eventq *eq)
+{
+	if (eq->qid == 0)
+		sssnic_msix_state_set(eq->hw, 0, SSSNIC_MSIX_DISABLE);
+
+	sssnic_eventq_reg_select(eq);
+	rte_wmb();
+
+	sssnic_eventq_reg_write(eq, SSSNIC_EVENTQ_CTRL1_REG, 0);
+	eq->ci = sssnic_eventq_reg_read(eq, SSSNIC_EVENTQ_PROD_IDX_REG);
+	sssnic_eventq_ci_update(eq, 0);
+	sssnic_eventq_pages_cleanup(eq);
+}
+
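+/* Convert an event entry from big endian to CPU order, 32 bits at a time. */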
+static void
+sssnic_event_be_to_cpu_32(struct sssnic_event *in, struct sssnic_event *out)
+{
+	uint32_t i;
+	uint32_t count;
+	uint32_t *dw_in = (uint32_t *)in;
+	uint32_t *dw_out = (uint32_t *)out;
+
+	count = SSSNIC_EVENT_SIZE / sizeof(uint32_t);
+	for (i = 0; i < count; i++) {
+		*dw_out = rte_be_to_cpu_32(*dw_in);
+		dw_out++;
+		dw_in++;
+	}
+}
+
+static int
+sssinc_event_handle(struct sssnic_eventq *eq, struct sssnic_event *event)
+{
+	struct sssnic_event ev;
+	sssnic_event_handler_func_t *func;
+	void *data;
+
+	sssnic_event_be_to_cpu_32(event, &ev);
+	if (ev.desc.code < SSSNIC_EVENT_CODE_MIN ||
+		ev.desc.code > SSSNIC_EVENT_CODE_MAX) {
+		PMD_DRV_LOG(ERR, "Event code %d is not supported",
+			ev.desc.code);
+		return -1;
+	}
+
+	func = eq->handlers[ev.desc.code].func;
+	data = eq->handlers[ev.desc.code].data;
+	if (func == NULL) {
+		PMD_DRV_LOG(NOTICE,
+			"Could not find handler for event qid:%u code:%d",
+			eq->qid, ev.desc.code);
+		return -1;
+	}
+
+	return func(eq, &ev, data);
+}
+
+/* Poll for one valid event, waiting at most timeout_ms milliseconds. */
+static struct sssnic_event *
+sssnic_eventq_poll(struct sssnic_eventq *eq, uint32_t timeout_ms)
+{
+	struct sssnic_event *event;
+	struct sssnic_eventd desc;
+	uint64_t end;
+
+	if (timeout_ms > 0)
+		end = rte_get_timer_cycles() +
+		      rte_get_timer_hz() * timeout_ms / 1000;
+
+	do {
+		event = sssnic_eventq_peek(eq);
+		desc.u32 = rte_be_to_cpu_32(event->desc.u32);
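+		/* A new event is present when its wrapped bit
+		 * differs from the software wrap flag.
+		 */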
+		if (desc.wrapped != eq->wrapped)
+			return event;
+
+		if (timeout_ms > 0)
+			rte_delay_us_sleep(1000);
+	} while ((timeout_ms > 0) &&
+		 (((long)(rte_get_timer_cycles() - end)) < 0));
+
+	return NULL;
+}
+
+/* Handle one or more pending events on the given event queue. */
+int
+sssnic_eventq_flush(struct sssnic_hw *hw, uint16_t qid, uint32_t timeout_ms)
+{
+	int found = 0;
+	uint32_t i = 0;
+	int done = 0;
+	struct sssnic_event *event;
+	struct sssnic_eventq *eq;
+
+	if (qid >= hw->num_eventqs) {
+		PMD_DRV_LOG(ERR,
+			"Bad parameter, event queue id must be less than %u",
+			hw->num_eventqs);
+		return -EINVAL;
+	}
+
+	eq = &hw->eventqs[qid];
+	for (i = 0; i < eq->depth; i++) {
+		event = sssnic_eventq_poll(eq, timeout_ms);
+		if (event == NULL)
+			break;
+		done = sssinc_event_handle(eq, event);
+		eq->ci++;
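+		/* Wrap CI and toggle the wrap flag at queue end. */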
+		if (eq->ci == eq->depth) {
+			eq->ci = 0;
+			eq->wrapped = !eq->wrapped;
+		}
+
+		found++;
+		if (done == SSSNIC_EVENT_DONE)
+			break;
+	}
+
+	SSSNIC_DEBUG("found:%d, done:%d, ci:%u, depth:%u, wrapped:%u", found,
+		done, eq->ci, eq->depth, eq->wrapped);
+
+	if (!found)
+		return -ETIME;
+
+	sssnic_eventq_ci_update(eq, 1);
+
+	if (event == NULL || done != SSSNIC_EVENT_DONE)
+		return -ETIME;
+
+	return 0;
+}
+
+int
+sssnic_eventq_all_init(struct sssnic_hw *hw)
+{
+	struct sssnic_eventq *eventqs;
+	int num_eventqs;
+	int i = 0;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	num_eventqs = hw->attr.num_aeq;
+	eventqs = rte_zmalloc(NULL, sizeof(struct sssnic_eventq) * num_eventqs,
+		1);
+	if (eventqs == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc memory for event queue");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_eventqs; i++) {
+		ret = sssnic_eventq_init(hw, &eventqs[i], i);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR, "Failed to initialize event queue: %d",
+				i);
+			goto init_eventq_fail;
+		}
+	}
+	hw->eventqs = eventqs;
+	hw->num_eventqs = num_eventqs;
+
+	PMD_DRV_LOG(INFO, "Initialized %d event queues", num_eventqs);
+
+	return 0;
+
+init_eventq_fail:
+	while (i--)
+		sssnic_eventq_shutdown(&eventqs[i]);
+	rte_free(eventqs);
+	return ret;
+}
+
+void
+sssnic_eventq_all_shutdown(struct sssnic_hw *hw)
+{
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (hw->eventqs == NULL)
+		return;
+
+	for (i = 0; i < hw->num_eventqs; i++)
+		sssnic_eventq_shutdown(&hw->eventqs[i]);
+	rte_free(hw->eventqs);
+	hw->eventqs = NULL;
+}
diff --git a/drivers/net/sssnic/base/sssnic_eventq.h b/drivers/net/sssnic/base/sssnic_eventq.h
new file mode 100644
index 0000000000..a196c10f48
--- /dev/null
+++ b/drivers/net/sssnic/base/sssnic_eventq.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2022 Shenzhen 3SNIC Information Technology Co., Ltd.
+ */
+
+#ifndef _SSSNIC_EVENTQ_H_
+#define _SSSNIC_EVENTQ_H_
+
+#define SSSNIC_MAX_NUM_EVENTQ 4
+#define SSSNIC_MIN_NUM_EVENTQ 2
+
+#define SSSNIC_EVENT_DESC_SIZE sizeof(uint32_t)
+#define SSSNIC_EVENT_SIZE 64
+#define SSSNIC_EVENT_DATA_SIZE (SSSNIC_EVENT_SIZE - SSSNIC_EVENT_DESC_SIZE)
+
+enum sssnic_event_code {
+	SSSNIC_EVENT_CODE_RESVD = 0,
+	SSSNIC_EVENT_FROM_FUNC = 1, /* event from PF and VF */
+	SSSNIC_EVENT_FROM_MPU = 2, /* event from management processor unit */
+};
+#define SSSNIC_EVENT_CODE_MIN SSSNIC_EVENT_FROM_FUNC
+#define SSSNIC_EVENT_CODE_MAX SSSNIC_EVENT_FROM_MPU
+
+struct sssnic_eventq;
+struct sssnic_event;
+
+/* Indicates that handling of an sssnic event has finished */
+#define SSSNIC_EVENT_DONE 1
+
+typedef int sssnic_event_handler_func_t(struct sssnic_eventq *eq,
+	struct sssnic_event *ev, void *data);
+
+struct sssnic_event_handler {
+	sssnic_event_handler_func_t *func;
+	void *data;
+};
+
+struct sssnic_eventq {
+	struct sssnic_hw *hw;
+	uint16_t qid;
+	uint16_t entry_size;
+	uint32_t depth; /* max number of entries in eventq */
+	uint16_t page_len; /* number of entries in a page */
+	uint16_t num_pages; /* number of pages storing event entries */
+	uint32_t page_size;
+	const struct rte_memzone **pages;
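+	/* consumer index and wrap flag, written together on CI update */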
+	union {
+		uint32_t ci_wrapped;
+		struct {
+			uint32_t ci : 19;
+			uint32_t wrapped : 1;
+			uint32_t resvd : 12;
+		};
+	};
+	uint16_t msix_entry;
+	struct sssnic_event_handler handlers[SSSNIC_EVENT_CODE_MAX + 1];
+};
+
+/* event descriptor */
+struct sssnic_eventd {
+	union {
+		uint32_t u32;
+		struct {
+			uint32_t code : 7;
+			uint32_t src : 1;
+			uint32_t size : 8;
+			uint32_t resvd : 15;
+			uint32_t wrapped : 1;
+		};
+	};
+};
+
+/* event entry */
+struct sssnic_event {
+	uint8_t data[SSSNIC_EVENT_DATA_SIZE];
+	struct sssnic_eventd desc;
+};
+
+int sssnic_eventq_flush(struct sssnic_hw *hw, uint16_t qid,
+	uint32_t timeout_ms);
+
+int sssnic_eventq_all_init(struct sssnic_hw *hw);
+void sssnic_eventq_all_shutdown(struct sssnic_hw *hw);
+
+#endif /* _SSSNIC_EVENTQ_H_ */
diff --git a/drivers/net/sssnic/base/sssnic_hw.c b/drivers/net/sssnic/base/sssnic_hw.c
index 8b7bba7644..44e04486a5 100644
--- a/drivers/net/sssnic/base/sssnic_hw.c
+++ b/drivers/net/sssnic/base/sssnic_hw.c
@@ -9,6 +9,7 @@
 #include "../sssnic_log.h"
 #include "sssnic_hw.h"
 #include "sssnic_reg.h"
+#include "sssnic_eventq.h"
 
 static int
 wait_for_sssnic_hw_ready(struct sssnic_hw *hw)
@@ -196,12 +197,18 @@ sssnic_hw_init(struct sssnic_hw *hw)
 		return ret;
 	}
 
+	ret = sssnic_eventq_all_init(hw);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to initialize event queues");
+		return ret;
+	}
+
 	return -EINVAL;
 }
 
 void
 sssnic_hw_shutdown(struct sssnic_hw *hw)
 {
-	RTE_SET_USED(hw);
 	PMD_INIT_FUNC_TRACE();
+	sssnic_eventq_all_shutdown(hw);
 }
diff --git a/drivers/net/sssnic/base/sssnic_hw.h b/drivers/net/sssnic/base/sssnic_hw.h
index 65d4d562b4..6caf3a6d66 100644
--- a/drivers/net/sssnic/base/sssnic_hw.h
+++ b/drivers/net/sssnic/base/sssnic_hw.h
@@ -51,8 +51,13 @@ struct sssnic_hw {
 	uint8_t *db_base_addr;
 	uint8_t *db_mem_len;
 	struct sssnic_hw_attr attr;
+	struct sssnic_eventq *eventqs;
+	uint8_t num_eventqs;
+	uint16_t eth_port_id;
 };
 
+#define SSSNIC_ETH_PORT_ID(hw) ((hw)->eth_port_id)
+
 int sssnic_hw_init(struct sssnic_hw *hw);
 void sssnic_hw_shutdown(struct sssnic_hw *hw);
 void sssnic_msix_state_set(struct sssnic_hw *hw, uint16_t msix_id, int state);
diff --git a/drivers/net/sssnic/base/sssnic_reg.h b/drivers/net/sssnic/base/sssnic_reg.h
index 77d83292eb..e38d39a691 100644
--- a/drivers/net/sssnic/base/sssnic_reg.h
+++ b/drivers/net/sssnic/base/sssnic_reg.h
@@ -18,6 +18,14 @@
 
 #define SSSNIC_MSIX_CTRL_REG 0x58
 
+#define SSSNIC_EVENTQ_CI_CTRL_REG 0x50
+#define SSSNIC_EVENTQ_IDX_SEL_REG 0x210
+#define SSSNIC_EVENTQ_CTRL0_REG 0x200
+#define SSSNIC_EVENTQ_CTRL1_REG 0x204
+#define SSSNIC_EVENTQ_CONS_IDX_REG 0x208
+#define SSSNIC_EVENTQ_PROD_IDX_REG 0x20c
+#define SSSNIC_EVENTQ_PAGE_ADDR_REG 0x240
+
 /* registers of mgmt */
 #define SSSNIC_AF_ELECTION_REG 0x6000
 #define SSSNIC_MF_ELECTION_REG 0x6020
@@ -142,6 +150,49 @@ struct sssnic_msix_ctrl_reg {
 	};
 };
 
+#define SSSNIC_REG_EVENTQ_INTR_MODE_0 0 /* armed mode */
+#define SSSNIC_REG_EVENTQ_INTR_MODE_1 1 /* always mode */
+#define SSSNIC_REG_EVENTQ_DEF_DMA_ATTR 0
+struct sssnic_eventq_ctrl0_reg {
+	union {
+		uint32_t u32;
+		struct {
+			uint32_t intr_idx : 10;
+			uint32_t resvd_0 : 2;
+			uint32_t dma_attr : 6;
+			uint32_t resvd_1 : 2;
+			uint32_t pci_idx : 1;
+			uint32_t resvd_2 : 8;
+			uint32_t intr_mode : 1;
+		};
+	};
+};
+
+struct sssnic_eventq_ctrl1_reg {
+	union {
+		uint32_t u32;
+		struct {
+			uint32_t depth : 21;
+			uint32_t resvd_0 : 3;
+			uint32_t entry_size : 2;
+			uint32_t resvd_1 : 2;
+			uint32_t page_size : 4;
+		};
+	};
+};
+
+struct sssnic_eventq_ci_ctrl_reg {
+	union {
+		uint32_t u32;
+		struct {
+			uint32_t ci : 21;
+			uint32_t informed : 1;
+			uint32_t resvd_0 : 8;
+			uint32_t qid : 2;
+		};
+	};
+};
+
 static inline uint32_t
 sssnic_cfg_reg_read(struct sssnic_hw *hw, uint32_t reg)
 {
diff --git a/drivers/net/sssnic/sssnic_ethdev.c b/drivers/net/sssnic/sssnic_ethdev.c
index e198b1e1d0..460ff604aa 100644
--- a/drivers/net/sssnic/sssnic_ethdev.c
+++ b/drivers/net/sssnic/sssnic_ethdev.c
@@ -40,6 +40,7 @@ sssnic_ethdev_init(struct rte_eth_dev *ethdev)
 	}
 	netdev->hw = hw;
 	hw->pci_dev = pci_dev;
+	hw->eth_port_id = ethdev->data->port_id;
 	ret = sssnic_hw_init(hw);
 	if (ret != 0) {
 		rte_free(hw);
-- 
2.27.0


Thread overview: 66+ messages
2023-09-04  4:56 [PATCH v5 00/32] Introduce sssnic PMD for 3SNIC's 9x0 serials Ethernet adapters wanry
2023-09-04  4:56 ` [PATCH v5 01/32] net/sssnic: add build and doc infrastructure wanry
2023-09-26 13:06   ` Ferruh Yigit
2023-09-04  4:56 ` [PATCH v5 02/32] net/sssnic: add log type and log macros wanry
2023-09-04  4:56 ` [PATCH v5 03/32] net/sssnic: support probe and remove wanry
2023-09-18 16:08   ` Stephen Hemminger
2023-09-19  2:00     ` Renyong Wan
2023-09-04  4:56 ` [PATCH v5 04/32] net/sssnic: initialize hardware base wanry
2023-09-18  2:28   ` Stephen Hemminger
2023-09-18  4:47     ` Renyong Wan
2023-09-04  4:56 ` wanry [this message]
2023-09-04  4:56 ` [PATCH v5 06/32] net/sssnic/base: add message definition and utility wanry
2023-09-18  2:31   ` Stephen Hemminger
2023-09-18  5:08     ` Renyong Wan
2023-09-04  4:56 ` [PATCH v5 07/32] net/sssnic/base: add mailbox support wanry
2023-09-18  2:32   ` Stephen Hemminger
2023-09-18  5:10     ` Renyong Wan
2023-09-26 13:13   ` Ferruh Yigit
2023-09-04  4:56 ` [PATCH v5 08/32] net/sssnic/base: add work queue wanry
2023-09-18  2:33   ` Stephen Hemminger
2023-09-18  5:11     ` Renyong Wan
2023-09-04  4:56 ` [PATCH v5 09/32] net/sssnic/base: add control queue wanry
2023-09-18  2:36   ` Stephen Hemminger
2023-09-18  5:22     ` Renyong Wan
2023-09-04  4:56 ` [PATCH v5 10/32] net/sssnic: add dev configure and infos get wanry
2023-09-04  4:56 ` [PATCH v5 11/32] net/sssnic: add dev MAC ops wanry
2023-09-26 13:07   ` Ferruh Yigit
2023-09-04  4:56 ` [PATCH v5 12/32] net/sssnic: support dev link status wanry
2023-09-04  4:56 ` [PATCH v5 13/32] net/sssnic: support link status event wanry
2023-09-26 13:08   ` Ferruh Yigit
2023-09-04  4:56 ` [PATCH v5 14/32] net/sssnic: support Rx queue setup and release wanry
2023-09-04  4:56 ` [PATCH v5 15/32] net/sssnic: support Tx " wanry
2023-09-04  4:56 ` [PATCH v5 16/32] net/sssnic: support Rx queue start and stop wanry
2023-09-04  4:56 ` [PATCH v5 17/32] net/sssnic: support Tx " wanry
2023-09-04  4:56 ` [PATCH v5 18/32] net/sssnic: add Rx interrupt support wanry
2023-09-04  4:56 ` [PATCH v5 19/32] net/sssnic: support dev start and stop wanry
2023-09-26 13:09   ` Ferruh Yigit
2023-09-04  4:56 ` [PATCH v5 20/32] net/sssnic: support dev close and reset wanry
2023-09-26 13:09   ` Ferruh Yigit
2023-09-04  4:56 ` [PATCH v5 21/32] net/sssnic: add allmulticast and promiscuous ops wanry
2023-09-04  4:56 ` [PATCH v5 22/32] net/sssnic: add basic and extended stats ops wanry
2023-09-04  4:56 ` [PATCH v5 23/32] net/sssnic: support Rx packet burst wanry
2023-09-04  4:56 ` [PATCH v5 24/32] net/sssnic: support Tx " wanry
2023-09-26 13:10   ` Ferruh Yigit
2023-09-04  4:56 ` [PATCH v5 25/32] net/sssnic: add RSS support wanry
2023-09-04  4:56 ` [PATCH v5 26/32] net/sssnic: support dev MTU set wanry
2023-09-04  4:56 ` [PATCH v5 27/32] net/sssnic: support dev queue info get wanry
2023-09-04  4:56 ` [PATCH v5 28/32] net/sssnic: support dev firmware version get wanry
2023-09-04  4:56 ` [PATCH v5 29/32] net/sssnic: add dev flow control ops wanry
2023-09-26 13:12   ` Ferruh Yigit
2023-09-04  4:56 ` [PATCH v5 30/32] net/sssnic: support VLAN offload and filter wanry
2023-09-04  4:56 ` [PATCH v5 31/32] net/sssnic: add generic flow ops wanry
2023-09-04  4:56 ` [PATCH v5 32/32] net/sssnic: add VF dev support wanry
2023-09-26 13:11   ` Ferruh Yigit
2023-09-18  2:37 ` [PATCH v5 00/32] Introduce sssnic PMD for 3SNIC's 9x0 serials Ethernet adapters Stephen Hemminger
2023-09-18  3:23   ` Renyong Wan
2023-09-19  3:19 ` Stephen Hemminger
2023-09-19  5:18   ` Renyong Wan
2023-09-19  3:21 ` Stephen Hemminger
2023-09-19  5:18   ` Renyong Wan
2023-09-19  3:23 ` Stephen Hemminger
2023-09-19  5:19   ` Renyong Wan
2023-09-19 15:24 ` Stephen Hemminger
2023-09-26 13:13 ` Ferruh Yigit
2024-03-29 11:32   ` Ferruh Yigit
2024-07-31 17:32     ` Thomas Monjalon
