From: Kyo Liu <kyo.liu@nebula-matrix.com>
To: kyo.liu@nebula-matrix.com, dev@dpdk.org
Cc: Dimon Zhao <dimon.zhao@nebula-matrix.com>,
Leon Yu <leon.yu@nebula-matrix.com>,
Sam Chen <sam.chen@nebula-matrix.com>
Subject: [PATCH v1 06/17] net/nbl: add Dispatch layer definitions and implementation
Date: Thu, 12 Jun 2025 08:58:27 +0000 [thread overview]
Message-ID: <20250612085840.729830-7-kyo.liu@nebula-matrix.com> (raw)
In-Reply-To: <20250612085840.729830-1-kyo.liu@nebula-matrix.com>
add Dispatch layer related definetions
Signed-off-by: Kyo Liu <kyo.liu@nebula-matrix.com>
---
drivers/net/nbl/meson.build | 1 +
drivers/net/nbl/nbl_core.c | 7 +
drivers/net/nbl/nbl_core.h | 4 +
drivers/net/nbl/nbl_dispatch.c | 466 ++++++++++++++++++
drivers/net/nbl/nbl_dispatch.h | 29 ++
drivers/net/nbl/nbl_include/nbl_def_channel.h | 18 +
drivers/net/nbl/nbl_include/nbl_def_common.h | 4 +
.../net/nbl/nbl_include/nbl_def_dispatch.h | 77 +++
.../net/nbl/nbl_include/nbl_def_resource.h | 5 +
drivers/net/nbl/nbl_include/nbl_include.h | 17 +
10 files changed, 628 insertions(+)
create mode 100644 drivers/net/nbl/nbl_dispatch.c
create mode 100644 drivers/net/nbl/nbl_dispatch.h
create mode 100644 drivers/net/nbl/nbl_include/nbl_def_dispatch.h
diff --git a/drivers/net/nbl/meson.build b/drivers/net/nbl/meson.build
index f34121260e..23601727ef 100644
--- a/drivers/net/nbl/meson.build
+++ b/drivers/net/nbl/meson.build
@@ -12,6 +12,7 @@ includes += include_directories('nbl_hw')
sources = files(
'nbl_ethdev.c',
'nbl_core.c',
+ 'nbl_dispatch.c',
'nbl_common/nbl_common.c',
'nbl_common/nbl_thread.c',
'nbl_hw/nbl_channel.c',
diff --git a/drivers/net/nbl/nbl_core.c b/drivers/net/nbl/nbl_core.c
index 70600401fe..548eb3a2fd 100644
--- a/drivers/net/nbl/nbl_core.c
+++ b/drivers/net/nbl/nbl_core.c
@@ -50,8 +50,14 @@ int nbl_core_init(struct nbl_adapter *adapter, struct rte_eth_dev *eth_dev)
if (ret)
goto res_init_fail;
+ ret = nbl_disp_init(adapter);
+ if (ret)
+ goto disp_init_fail;
+
return 0;
+disp_init_fail:
+ product_base_ops->res_remove(adapter);
res_init_fail:
product_base_ops->chan_remove(adapter);
chan_init_fail:
@@ -66,6 +72,7 @@ void nbl_core_remove(struct nbl_adapter *adapter)
product_base_ops = nbl_core_get_product_ops(adapter->caps.product_type);
+ nbl_disp_remove(adapter);
product_base_ops->res_remove(adapter);
product_base_ops->chan_remove(adapter);
product_base_ops->phy_remove(adapter);
diff --git a/drivers/net/nbl/nbl_core.h b/drivers/net/nbl/nbl_core.h
index f693913b47..2730539050 100644
--- a/drivers/net/nbl/nbl_core.h
+++ b/drivers/net/nbl/nbl_core.h
@@ -10,6 +10,7 @@
#include "nbl_def_phy.h"
#include "nbl_def_channel.h"
#include "nbl_def_resource.h"
+#include "nbl_def_dispatch.h"
#define NBL_VENDOR_ID (0x1F0F)
#define NBL_DEVICE_ID_M18110 (0x3403)
@@ -35,10 +36,12 @@
#define NBL_ADAPTER_TO_PHY_MGT(adapter) ((adapter)->core.phy_mgt)
#define NBL_ADAPTER_TO_CHAN_MGT(adapter) ((adapter)->core.chan_mgt)
#define NBL_ADAPTER_TO_RES_MGT(adapter) ((adapter)->core.res_mgt)
+#define NBL_ADAPTER_TO_DISP_MGT(adapter) ((adapter)->core.disp_mgt)
#define NBL_ADAPTER_TO_PHY_OPS_TBL(adapter) ((adapter)->intf.phy_ops_tbl)
#define NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter) ((adapter)->intf.channel_ops_tbl)
#define NBL_ADAPTER_TO_RES_OPS_TBL(adapter) ((adapter)->intf.resource_ops_tbl)
+#define NBL_ADAPTER_TO_DISP_OPS_TBL(adapter) ((adapter)->intf.dispatch_ops_tbl)
struct nbl_core {
void *phy_mgt;
@@ -52,6 +55,7 @@ struct nbl_interface {
struct nbl_phy_ops_tbl *phy_ops_tbl;
struct nbl_channel_ops_tbl *channel_ops_tbl;
struct nbl_resource_ops_tbl *resource_ops_tbl;
+ struct nbl_dispatch_ops_tbl *dispatch_ops_tbl;
};
struct nbl_adapter {
diff --git a/drivers/net/nbl/nbl_dispatch.c b/drivers/net/nbl/nbl_dispatch.c
new file mode 100644
index 0000000000..ffeeba3048
--- /dev/null
+++ b/drivers/net/nbl/nbl_dispatch.c
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Nebulamatrix Technology Co., Ltd.
+ */
+
+#include "nbl_dispatch.h"
+
+static int nbl_disp_alloc_txrx_queues(void *priv, u16 vsi_id, u16 queue_num)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->alloc_txrx_queues(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ vsi_id, queue_num);
+}
+
+static int nbl_disp_chan_alloc_txrx_queues_req(void *priv, u16 vsi_id,
+ u16 queue_num)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops;
+ struct nbl_chan_param_alloc_txrx_queues param = {0};
+ struct nbl_chan_param_alloc_txrx_queues result = {0};
+ struct nbl_chan_send_info chan_send;
+
+ chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+
+ param.vsi_id = vsi_id;
+ param.queue_num = queue_num;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, ¶m,
+ sizeof(param), &result, sizeof(result), 1);
+ chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+
+ return 0;
+}
+
+static void nbl_disp_free_txrx_queues(void *priv, u16 vsi_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ res_ops->free_txrx_queues(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id);
+}
+
+static void nbl_disp_chan_free_txrx_queues_req(void *priv, u16 vsi_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops;
+ struct nbl_chan_param_free_txrx_queues param = {0};
+ struct nbl_chan_send_info chan_send;
+
+ chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+
+ param.vsi_id = vsi_id;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_FREE_TXRX_QUEUES, ¶m,
+ sizeof(param), NULL, 0, 1);
+ chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_clear_queues(void *priv, u16 vsi_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ NBL_OPS_CALL(res_ops->clear_queues, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id));
+}
+
+static void nbl_disp_chan_clear_queues_req(void *priv, u16 vsi_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_send_info chan_send = {0};
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CLEAR_QUEUE, &vsi_id, sizeof(vsi_id),
+ NULL, 0, 1);
+ chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static int nbl_disp_start_tx_ring(void *priv,
+ struct nbl_start_tx_ring_param *param,
+ u64 *dma_addr)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->start_tx_ring(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ param, dma_addr);
+}
+
+static void nbl_disp_release_tx_ring(void *priv, u16 queue_idx)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->release_tx_ring(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ queue_idx);
+}
+
+static void nbl_disp_stop_tx_ring(void *priv, u16 queue_idx)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->stop_tx_ring(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ queue_idx);
+}
+
+static int nbl_disp_start_rx_ring(void *priv,
+ struct nbl_start_rx_ring_param *param,
+ u64 *dma_addr)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->start_rx_ring(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ param, dma_addr);
+}
+
+static int nbl_disp_alloc_rx_bufs(void *priv, u16 queue_idx)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->alloc_rx_bufs(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ queue_idx);
+}
+
+static void nbl_disp_release_rx_ring(void *priv, u16 queue_idx)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->release_rx_ring(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ queue_idx);
+}
+
+static void nbl_disp_stop_rx_ring(void *priv, u16 queue_idx)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->stop_rx_ring(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ queue_idx);
+}
+
+static void nbl_disp_update_rx_ring(void *priv, u16 index)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ res_ops->update_rx_ring(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), index);
+}
+
+static int nbl_disp_alloc_rings(void *priv, u16 tx_num, u16 rx_num, u16 queue_offset)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->alloc_rings(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ tx_num, rx_num, queue_offset);
+}
+
+static void nbl_disp_remove_rings(void *priv)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ res_ops->remove_rings(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt));
+}
+
+static int
+nbl_disp_setup_queue(void *priv, struct nbl_txrx_queue_param *param, bool is_tx)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ return res_ops->setup_queue(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ param, is_tx);
+}
+
+static int
+nbl_disp_chan_setup_queue_req(void *priv,
+ struct nbl_txrx_queue_param *queue_param,
+ bool is_tx)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops;
+ struct nbl_chan_param_setup_queue param = {0};
+ struct nbl_chan_send_info chan_send;
+
+ chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+
+ memcpy(¶m.queue_param, queue_param, sizeof(param.queue_param));
+ param.is_tx = is_tx;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_SETUP_QUEUE, ¶m,
+ sizeof(param), NULL, 0, 1);
+ return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_remove_all_queues(void *priv, u16 vsi_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops;
+
+ res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ res_ops->remove_all_queues(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id);
+}
+
+static void nbl_disp_chan_remove_all_queues_req(void *priv, u16 vsi_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops;
+ struct nbl_chan_param_remove_all_queues param = {0};
+ struct nbl_chan_send_info chan_send;
+
+ chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+
+ param.vsi_id = vsi_id;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_REMOVE_ALL_QUEUES,
+ ¶m, sizeof(param), NULL, 0, 1);
+ chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+#define NBL_DISP_OPS_TBL \
+do { \
+ NBL_DISP_SET_OPS(alloc_txrx_queues, nbl_disp_alloc_txrx_queues, \
+ NBL_DISP_CTRL_LVL_MGT, \
+ NBL_CHAN_MSG_ALLOC_TXRX_QUEUES, \
+ nbl_disp_chan_alloc_txrx_queues_req, \
+ NULL); \
+ NBL_DISP_SET_OPS(free_txrx_queues, nbl_disp_free_txrx_queues, \
+ NBL_DISP_CTRL_LVL_MGT, \
+ NBL_CHAN_MSG_FREE_TXRX_QUEUES, \
+ nbl_disp_chan_free_txrx_queues_req, \
+ NULL); \
+ NBL_DISP_SET_OPS(alloc_rings, nbl_disp_alloc_rings, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, \
+ NULL, NULL); \
+ NBL_DISP_SET_OPS(remove_rings, nbl_disp_remove_rings, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, \
+ NULL, NULL); \
+ NBL_DISP_SET_OPS(start_tx_ring, nbl_disp_start_tx_ring, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+ NBL_DISP_SET_OPS(stop_tx_ring, nbl_disp_stop_tx_ring, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+ NBL_DISP_SET_OPS(release_tx_ring, nbl_disp_release_tx_ring, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+ NBL_DISP_SET_OPS(start_rx_ring, nbl_disp_start_rx_ring, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+ NBL_DISP_SET_OPS(alloc_rx_bufs, nbl_disp_alloc_rx_bufs, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+ NBL_DISP_SET_OPS(stop_rx_ring, nbl_disp_stop_rx_ring, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+ NBL_DISP_SET_OPS(release_rx_ring, nbl_disp_release_rx_ring, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, NULL, NULL); \
+ NBL_DISP_SET_OPS(update_rx_ring, nbl_disp_update_rx_ring, \
+ NBL_DISP_CTRL_LVL_ALWAYS, -1, \
+ NULL, NULL); \
+ NBL_DISP_SET_OPS(setup_queue, nbl_disp_setup_queue, \
+ NBL_DISP_CTRL_LVL_MGT, \
+ NBL_CHAN_MSG_SETUP_QUEUE, \
+ nbl_disp_chan_setup_queue_req, NULL); \
+ NBL_DISP_SET_OPS(remove_all_queues, nbl_disp_remove_all_queues, \
+ NBL_DISP_CTRL_LVL_MGT, \
+ NBL_CHAN_MSG_REMOVE_ALL_QUEUES, \
+ nbl_disp_chan_remove_all_queues_req, NULL); \
+ NBL_DISP_SET_OPS(clear_queues, nbl_disp_clear_queues, \
+ NBL_DISP_CTRL_LVL_MGT, \
+ NBL_CHAN_MSG_CLEAR_QUEUE, \
+ nbl_disp_chan_clear_queues_req, NULL); \
+} while (0)
+
+/* Structure starts here, adding an op should not modify anything below */
+static int nbl_disp_setup_msg(struct nbl_dispatch_mgt *disp_mgt)
+{
+ struct nbl_channel_ops *chan_ops;
+ int ret = 0;
+
+ chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+
+#define NBL_DISP_SET_OPS(disp_op, res_func, ctrl_lvl2, msg_type, msg_req, msg_resp) \
+do { \
+ typeof(msg_type) _msg_type = (msg_type); \
+ typeof(msg_resp) _msg_resp = (msg_resp); \
+ uint32_t _ctrl_lvl = rte_bit_relaxed_get32(ctrl_lvl2, &disp_mgt->ctrl_lvl); \
+ if (_msg_type >= 0 && _msg_resp != NULL && _ctrl_lvl) \
+ ret += chan_ops->register_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), \
+ _msg_type, _msg_resp, disp_mgt); \
+} while (0)
+ NBL_DISP_OPS_TBL;
+#undef NBL_DISP_SET_OPS
+
+ return ret;
+}
+
+/* Ctrl lvl means that if a certain level is set, then all disp_ops that decleared this lvl
+ * will go directly to res_ops, rather than send a channel msg, and vice versa.
+ */
+static int nbl_disp_setup_ctrl_lvl(struct nbl_dispatch_mgt *disp_mgt, u32 lvl)
+{
+ struct nbl_dispatch_ops *disp_ops;
+
+ disp_ops = NBL_DISP_MGT_TO_DISP_OPS(disp_mgt);
+
+ rte_bit_relaxed_set32(lvl, &disp_mgt->ctrl_lvl);
+
+#define NBL_DISP_SET_OPS(disp_op, res_func, ctrl, msg_type, msg_req, msg_resp) \
+do { \
+ disp_ops->NBL_NAME(disp_op) = \
+ rte_bit_relaxed_get32(ctrl, &disp_mgt->ctrl_lvl) ? res_func : msg_req; ;\
+} while (0)
+ NBL_DISP_OPS_TBL;
+#undef NBL_DISP_SET_OPS
+
+ return 0;
+}
+
+static int nbl_disp_setup_disp_mgt(struct nbl_dispatch_mgt **disp_mgt)
+{
+ *disp_mgt = rte_zmalloc("nbl_disp_mgt", sizeof(struct nbl_dispatch_mgt), 0);
+ if (!*disp_mgt)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nbl_disp_remove_disp_mgt(struct nbl_dispatch_mgt **disp_mgt)
+{
+ rte_free(*disp_mgt);
+ *disp_mgt = NULL;
+}
+
+static void nbl_disp_remove_ops(struct nbl_dispatch_ops_tbl **disp_ops_tbl)
+{
+ rte_free(NBL_DISP_OPS_TBL_TO_OPS(*disp_ops_tbl));
+ rte_free(*disp_ops_tbl);
+ *disp_ops_tbl = NULL;
+}
+
+static int nbl_disp_setup_ops(struct nbl_dispatch_ops_tbl **disp_ops_tbl,
+ struct nbl_dispatch_mgt *disp_mgt)
+{
+ struct nbl_dispatch_ops *disp_ops;
+
+ *disp_ops_tbl = rte_zmalloc("nbl_disp_ops_tbl", sizeof(struct nbl_dispatch_ops_tbl), 0);
+ if (!*disp_ops_tbl)
+ return -ENOMEM;
+
+ disp_ops = rte_zmalloc("nbl_dispatch_ops", sizeof(struct nbl_dispatch_ops), 0);
+ if (!disp_ops) {
+ rte_free(*disp_ops_tbl);
+ return -ENOMEM;
+ }
+
+ NBL_DISP_OPS_TBL_TO_OPS(*disp_ops_tbl) = disp_ops;
+ NBL_DISP_OPS_TBL_TO_PRIV(*disp_ops_tbl) = disp_mgt;
+
+ return 0;
+}
+
+int nbl_disp_init(void *p)
+{
+ struct nbl_adapter *adapter = (struct nbl_adapter *)p;
+ struct nbl_dispatch_mgt **disp_mgt;
+ struct nbl_dispatch_ops_tbl **disp_ops_tbl;
+ struct nbl_resource_ops_tbl *res_ops_tbl;
+ struct nbl_channel_ops_tbl *chan_ops_tbl;
+ struct nbl_product_dispatch_ops *disp_product_ops = NULL;
+ int ret = 0;
+
+ disp_mgt = (struct nbl_dispatch_mgt **)&NBL_ADAPTER_TO_DISP_MGT(adapter);
+ disp_ops_tbl = &NBL_ADAPTER_TO_DISP_OPS_TBL(adapter);
+ res_ops_tbl = NBL_ADAPTER_TO_RES_OPS_TBL(adapter);
+ chan_ops_tbl = NBL_ADAPTER_TO_CHAN_OPS_TBL(adapter);
+ disp_product_ops = nbl_dispatch_get_product_ops(adapter->caps.product_type);
+
+ ret = nbl_disp_setup_disp_mgt(disp_mgt);
+ if (ret)
+ return ret;
+
+ ret = nbl_disp_setup_ops(disp_ops_tbl, *disp_mgt);
+ if (ret)
+ goto setup_ops_fail;
+
+ NBL_DISP_MGT_TO_RES_OPS_TBL(*disp_mgt) = res_ops_tbl;
+ NBL_DISP_MGT_TO_CHAN_OPS_TBL(*disp_mgt) = chan_ops_tbl;
+ NBL_DISP_MGT_TO_DISP_OPS_TBL(*disp_mgt) = *disp_ops_tbl;
+
+ if (disp_product_ops->dispatch_init) {
+ ret = disp_product_ops->dispatch_init(*disp_mgt);
+ if (ret)
+ goto dispatch_init_fail;
+ }
+
+ ret = nbl_disp_setup_ctrl_lvl(*disp_mgt, NBL_DISP_CTRL_LVL_ALWAYS);
+ if (ret)
+ goto setup_ctrl_lvl_fail;
+ return 0;
+
+setup_ctrl_lvl_fail:
+ disp_product_ops->dispatch_uninit(*disp_mgt);
+dispatch_init_fail:
+ nbl_disp_remove_ops(disp_ops_tbl);
+setup_ops_fail:
+ nbl_disp_remove_disp_mgt(disp_mgt);
+
+ return ret;
+}
+
+void nbl_disp_remove(void *p)
+{
+ struct nbl_adapter *adapter = (struct nbl_adapter *)p;
+ struct nbl_dispatch_mgt **disp_mgt;
+ struct nbl_dispatch_ops_tbl **disp_ops_tbl;
+
+ disp_mgt = (struct nbl_dispatch_mgt **)&NBL_ADAPTER_TO_DISP_MGT(adapter);
+ disp_ops_tbl = &NBL_ADAPTER_TO_DISP_OPS_TBL(adapter);
+
+ nbl_disp_remove_ops(disp_ops_tbl);
+ nbl_disp_remove_disp_mgt(disp_mgt);
+}
+
+static int nbl_disp_leonis_init(void *p)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)p;
+ int ret;
+
+ nbl_disp_setup_ctrl_lvl(disp_mgt, NBL_DISP_CTRL_LVL_NET);
+ ret = nbl_disp_setup_msg(disp_mgt);
+
+ return ret;
+}
+
+static int nbl_disp_leonis_uninit(void *p)
+{
+ RTE_SET_USED(p);
+ return 0;
+}
+
+static struct nbl_product_dispatch_ops nbl_product_dispatch_ops[NBL_PRODUCT_MAX] = {
+ {
+ .dispatch_init = nbl_disp_leonis_init,
+ .dispatch_uninit = nbl_disp_leonis_uninit,
+ },
+};
+
+struct nbl_product_dispatch_ops *nbl_dispatch_get_product_ops(enum nbl_product_type product_type)
+{
+ return &nbl_product_dispatch_ops[product_type];
+}
diff --git a/drivers/net/nbl/nbl_dispatch.h b/drivers/net/nbl/nbl_dispatch.h
new file mode 100644
index 0000000000..dcdf87576a
--- /dev/null
+++ b/drivers/net/nbl/nbl_dispatch.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Nebulamatrix Technology Co., Ltd.
+ */
+
+#ifndef _NBL_DISPATCH_H_
+#define _NBL_DISPATCH_H_
+
+#include "nbl_ethdev.h"
+
+#define NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt) ((disp_mgt)->res_ops_tbl)
+#define NBL_DISP_MGT_TO_RES_OPS(disp_mgt) (NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt)->ops)
+#define NBL_DISP_MGT_TO_RES_PRIV(disp_mgt) (NBL_DISP_MGT_TO_RES_OPS_TBL(disp_mgt)->priv)
+#define NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt) ((disp_mgt)->chan_ops_tbl)
+#define NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt) (NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt)->ops)
+#define NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt) (NBL_DISP_MGT_TO_CHAN_OPS_TBL(disp_mgt)->priv)
+#define NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt) ((disp_mgt)->disp_ops_tbl)
+#define NBL_DISP_MGT_TO_DISP_OPS(disp_mgt) (NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt)->ops)
+#define NBL_DISP_MGT_TO_DISP_PRIV(disp_mgt) (NBL_DISP_MGT_TO_DISP_OPS_TBL(disp_mgt)->priv)
+
+struct nbl_dispatch_mgt {
+ struct nbl_resource_ops_tbl *res_ops_tbl;
+ struct nbl_channel_ops_tbl *chan_ops_tbl;
+ struct nbl_dispatch_ops_tbl *disp_ops_tbl;
+ uint32_t ctrl_lvl;
+};
+
+struct nbl_product_dispatch_ops *nbl_dispatch_get_product_ops(enum nbl_product_type product_type);
+
+#endif
diff --git a/drivers/net/nbl/nbl_include/nbl_def_channel.h b/drivers/net/nbl/nbl_include/nbl_def_channel.h
index faf5d3ed3d..25d54a435d 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_channel.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_channel.h
@@ -281,6 +281,24 @@ enum nbl_chan_msg_type {
NBL_CHAN_MSG_MAX,
};
+struct nbl_chan_param_alloc_txrx_queues {
+ u16 vsi_id;
+ u16 queue_num;
+};
+
+struct nbl_chan_param_free_txrx_queues {
+ u16 vsi_id;
+};
+
+struct nbl_chan_param_setup_queue {
+ struct nbl_txrx_queue_param queue_param;
+ bool is_tx;
+};
+
+struct nbl_chan_param_remove_all_queues {
+ u16 vsi_id;
+};
+
struct nbl_chan_send_info {
uint16_t dstid;
uint16_t msg_type;
diff --git a/drivers/net/nbl/nbl_include/nbl_def_common.h b/drivers/net/nbl/nbl_include/nbl_def_common.h
index 0bfc6a233b..fb2ccb28bf 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_common.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_common.h
@@ -13,6 +13,10 @@
# define NBL_PRIU64 "llu"
# endif
+#define NBL_OPS_CALL(func, para) \
+ ({ typeof(func) _func = (func); \
+ (!_func) ? 0 : _func para; })
+
struct nbl_dma_mem {
void *va;
uint64_t pa;
diff --git a/drivers/net/nbl/nbl_include/nbl_def_dispatch.h b/drivers/net/nbl/nbl_include/nbl_def_dispatch.h
new file mode 100644
index 0000000000..5fd890b699
--- /dev/null
+++ b/drivers/net/nbl/nbl_include/nbl_def_dispatch.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Nebulamatrix Technology Co., Ltd.
+ */
+
+#ifndef _NBL_DEF_DISPATCH_H_
+#define _NBL_DEF_DISPATCH_H_
+
+#include "nbl_include.h"
+
+#define NBL_DISP_OPS_TBL_TO_OPS(disp_ops_tbl) ((disp_ops_tbl)->ops)
+#define NBL_DISP_OPS_TBL_TO_PRIV(disp_ops_tbl) ((disp_ops_tbl)->priv)
+
+enum {
+ NBL_DISP_CTRL_LVL_NEVER = 0,
+ NBL_DISP_CTRL_LVL_MGT,
+ NBL_DISP_CTRL_LVL_NET,
+ NBL_DISP_CTRL_LVL_ALWAYS,
+ NBL_DISP_CTRL_LVL_MAX,
+};
+
+struct nbl_dispatch_ops {
+ int (*add_macvlan)(void *priv, u8 *mac, u16 vlan_id, u16 vsi_id);
+ int (*get_mac_addr)(void *priv, u8 *mac);
+ void (*del_macvlan)(void *priv, u8 *mac, u16 vlan_id, u16 vsi_id);
+ int (*add_multi_rule)(void *priv, u16 vsi);
+ void (*del_multi_rule)(void *priv, u16 vsi);
+ int (*cfg_multi_mcast)(void *priv, u16 vsi, u16 enable);
+ void (*clear_flow)(void *priv, u16 vsi_id);
+ void (*get_firmware_version)(void *priv, char *firmware_verion, u8 max_len);
+ int (*set_promisc_mode)(void *priv, u16 vsi_id, u16 mode);
+ int (*alloc_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num);
+ void (*free_txrx_queues)(void *priv, u16 vsi_id);
+ u16 (*get_vsi_id)(void *priv);
+ void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id);
+ int (*setup_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num);
+ void (*remove_txrx_queues)(void *priv, u16 vsi_id);
+ int (*alloc_rings)(void *priv, u16 tx_num, u16 rx_num, u16 queue_offset);
+ void (*remove_rings)(void *priv);
+ int (*start_tx_ring)(void *priv, struct nbl_start_tx_ring_param *param, u64 *dma_addr);
+ void (*stop_tx_ring)(void *priv, u16 queue_idx);
+ void (*release_tx_ring)(void *priv, u16 queue_idx);
+ int (*start_rx_ring)(void *priv, struct nbl_start_rx_ring_param *param, u64 *dma_addr);
+ int (*alloc_rx_bufs)(void *priv, u16 queue_idx);
+ void (*stop_rx_ring)(void *priv, u16 queue_idx);
+ void (*release_rx_ring)(void *priv, u16 queue_idx);
+ void (*update_rx_ring)(void *priv, u16 index);
+ u16 (*get_tx_ehdr_len)(void *priv);
+ void (*cfg_txrx_vlan)(void *priv, u16 vlan_tci, u16 vlan_proto);
+ int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx);
+ void (*remove_all_queues)(void *priv, u16 vsi_id);
+ int (*register_vsi2q)(void *priv, u16 vsi_index, u16 vsi_id,
+ u16 queue_offset, u16 queue_num);
+ int (*setup_q2vsi)(void *priv, u16 vsi_id);
+ void (*remove_q2vsi)(void *priv, u16 vsi_id);
+ int (*setup_rss)(void *priv, u16 vsi_id);
+ void (*remove_rss)(void *priv, u16 vsi_id);
+ int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld);
+ int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps);
+ void (*remove_cqs)(void *priv, u16 vsi_id);
+ int (*set_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir, u32 indir_size);
+ void (*clear_queues)(void *priv, u16 vsi_id);
+ u16 (*xmit_pkts)(void *priv, void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts);
+ u16 (*recv_pkts)(void *priv, void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts);
+ u16 (*get_vsi_global_qid)(void *priv, u16 vsi_id, u16 local_qid);
+
+ void (*dummy_func)(void *priv);
+};
+
+struct nbl_dispatch_ops_tbl {
+ struct nbl_dispatch_ops *ops;
+ void *priv;
+};
+
+int nbl_disp_init(void *p);
+void nbl_disp_remove(void *p);
+
+#endif
diff --git a/drivers/net/nbl/nbl_include/nbl_def_resource.h b/drivers/net/nbl/nbl_include/nbl_def_resource.h
index c1cf041c74..43302df842 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_resource.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_resource.h
@@ -25,6 +25,11 @@ struct nbl_resource_ops {
int (*reset_stats)(void *priv);
void (*update_rx_ring)(void *priv, u16 queue_idx);
u16 (*get_tx_ehdr_len)(void *priv);
+ int (*alloc_txrx_queues)(void *priv, u16 vsi_id, u16 queue_num);
+ void (*free_txrx_queues)(void *priv, u16 vsi_id);
+ void (*clear_queues)(void *priv, u16 vsi_id);
+ int (*setup_queue)(void *priv, struct nbl_txrx_queue_param *param, bool is_tx);
+ void (*remove_all_queues)(void *priv, u16 vsi_id);
u64 (*restore_abnormal_ring)(void *priv, u16 local_queue_id, int type);
int (*restart_abnormal_ring)(void *priv, int ring_index, int type);
void (*cfg_txrx_vlan)(void *priv, u16 vlan_tci, u16 vlan_proto);
diff --git a/drivers/net/nbl/nbl_include/nbl_include.h b/drivers/net/nbl/nbl_include/nbl_include.h
index caf77dc8d6..9337666d16 100644
--- a/drivers/net/nbl/nbl_include/nbl_include.h
+++ b/drivers/net/nbl/nbl_include/nbl_include.h
@@ -92,4 +92,21 @@ struct nbl_start_tx_ring_param {
const struct rte_eth_txconf *conf;
};
+struct nbl_txrx_queue_param {
+ u16 vsi_id;
+ u64 dma;
+ u64 avail;
+ u64 used;
+ u16 desc_num;
+ u16 local_queue_id;
+ u16 intr_en;
+ u16 intr_mask;
+ u16 global_vector_id;
+ u16 half_offload_en;
+ u16 split;
+ u16 extend_header;
+ u16 cxt;
+ u16 rxcsum;
+};
+
#endif
--
2.43.0
next prev parent reply other threads:[~2025-06-12 8:59 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-06-12 8:58 [PATCH v1 00/17] NBL PMD for Nebulamatrix NICs Kyo Liu
2025-06-12 8:58 ` [PATCH v1 01/17] net/nbl: add doc and minimum nbl build framework Kyo Liu
2025-06-12 8:58 ` [PATCH v1 02/17] net/nbl: add simple probe/remove and log module Kyo Liu
2025-06-12 17:49 ` Stephen Hemminger
2025-06-13 2:32 ` 回复:[PATCH " Kyo.Liu
2025-06-12 8:58 ` [PATCH v1 03/17] net/nbl: add PHY layer definitions and implementation Kyo Liu
2025-06-12 8:58 ` [PATCH v1 04/17] net/nbl: add Channel " Kyo Liu
2025-06-12 8:58 ` [PATCH v1 05/17] net/nbl: add Resource " Kyo Liu
2025-06-12 8:58 ` Kyo Liu [this message]
2025-06-12 8:58 ` [PATCH v1 07/17] net/nbl: add Dev " Kyo Liu
2025-06-12 8:58 ` [PATCH v1 08/17] net/nbl: add complete device init and uninit functionality Kyo Liu
2025-06-12 8:58 ` [PATCH v1 09/17] net/nbl: add uio and vfio mode for nbl Kyo Liu
2025-06-12 8:58 ` [PATCH v1 10/17] net/nbl: bus/pci: introduce get_iova_mode for pci dev Kyo Liu
2025-06-12 17:40 ` Stephen Hemminger
2025-06-13 2:28 ` 回复:[PATCH " Kyo.Liu
2025-06-13 7:35 ` [PATCH " David Marchand
2025-06-13 15:21 ` 回复:[PATCH " Stephen Hemminger
2025-06-12 8:58 ` [PATCH v1 11/17] net/nbl: add nbl coexistence mode for nbl Kyo Liu
2025-06-12 8:58 ` [PATCH v1 12/17] net/nbl: add nbl ethdev configuration Kyo Liu
2025-06-12 8:58 ` [PATCH v1 13/17] net/nbl: add nbl device rxtx queue setup and release ops Kyo Liu
2025-06-12 8:58 ` [PATCH v1 14/17] net/nbl: add nbl device start and stop ops Kyo Liu
2025-06-12 8:58 ` [PATCH v1 15/17] net/nbl: add nbl device tx and rx burst Kyo Liu
2025-06-12 8:58 ` [PATCH v1 16/17] net/nbl: add nbl device xstats and stats Kyo Liu
2025-06-12 8:58 ` [PATCH v1 17/17] net/nbl: nbl device support set mtu and promisc Kyo Liu
2025-06-12 17:35 ` [PATCH v1 00/17] NBL PMD for Nebulamatrix NICs Stephen Hemminger
2025-06-12 17:44 ` Stephen Hemminger
2025-06-13 2:31 ` 回复:[PATCH " Kyo.Liu
2025-06-12 17:46 ` [PATCH " Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250612085840.729830-7-kyo.liu@nebula-matrix.com \
--to=kyo.liu@nebula-matrix.com \
--cc=dev@dpdk.org \
--cc=dimon.zhao@nebula-matrix.com \
--cc=leon.yu@nebula-matrix.com \
--cc=sam.chen@nebula-matrix.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).