From: Kyo Liu <kyo.liu@nebula-matrix.com>
To: kyo.liu@nebula-matrix.com, dev@dpdk.org
Cc: Dimon Zhao <dimon.zhao@nebula-matrix.com>,
Leon Yu <leon.yu@nebula-matrix.com>,
Sam Chen <sam.chen@nebula-matrix.com>
Subject: [PATCH v1 14/17] net/nbl: add nbl device start and stop ops
Date: Thu, 12 Jun 2025 08:58:35 +0000 [thread overview]
Message-ID: <20250612085840.729830-15-kyo.liu@nebula-matrix.com> (raw)
In-Reply-To: <20250612085840.729830-1-kyo.liu@nebula-matrix.com>
Implement NBL device start and stop functions
Signed-off-by: Kyo Liu <kyo.liu@nebula-matrix.com>
---
drivers/net/nbl/nbl_dev/nbl_dev.c | 173 +++++++++++++++++-
drivers/net/nbl/nbl_dispatch.c | 121 +++++++++++-
drivers/net/nbl/nbl_ethdev.c | 5 +
drivers/net/nbl/nbl_hw/nbl_txrx.c | 72 +++++++-
drivers/net/nbl/nbl_hw/nbl_txrx.h | 14 +-
drivers/net/nbl/nbl_include/nbl_def_channel.h | 20 ++
drivers/net/nbl/nbl_include/nbl_def_common.h | 6 +-
.../net/nbl/nbl_include/nbl_def_dispatch.h | 2 +-
.../net/nbl/nbl_include/nbl_def_resource.h | 4 +
drivers/net/nbl/nbl_include/nbl_include.h | 6 +-
10 files changed, 392 insertions(+), 31 deletions(-)
diff --git a/drivers/net/nbl/nbl_dev/nbl_dev.c b/drivers/net/nbl/nbl_dev/nbl_dev.c
index 4faa58ace8..bdd06613e6 100644
--- a/drivers/net/nbl/nbl_dev/nbl_dev.c
+++ b/drivers/net/nbl/nbl_dev/nbl_dev.c
@@ -38,22 +38,179 @@ static int nbl_dev_configure(struct rte_eth_dev *eth_dev)
return ret;
}
+static int nbl_dev_txrx_start(struct rte_eth_dev *eth_dev)
+{
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+ struct nbl_dev_ring_mgt *ring_mgt = &dev_mgt->net_dev->ring_mgt;
+ struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
+ struct nbl_txrx_queue_param param = {0};
+ struct nbl_dev_ring *ring;
+ int ret = 0;
+ int i;
+
+ eth_dev->data->scattered_rx = 0;
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ ring = &ring_mgt->tx_rings[i];
+ param.desc_num = ring->desc_num;
+ param.vsi_id = dev_mgt->net_dev->vsi_id;
+ param.dma = ring->dma;
+ param.local_queue_id = i + ring_mgt->queue_offset;
+ param.intr_en = 0;
+ param.intr_mask = 0;
+ param.extend_header = 1;
+ param.split = 0;
+
+ ret = disp_ops->setup_queue(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), &param, true);
+ if (ret) {
+ NBL_LOG(ERR, "setup_tx_queue failed %d", ret);
+ return ret;
+ }
+
+ ring->global_queue_id =
+ disp_ops->get_vsi_global_qid(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ param.vsi_id, param.local_queue_id);
+ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ring = &ring_mgt->rx_rings[i];
+ param.desc_num = ring->desc_num;
+ param.vsi_id = dev_mgt->net_dev->vsi_id;
+ param.dma = ring->dma;
+ param.local_queue_id = i + ring_mgt->queue_offset;
+ param.intr_en = 0;
+ param.intr_mask = 0;
+ param.half_offload_en = 1;
+ param.extend_header = 1;
+ param.split = 0;
+ param.rxcsum = 1;
+
+ ret = disp_ops->setup_queue(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), &param, false);
+ if (ret) {
+ NBL_LOG(ERR, "setup_rx_queue failed %d", ret);
+ return ret;
+ }
+
+ ret = disp_ops->alloc_rx_bufs(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), i);
+ if (ret) {
+ NBL_LOG(ERR, "alloc_rx_bufs failed %d", ret);
+ return ret;
+ }
+
+ ring->global_queue_id =
+ disp_ops->get_vsi_global_qid(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ param.vsi_id, param.local_queue_id);
+ disp_ops->update_rx_ring(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), i);
+ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ ret = disp_ops->cfg_dsch(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), dev_mgt->net_dev->vsi_id, true);
+ if (ret) {
+ NBL_LOG(ERR, "cfg_dsch failed %d", ret);
+ goto cfg_dsch_fail;
+ }
+ ret = disp_ops->setup_cqs(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ dev_mgt->net_dev->vsi_id, eth_dev->data->nb_rx_queues, true);
+ if (ret)
+ goto setup_cqs_fail;
+
+ return ret;
+
+setup_cqs_fail:
+ disp_ops->cfg_dsch(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), dev_mgt->net_dev->vsi_id, false);
+cfg_dsch_fail:
+ disp_ops->remove_all_queues(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ dev_mgt->net_dev->vsi_id);
+
+ return ret;
+}
+
static int nbl_dev_port_start(struct rte_eth_dev *eth_dev)
{
- RTE_SET_USED(eth_dev);
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+ int ret;
+
+ if (adapter == NULL)
+ return -EINVAL;
+ ret = nbl_userdev_port_config(adapter, NBL_USER_NETWORK);
+ if (ret)
+ return ret;
+
+ ret = nbl_dev_txrx_start(eth_dev);
+ if (ret) {
+ NBL_LOG(ERR, "dev_txrx_start failed %d", ret);
+ nbl_userdev_port_config(adapter, NBL_KERNEL_NETWORK);
+ return ret;
+ }
+
+ common->pf_start = 1;
return 0;
}
+static void nbl_clear_queues(struct rte_eth_dev *eth_dev)
+{
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+ struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
+ int i;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ disp_ops->stop_tx_ring(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), i);
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ disp_ops->stop_rx_ring(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), i);
+}
+
+static void nbl_dev_txrx_stop(struct rte_eth_dev *eth_dev)
+{
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+ struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
+
+ disp_ops->cfg_dsch(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), dev_mgt->net_dev->vsi_id, false);
+ disp_ops->remove_cqs(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), dev_mgt->net_dev->vsi_id);
+ disp_ops->remove_all_queues(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), dev_mgt->net_dev->vsi_id);
+}
+
static int nbl_dev_port_stop(struct rte_eth_dev *eth_dev)
{
- RTE_SET_USED(eth_dev);
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+ common->pf_start = 0;
+ rte_delay_ms(NBL_SAFE_THREADS_WAIT_TIME);
+
+ nbl_clear_queues(eth_dev);
+ nbl_dev_txrx_stop(eth_dev);
+ nbl_userdev_port_config(adapter, NBL_KERNEL_NETWORK);
return 0;
}
+static void nbl_release_queues(struct rte_eth_dev *eth_dev)
+{
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+ struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
+ int i;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ disp_ops->release_tx_ring(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), i);
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ disp_ops->release_rx_ring(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), i);
+}
+
static int nbl_dev_close(struct rte_eth_dev *eth_dev)
{
- RTE_SET_USED(eth_dev);
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+
+ /* pf may not start, so no queue need release */
+ if (common->pf_start)
+ nbl_release_queues(eth_dev);
+
return 0;
}
@@ -180,13 +337,13 @@ static void nbl_dev_leonis_uninit(void *adapter)
static int nbl_dev_common_start(struct nbl_dev_mgt *dev_mgt)
{
const struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
- struct nbl_dev_net_mgt *net_dev = dev_mgt->net_dev;
- struct nbl_common_info *common = dev_mgt->common;
+ struct nbl_dev_net_mgt *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt);
+ struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
struct nbl_board_port_info *board_info;
u8 *mac;
int ret;
- board_info = &dev_mgt->common->board_info;
+ board_info = &common->board_info;
disp_ops->get_board_info(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), board_info);
mac = net_dev->eth_dev->data->mac_addrs->addr_bytes;
@@ -228,8 +385,8 @@ static void nbl_dev_leonis_stop(void *p)
{
struct nbl_adapter *adapter = (struct nbl_adapter *)p;
struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
- struct nbl_dev_net_mgt *net_dev = dev_mgt->net_dev;
- const struct nbl_common_info *common = dev_mgt->common;
+ struct nbl_dev_net_mgt *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt);
+ const struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
const struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
u8 *mac;
diff --git a/drivers/net/nbl/nbl_dispatch.c b/drivers/net/nbl/nbl_dispatch.c
index 4265e5309c..2d44909aab 100644
--- a/drivers/net/nbl/nbl_dispatch.c
+++ b/drivers/net/nbl/nbl_dispatch.c
@@ -335,6 +335,31 @@ static void nbl_disp_chan_get_eth_id_req(void *priv, u16 vsi_id, u8 *eth_mode, u
*eth_id = result.eth_id;
}
+static u16 nbl_disp_get_vsi_global_qid(void *priv, u16 vsi_id, u16 local_qid)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ return NBL_OPS_CALL(res_ops->get_vsi_global_qid,
+ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, local_qid));
+}
+
+static u16
+nbl_disp_chan_get_vsi_global_qid_req(void *priv, u16 vsi_id, u16 local_qid)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_vsi_qid_info param = {0};
+ struct nbl_chan_send_info chan_send;
+
+ param.vsi_id = vsi_id;
+ param.local_qid = local_qid;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID,
+ &param, sizeof(param), NULL, 0, 1);
+ return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
static int nbl_disp_chan_setup_q2vsi(void *priv, u16 vsi_id)
{
struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
@@ -488,8 +513,7 @@ static void nbl_disp_chan_clear_flow_req(void *priv, u16 vsi_id)
struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
struct nbl_chan_send_info chan_send = {0};
- NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CLEAR_FLOW, &vsi_id, sizeof(vsi_id),
- NULL, 0, 1);
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CLEAR_FLOW, &vsi_id, sizeof(vsi_id), NULL, 0, 1);
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
}
@@ -514,8 +538,7 @@ nbl_disp_chan_add_macvlan_req(void *priv, u8 *mac, u16 vlan_id, u16 vsi_id)
param.vlan = vlan_id;
param.vsi = vsi_id;
- NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_MACVLAN,
- &param, sizeof(param), NULL, 0, 1);
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_MACVLAN, &param, sizeof(param), NULL, 0, 1);
return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
}
@@ -540,8 +563,7 @@ nbl_disp_chan_del_macvlan_req(void *priv, u8 *mac, u16 vlan_id, u16 vsi_id)
param.vlan = vlan_id;
param.vsi = vsi_id;
- NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_MACVLAN,
- &param, sizeof(param), NULL, 0, 1);
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_MACVLAN, &param, sizeof(param), NULL, 0, 1);
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
}
@@ -562,8 +584,7 @@ static int nbl_disp_chan_add_multi_rule_req(void *priv, u16 vsi_id)
param.vsi = vsi_id;
- NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_MULTI_RULE,
- &param, sizeof(param), NULL, 0, 1);
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_ADD_MULTI_RULE, &param, sizeof(param), NULL, 0, 1);
return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
}
@@ -584,8 +605,74 @@ static void nbl_disp_chan_del_multi_rule_req(void *priv, u16 vsi)
param.vsi = vsi;
- NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_MULTI_RULE,
- &param, sizeof(param), NULL, 0, 1);
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_DEL_MULTI_RULE, &param, sizeof(param), NULL, 0, 1);
+ chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static int nbl_disp_cfg_dsch(void *priv, u16 vsi_id, bool vld)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ return NBL_OPS_CALL(res_ops->cfg_dsch, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, vld));
+}
+
+static int nbl_disp_chan_cfg_dsch_req(void *priv, u16 vsi_id, bool vld)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_param_cfg_dsch param = {0};
+ struct nbl_chan_send_info chan_send;
+
+ param.vsi_id = vsi_id;
+ param.vld = vld;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_CFG_DSCH, &param, sizeof(param), NULL, 0, 1);
+ return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static int nbl_disp_setup_cqs(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ return NBL_OPS_CALL(res_ops->setup_cqs,
+ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id, real_qps, rss_indir_set));
+}
+
+static int nbl_disp_chan_setup_cqs_req(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_param_setup_cqs param = {0};
+ struct nbl_chan_send_info chan_send;
+
+ param.vsi_id = vsi_id;
+ param.real_qps = real_qps;
+ param.rss_indir_set = rss_indir_set;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_SETUP_CQS, &param, sizeof(param), NULL, 0, 1);
+ return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static void nbl_disp_remove_cqs(void *priv, u16 vsi_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+
+ NBL_OPS_CALL(res_ops->remove_cqs, (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), vsi_id));
+}
+
+static void nbl_disp_chan_remove_cqs_req(void *priv, u16 vsi_id)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_param_remove_cqs param = {0};
+ struct nbl_chan_send_info chan_send;
+
+ param.vsi_id = vsi_id;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_MSG_REMOVE_CQS, &param, sizeof(param), NULL, 0, 1);
chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
}
@@ -654,6 +741,11 @@ do { \
NBL_DISP_SET_OPS(get_eth_id, nbl_disp_get_eth_id, \
NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_GET_ETH_ID,\
nbl_disp_chan_get_eth_id_req, NULL); \
+ NBL_DISP_SET_OPS(get_vsi_global_qid, \
+ nbl_disp_get_vsi_global_qid, \
+ NBL_DISP_CTRL_LVL_MGT, \
+ NBL_CHAN_MSG_GET_VSI_GLOBAL_QUEUE_ID, \
+ nbl_disp_chan_get_vsi_global_qid_req, NULL); \
NBL_DISP_SET_OPS(setup_q2vsi, nbl_disp_chan_setup_q2vsi, \
NBL_DISP_CTRL_LVL_MGT, \
NBL_CHAN_MSG_SETUP_Q2VSI, \
@@ -696,6 +788,15 @@ do { \
NBL_DISP_CTRL_LVL_MGT, \
NBL_CHAN_MSG_DEL_MULTI_RULE, \
nbl_disp_chan_del_multi_rule_req, NULL); \
+ NBL_DISP_SET_OPS(cfg_dsch, nbl_disp_cfg_dsch, \
+ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_CFG_DSCH, \
+ nbl_disp_chan_cfg_dsch_req, NULL); \
+ NBL_DISP_SET_OPS(setup_cqs, nbl_disp_setup_cqs, \
+ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_SETUP_CQS, \
+ nbl_disp_chan_setup_cqs_req, NULL); \
+ NBL_DISP_SET_OPS(remove_cqs, nbl_disp_remove_cqs, \
+ NBL_DISP_CTRL_LVL_MGT, NBL_CHAN_MSG_REMOVE_CQS,\
+ nbl_disp_chan_remove_cqs_req, NULL); \
} while (0)
/* Structure starts here, adding an op should not modify anything below */
diff --git a/drivers/net/nbl/nbl_ethdev.c b/drivers/net/nbl/nbl_ethdev.c
index e7694988ce..5cbeed6a33 100644
--- a/drivers/net/nbl/nbl_ethdev.c
+++ b/drivers/net/nbl/nbl_ethdev.c
@@ -10,10 +10,15 @@ RTE_LOG_REGISTER_SUFFIX(nbl_logtype_driver, driver, INFO);
static int nbl_dev_release_pf(struct rte_eth_dev *eth_dev)
{
struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_dev_ops_tbl *dev_ops_tbl;
+ struct nbl_dev_ops *dev_ops;
if (!adapter)
return -EINVAL;
NBL_LOG(INFO, "start to close device %s", eth_dev->device->name);
+ dev_ops_tbl = NBL_ADAPTER_TO_DEV_OPS_TBL(adapter);
+ dev_ops = NBL_DEV_OPS_TBL_TO_OPS(dev_ops_tbl);
+ dev_ops->dev_close(eth_dev);
nbl_core_stop(adapter);
nbl_core_remove(adapter);
return 0;
diff --git a/drivers/net/nbl/nbl_hw/nbl_txrx.c b/drivers/net/nbl/nbl_hw/nbl_txrx.c
index 941b3b50dc..27b549beda 100644
--- a/drivers/net/nbl/nbl_hw/nbl_txrx.c
+++ b/drivers/net/nbl/nbl_hw/nbl_txrx.c
@@ -63,7 +63,7 @@ static void nbl_res_txrx_stop_tx_ring(void *priv, u16 queue_idx)
tx_ring->desc[i].flags = 0;
}
- tx_ring->avail_used_flags = BIT(NBL_PACKED_DESC_F_AVAIL);
+ tx_ring->avail_used_flags = NBL_PACKED_DESC_F_AVAIL_BIT;
tx_ring->used_wrap_counter = 1;
tx_ring->next_to_clean = NBL_TX_RS_THRESH - 1;
tx_ring->next_to_use = 0;
@@ -166,7 +166,7 @@ static int nbl_res_txrx_start_tx_ring(void *priv,
tx_ring->notify_qid =
(res_mgt->res_info.base_qid + txrx_mgt->queue_offset + param->queue_idx) * 2 + 1;
tx_ring->ring_phys_addr = (u64)NBL_DMA_ADDERSS_FULL_TRANSLATE(common, memzone->iova);
- tx_ring->avail_used_flags = BIT(NBL_PACKED_DESC_F_AVAIL);
+ tx_ring->avail_used_flags = NBL_PACKED_DESC_F_AVAIL_BIT;
tx_ring->used_wrap_counter = 1;
tx_ring->next_to_clean = NBL_TX_RS_THRESH - 1;
tx_ring->next_to_use = 0;
@@ -314,8 +314,62 @@ static int nbl_res_txrx_start_rx_ring(void *priv,
static int nbl_res_alloc_rx_bufs(void *priv, u16 queue_idx)
{
- RTE_SET_USED(priv);
- RTE_SET_USED(queue_idx);
+ struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+ struct nbl_res_rx_ring *rxq = NBL_RES_MGT_TO_RX_RING(res_mgt, queue_idx);
+ struct nbl_rx_entry *rx_entry = rxq->rx_entry;
+ volatile struct nbl_packed_desc *rx_desc;
+ struct nbl_rx_entry *rxe;
+ struct rte_mbuf *mbuf;
+ u64 dma_addr;
+ int i;
+ u32 frame_size = rxq->eth_dev->data->mtu + NBL_ETH_OVERHEAD + rxq->exthdr_len;
+ u16 buf_length;
+
+ rxq->avail_used_flags = NBL_PACKED_DESC_F_AVAIL_BIT | NBL_PACKED_DESC_F_WRITE_BIT;
+ rxq->used_wrap_counter = 1;
+
+ for (i = 0; i < rxq->nb_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mempool);
+ if (mbuf == NULL) {
+ NBL_LOG(ERR, "RX mbuf alloc failed for queue %u", rxq->queue_id);
+ return -ENOMEM;
+ }
+ dma_addr = NBL_DMA_ADDERSS_FULL_TRANSLATE(rxq, rte_mbuf_data_iova_default(mbuf));
+ rx_desc = &rxq->desc[i];
+ rxe = &rx_entry[i];
+ rx_desc->addr = dma_addr;
+ rx_desc->len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+ rx_desc->flags = rxq->avail_used_flags;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ rxe->mbuf = mbuf;
+ }
+
+ rxq->next_to_clean = 0;
+ rxq->next_to_use = 0;
+ rxq->vq_free_cnt = 0;
+ rxq->avail_used_flags ^= NBL_PACKED_DESC_F_AVAIL_USED;
+
+ buf_length = rte_pktmbuf_data_room_size(rxq->mempool) - RTE_PKTMBUF_HEADROOM;
+ if (buf_length >= NBL_BUF_LEN_16K) {
+ rxq->buf_length = NBL_BUF_LEN_16K;
+ } else if (buf_length >= NBL_BUF_LEN_8K) {
+ rxq->buf_length = NBL_BUF_LEN_8K;
+ } else if (buf_length >= NBL_BUF_LEN_4K) {
+ rxq->buf_length = NBL_BUF_LEN_4K;
+ } else if (buf_length >= NBL_BUF_LEN_2K) {
+ rxq->buf_length = NBL_BUF_LEN_2K;
+ } else {
+ NBL_LOG(ERR, "mempool mbuf length should be at least 2kB, but current value is %u",
+ buf_length);
+ nbl_res_txrx_stop_rx_ring(res_mgt, queue_idx);
+ return -EINVAL;
+ }
+
+ if (frame_size > rxq->buf_length)
+ rxq->eth_dev->data->scattered_rx = 1;
+
+ rxq->buf_length = rxq->buf_length - RTE_PKTMBUF_HEADROOM;
+
return 0;
}
@@ -335,8 +389,14 @@ static void nbl_res_txrx_release_rx_ring(void *priv, u16 queue_idx)
static void nbl_res_txrx_update_rx_ring(void *priv, u16 index)
{
- RTE_SET_USED(priv);
- RTE_SET_USED(index);
+ struct nbl_resource_mgt *res_mgt = (struct nbl_resource_mgt *)priv;
+ struct nbl_phy_ops *phy_ops = NBL_RES_MGT_TO_PHY_OPS(res_mgt);
+ struct nbl_res_rx_ring *rx_ring = NBL_RES_MGT_TO_RX_RING(res_mgt, index);
+
+ phy_ops->update_tail_ptr(NBL_RES_MGT_TO_PHY_PRIV(res_mgt),
+ rx_ring->notify_qid,
+ ((!!(rx_ring->avail_used_flags & NBL_PACKED_DESC_F_AVAIL_BIT)) |
+ rx_ring->next_to_use));
}
/* NBL_TXRX_SET_OPS(ops_name, func)
diff --git a/drivers/net/nbl/nbl_hw/nbl_txrx.h b/drivers/net/nbl/nbl_hw/nbl_txrx.h
index 83696dbc72..5cf6e83c3f 100644
--- a/drivers/net/nbl/nbl_hw/nbl_txrx.h
+++ b/drivers/net/nbl/nbl_hw/nbl_txrx.h
@@ -7,16 +7,26 @@
#include "nbl_resource.h"
+#define NBL_PACKED_DESC_F_NEXT (0)
+#define NBL_PACKED_DESC_F_WRITE (1)
#define NBL_PACKED_DESC_F_AVAIL (7)
#define NBL_PACKED_DESC_F_USED (15)
-#define NBL_VRING_DESC_F_NEXT (1 << 0)
-#define NBL_VRING_DESC_F_WRITE (1 << 1)
+#define NBL_PACKED_DESC_F_NEXT_BIT (1 << NBL_PACKED_DESC_F_NEXT)
+#define NBL_PACKED_DESC_F_WRITE_BIT (1 << NBL_PACKED_DESC_F_WRITE)
+#define NBL_PACKED_DESC_F_AVAIL_BIT (1 << NBL_PACKED_DESC_F_AVAIL)
+#define NBL_PACKED_DESC_F_USED_BIT (1 << NBL_PACKED_DESC_F_USED)
+#define NBL_PACKED_DESC_F_AVAIL_USED (NBL_PACKED_DESC_F_AVAIL_BIT | \
+ NBL_PACKED_DESC_F_USED_BIT)
#define NBL_TX_RS_THRESH (32)
#define NBL_TX_HEADER_LEN (32)
#define NBL_VQ_HDR_NAME_MAXSIZE (32)
#define NBL_DESC_PER_LOOP_VEC_MAX (8)
+#define NBL_BUF_LEN_16K (16384)
+#define NBL_BUF_LEN_8K (8192)
+#define NBL_BUF_LEN_4K (4096)
+#define NBL_BUF_LEN_2K (2048)
union nbl_tx_extend_head {
struct nbl_tx_ehdr_leonis {
diff --git a/drivers/net/nbl/nbl_include/nbl_def_channel.h b/drivers/net/nbl/nbl_include/nbl_def_channel.h
index 829014fa16..f20cc1ab7c 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_channel.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_channel.h
@@ -321,6 +321,11 @@ struct nbl_chan_param_get_eth_id {
u8 logic_eth_id;
};
+struct nbl_chan_vsi_qid_info {
+ u16 vsi_id;
+ u16 local_qid;
+};
+
struct nbl_chan_param_register_vsi2q {
u16 vsi_index;
u16 vsi_id;
@@ -350,6 +355,21 @@ struct nbl_chan_param_del_multi_rule {
u16 vsi;
};
+struct nbl_chan_param_cfg_dsch {
+ u16 vsi_id;
+ bool vld;
+};
+
+struct nbl_chan_param_setup_cqs {
+ u16 vsi_id;
+ u16 real_qps;
+ bool rss_indir_set;
+};
+
+struct nbl_chan_param_remove_cqs {
+ u16 vsi_id;
+};
+
struct nbl_chan_send_info {
uint16_t dstid;
uint16_t msg_type;
diff --git a/drivers/net/nbl/nbl_include/nbl_def_common.h b/drivers/net/nbl/nbl_include/nbl_def_common.h
index 9773efc246..722a372548 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_common.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_common.h
@@ -27,11 +27,15 @@
#define NBL_DEV_USER_TYPE ('n')
#define NBL_DEV_USER_DATA_LEN (2044)
-#define NBL_DEV_USER_PCI_OFFSET_SHIFT 40
+#define NBL_DEV_USER_PCI_OFFSET_SHIFT (40)
#define NBL_DEV_USER_OFFSET_TO_INDEX(off) ((off) >> NBL_DEV_USER_PCI_OFFSET_SHIFT)
#define NBL_DEV_USER_INDEX_TO_OFFSET(index) ((u64)(index) << NBL_DEV_USER_PCI_OFFSET_SHIFT)
#define NBL_DEV_SHM_MSG_RING_INDEX (6)
+#define NBL_VLAN_TAG_SIZE (4)
+#define NBL_ETH_OVERHEAD \
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + NBL_VLAN_TAG_SIZE * 2)
+
struct nbl_dev_user_channel_msg {
u16 msg_type;
u16 dst_id;
diff --git a/drivers/net/nbl/nbl_include/nbl_def_dispatch.h b/drivers/net/nbl/nbl_include/nbl_def_dispatch.h
index ac261db26a..e38c5a84aa 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_dispatch.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_dispatch.h
@@ -59,7 +59,7 @@ struct nbl_dispatch_ops {
int (*setup_rss)(void *priv, u16 vsi_id);
void (*remove_rss)(void *priv, u16 vsi_id);
int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld);
- int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps);
+ int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set);
void (*remove_cqs)(void *priv, u16 vsi_id);
int (*set_rxfh_indir)(void *priv, u16 vsi_id, u32 *indir, u32 indir_size);
void (*clear_queues)(void *priv, u16 vsi_id);
diff --git a/drivers/net/nbl/nbl_include/nbl_def_resource.h b/drivers/net/nbl/nbl_include/nbl_def_resource.h
index a40ccc4fd8..5fec287581 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_resource.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_resource.h
@@ -18,6 +18,7 @@ struct nbl_resource_ops {
int (*unregister_net)(void *priv);
u16 (*get_vsi_id)(void *priv);
void (*get_eth_id)(void *priv, u16 vsi_id, u8 *eth_mode, u8 *eth_id);
+ u16 (*get_vsi_global_qid)(void *priv, u16 vsi_id, u16 local_qid);
int (*setup_q2vsi)(void *priv, u16 vsi_id);
void (*remove_q2vsi)(void *priv, u16 vsi_id);
int (*register_vsi2q)(void *priv, u16 vsi_index, u16 vsi_id,
@@ -57,6 +58,9 @@ struct nbl_resource_ops {
void (*del_multi_rule)(void *priv, u16 vsi_id);
int (*cfg_multi_mcast)(void *priv, u16 vsi_id, u16 enable);
void (*clear_flow)(void *priv, u16 vsi_id);
+ int (*cfg_dsch)(void *priv, u16 vsi_id, bool vld);
+ int (*setup_cqs)(void *priv, u16 vsi_id, u16 real_qps, bool rss_indir_set);
+ void (*remove_cqs)(void *priv, u16 vsi_id);
};
struct nbl_resource_ops_tbl {
diff --git a/drivers/net/nbl/nbl_include/nbl_include.h b/drivers/net/nbl/nbl_include/nbl_include.h
index 0efeb11b46..7f751ea9ce 100644
--- a/drivers/net/nbl/nbl_include/nbl_include.h
+++ b/drivers/net/nbl/nbl_include/nbl_include.h
@@ -59,7 +59,9 @@ typedef int16_t s16;
typedef int8_t s8;
/* Used for macros to pass checkpatch */
-#define NBL_NAME(x) x
+#define NBL_NAME(x) x
+#define BIT(a) (1UL << (a))
+#define NBL_SAFE_THREADS_WAIT_TIME (20)
enum {
NBL_VSI_DATA = 0, /* default vsi in kernel or independent dpdk */
@@ -82,8 +84,6 @@ struct nbl_func_caps {
u32 rsv:30;
};
-#define BIT(a) (1UL << (a))
-
struct nbl_start_rx_ring_param {
u16 queue_idx;
u16 nb_desc;
--
2.43.0
next prev parent reply other threads:[~2025-06-12 9:01 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-06-12 8:58 [PATCH v1 00/17] NBL PMD for Nebulamatrix NICs Kyo Liu
2025-06-12 8:58 ` [PATCH v1 01/17] net/nbl: add doc and minimum nbl build framework Kyo Liu
2025-06-12 8:58 ` [PATCH v1 02/17] net/nbl: add simple probe/remove and log module Kyo Liu
2025-06-12 17:49 ` Stephen Hemminger
2025-06-13 2:32 ` 回复:[PATCH " Kyo.Liu
2025-06-12 8:58 ` [PATCH v1 03/17] net/nbl: add PHY layer definitions and implementation Kyo Liu
2025-06-12 8:58 ` [PATCH v1 04/17] net/nbl: add Channel " Kyo Liu
2025-06-12 8:58 ` [PATCH v1 05/17] net/nbl: add Resource " Kyo Liu
2025-06-12 8:58 ` [PATCH v1 06/17] net/nbl: add Dispatch " Kyo Liu
2025-06-12 8:58 ` [PATCH v1 07/17] net/nbl: add Dev " Kyo Liu
2025-06-12 8:58 ` [PATCH v1 08/17] net/nbl: add complete device init and uninit functionality Kyo Liu
2025-06-12 8:58 ` [PATCH v1 09/17] net/nbl: add uio and vfio mode for nbl Kyo Liu
2025-06-12 8:58 ` [PATCH v1 10/17] net/nbl: bus/pci: introduce get_iova_mode for pci dev Kyo Liu
2025-06-12 17:40 ` Stephen Hemminger
2025-06-13 2:28 ` 回复:[PATCH " Kyo.Liu
2025-06-13 7:35 ` [PATCH " David Marchand
2025-06-13 15:21 ` 回复:[PATCH " Stephen Hemminger
2025-06-12 8:58 ` [PATCH v1 11/17] net/nbl: add nbl coexistence mode for nbl Kyo Liu
2025-06-12 8:58 ` [PATCH v1 12/17] net/nbl: add nbl ethdev configuration Kyo Liu
2025-06-12 8:58 ` [PATCH v1 13/17] net/nbl: add nbl device rxtx queue setup and release ops Kyo Liu
2025-06-12 8:58 ` Kyo Liu [this message]
2025-06-12 8:58 ` [PATCH v1 15/17] net/nbl: add nbl device tx and rx burst Kyo Liu
2025-06-12 8:58 ` [PATCH v1 16/17] net/nbl: add nbl device xstats and stats Kyo Liu
2025-06-12 8:58 ` [PATCH v1 17/17] net/nbl: nbl device support set mtu and promisc Kyo Liu
2025-06-12 17:35 ` [PATCH v1 00/17] NBL PMD for Nebulamatrix NICs Stephen Hemminger
2025-06-12 17:44 ` Stephen Hemminger
2025-06-13 2:31 ` 回复:[PATCH " Kyo.Liu
2025-06-12 17:46 ` [PATCH " Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250612085840.729830-15-kyo.liu@nebula-matrix.com \
--to=kyo.liu@nebula-matrix.com \
--cc=dev@dpdk.org \
--cc=dimon.zhao@nebula-matrix.com \
--cc=leon.yu@nebula-matrix.com \
--cc=sam.chen@nebula-matrix.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).