* [PATCH v1 2/4] net/nbl: add support for Tx and Rx VLAN offload
2025-11-07 7:34 [PATCH v1 0/4] NBL add new features Dimon Zhao
2025-11-07 7:34 ` [PATCH v1 1/4] net/nbl: change default Rx extension header size to 12 bytes Dimon Zhao
@ 2025-11-07 7:34 ` Dimon Zhao
2025-11-07 16:10 ` Stephen Hemminger
2025-11-07 7:34 ` [PATCH v1 3/4] net/nbl: add support for imissed stats Dimon Zhao
2025-11-07 7:34 ` [PATCH v1 4/4] net/nbl: update documentation and maintainers Dimon Zhao
3 siblings, 1 reply; 7+ messages in thread
From: Dimon Zhao @ 2025-11-07 7:34 UTC (permalink / raw)
To: dimon.zhao, dev; +Cc: Alvin Wang, Leon Yu, Sam Chen
We simulate support for Tx and Rx VLAN offload,
while in reality we handle Tx VLAN insertion
and Rx VLAN stripping in software.
This implementation is necessary because some of our customers
assume our NICs natively support Tx and Rx VLAN offload capabilities.
They enable VLAN offload during packet transmission and reception
without checking the eth_dev capabilities.
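For context only (this sketch is not part of the patch), the application-side
usage this emulation targets looks roughly like the following, using the public
DPDK ethdev/mbuf API; the port id, queue counts and VLAN id are illustrative
assumptions:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative sketch: request the VLAN offloads at configure time without
 * consulting dev_info capabilities, as the applications described above do. */
static int
configure_vlan_offload(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {0};

	conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

/* Per-packet Tx side: mark the mbuf and rely on the PMD to insert the tag,
 * which this patch now emulates in software. */
static void
request_tx_vlan(struct rte_mbuf *m, uint16_t vlan_id)
{
	m->ol_flags |= RTE_MBUF_F_TX_VLAN;
	m->vlan_tci = vlan_id;
}

With the capabilities advertised below for PF ports, such applications keep
working even though the insertion and stripping are done by the PMD in software.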
Signed-off-by: Dimon Zhao <dimon.zhao@nebula-matrix.com>
---
drivers/net/nbl/nbl_dev/nbl_dev.c | 5 ++++
drivers/net/nbl/nbl_hw/nbl_txrx.c | 47 ++++++++++++++++++++++++++++++-
2 files changed, 51 insertions(+), 1 deletion(-)
diff --git a/drivers/net/nbl/nbl_dev/nbl_dev.c b/drivers/net/nbl/nbl_dev/nbl_dev.c
index 1992568088..900b6efd97 100644
--- a/drivers/net/nbl/nbl_dev/nbl_dev.c
+++ b/drivers/net/nbl/nbl_dev/nbl_dev.c
@@ -301,6 +301,7 @@ int nbl_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_
struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
struct nbl_dev_ring_mgt *ring_mgt = &dev_mgt->net_dev->ring_mgt;
+ struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
struct nbl_board_port_info *board_info = &dev_mgt->common->board_info;
u8 speed_mode = board_info->speed;
@@ -331,6 +332,10 @@ int nbl_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_
dev_info->default_txportconf.nb_queues = ring_mgt->tx_ring_num;
dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+ if (!common->is_vf) {
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ }
switch (speed_mode) {
case NBL_FW_PORT_SPEED_100G:
dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
diff --git a/drivers/net/nbl/nbl_hw/nbl_txrx.c b/drivers/net/nbl/nbl_hw/nbl_txrx.c
index 3c93765a5f..ea77d258ba 100644
--- a/drivers/net/nbl/nbl_hw/nbl_txrx.c
+++ b/drivers/net/nbl/nbl_hw/nbl_txrx.c
@@ -237,8 +237,11 @@ static int nbl_res_txrx_start_rx_ring(void *priv,
const struct nbl_hw_ops *hw_ops = NBL_RES_MGT_TO_HW_OPS(res_mgt);
struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
const struct rte_memzone *memzone;
+ uint64_t offloads;
u32 size;
+ offloads = param->conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
+
if (eth_dev->data->rx_queues[param->queue_idx] != NULL) {
NBL_LOG(WARNING, "re-setup an already allocated rx queue");
nbl_res_txrx_stop_rx_ring(priv, param->queue_idx);
@@ -284,8 +287,8 @@ static int nbl_res_txrx_start_rx_ring(void *priv,
rx_ring->dma_limit_msb = common->dma_limit_msb;
rx_ring->common = common;
rx_ring->notify = hw_ops->get_tail_ptr(NBL_RES_MGT_TO_HW_PRIV(res_mgt));
+ rx_ring->offloads = offloads;
rx_ring->rx_hash_en = param->rx_hash_en;
-
switch (param->product) {
case NBL_LEONIS_TYPE:
if (param->rx_hash_en)
@@ -441,6 +444,23 @@ static inline void nbl_fill_rx_ring(struct nbl_res_rx_ring *rxq,
rxq->next_to_use = desc_index;
}
+static inline void nbl_res_txrx_vlan_insert_out_mbuf(struct rte_mbuf *tx_pkt,
+ union nbl_tx_extend_head *u,
+ u16 vlan_proto, u16 vlan_tci)
+{
+ struct rte_vlan_hdr *vlan_hdr;
+ struct rte_ether_hdr *ether_hdr;
+
+ ether_hdr = (struct rte_ether_hdr *)((u8 *)u + sizeof(struct nbl_tx_ehdr_leonis));
+ rte_memcpy(ether_hdr, rte_pktmbuf_mtod(tx_pkt, u8 *), sizeof(struct rte_ether_hdr));
+
+ vlan_hdr = (struct rte_vlan_hdr *)(ether_hdr + 1);
+ vlan_hdr->vlan_tci = rte_cpu_to_be_16(vlan_tci);
+ vlan_hdr->eth_proto = ether_hdr->ether_type;
+
+ ether_hdr->ether_type = rte_cpu_to_be_16(vlan_proto);
+}
+
static u16
nbl_res_txrx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts, u16 extend_set)
{
@@ -481,6 +501,12 @@ nbl_res_txrx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts, u
tx_pkt = *tx_pkts++;
+ if (tx_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
+ required_headroom += sizeof(struct rte_vlan_hdr);
+ /* extend_hdr + ether_hdr + vlan_hdr */
+ tx_extend_len = required_headroom + sizeof(struct rte_ether_hdr);
+ }
+
if (rte_pktmbuf_headroom(tx_pkt) >= required_headroom) {
can_push = 1;
u = rte_pktmbuf_mtod_offset(tx_pkt, union nbl_tx_extend_head *,
@@ -489,6 +515,21 @@ nbl_res_txrx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts, u
can_push = 0;
u = (union nbl_tx_extend_head *)(&tx_region[desc_index]);
}
+
+ if (tx_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
+ if (likely(can_push)) {
+ if (rte_vlan_insert(&tx_pkt)) {
+ can_push = 0;
+ u = (union nbl_tx_extend_head *)(&tx_region[desc_index]);
+ }
+ }
+ if (unlikely(!can_push)) {
+ addr_offset += sizeof(struct rte_ether_hdr);
+ nbl_res_txrx_vlan_insert_out_mbuf(tx_pkt, u, RTE_ETHER_TYPE_VLAN,
+ tx_pkt->vlan_tci);
+ }
+ }
+
nb_descs = !can_push + tx_pkt->nb_segs;
if (nb_descs > txq->vq_free_cnt) {
@@ -642,6 +683,10 @@ nbl_res_txrx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
if (--num_sg)
continue;
+
+ if (rxq->eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ rte_vlan_strip(rx_mbuf);
+
if (drop) {
rxq->rxq_stats.rx_drop_proto++;
rte_pktmbuf_free(rx_mbuf);
--
2.34.1
* [PATCH v1 3/4] net/nbl: add support for imissed stats
2025-11-07 7:34 [PATCH v1 0/4] NBL add new features Dimon Zhao
2025-11-07 7:34 ` [PATCH v1 1/4] net/nbl: change default Rx extension header size to 12 bytes Dimon Zhao
2025-11-07 7:34 ` [PATCH v1 2/4] net/nbl: add support for Tx and Rx VLAN offload Dimon Zhao
@ 2025-11-07 7:34 ` Dimon Zhao
2025-11-07 16:05 ` Stephen Hemminger
2025-11-07 7:34 ` [PATCH v1 4/4] net/nbl: update documentation and maintainers Dimon Zhao
3 siblings, 1 reply; 7+ messages in thread
From: Dimon Zhao @ 2025-11-07 7:34 UTC (permalink / raw)
To: dimon.zhao, dev; +Cc: Alvin Wang, Leon Yu, Sam Chen
Add an imissed statistic to monitor the number of Rx packets
dropped by the hardware.
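For context only (not part of the patch), a minimal sketch of how an
application would observe the new counter through the standard stats API;
port_id is an illustrative assumption:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Illustrative sketch: the hardware Rx drop counters collected by this patch
 * are folded into rte_eth_stats.imissed and read with rte_eth_stats_get(). */
static void
print_imissed(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("port %u imissed: %" PRIu64 "\n", port_id, stats.imissed);
}

When per-queue stat slots are available, the same per-queue drops are also
reported in stats.q_errors[], as done in nbl_stats_get() below.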
Signed-off-by: Dimon Zhao <dimon.zhao@nebula-matrix.com>
---
drivers/net/nbl/nbl_dev/nbl_dev.c | 213 +++++++++++++++++-
drivers/net/nbl/nbl_dev/nbl_dev.h | 9 +
drivers/net/nbl/nbl_dispatch.c | 65 ++++++
drivers/net/nbl/nbl_include/nbl_def_channel.h | 5 +
.../net/nbl/nbl_include/nbl_def_dispatch.h | 4 +
.../net/nbl/nbl_include/nbl_def_resource.h | 4 +
drivers/net/nbl/nbl_include/nbl_include.h | 6 +
7 files changed, 304 insertions(+), 2 deletions(-)
diff --git a/drivers/net/nbl/nbl_dev/nbl_dev.c b/drivers/net/nbl/nbl_dev/nbl_dev.c
index 900b6efd97..d54e0cfa68 100644
--- a/drivers/net/nbl/nbl_dev/nbl_dev.c
+++ b/drivers/net/nbl/nbl_dev/nbl_dev.c
@@ -132,6 +132,101 @@ static int nbl_dev_txrx_start(struct rte_eth_dev *eth_dev)
return ret;
}
+static int nbl_dev_update_hw_stats(struct rte_eth_dev *eth_dev)
+{
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+ struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
+ struct nbl_dev_net_mgt *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt);
+ u32 *uvn_stat_pkt_drop;
+ int i = 0;
+ int ret = 0;
+
+ if (!net_dev->hw_stats_inited)
+ return 0;
+ uvn_stat_pkt_drop = rte_zmalloc("nbl_uvn_stat_pkt_drop",
+ sizeof(*uvn_stat_pkt_drop) * (eth_dev->data->nb_rx_queues), 0);
+ if (!uvn_stat_pkt_drop) {
+ ret = -ENOMEM;
+ goto alloc_uvn_stat_pkt_drop_fail;
+ }
+ ret = disp_ops->get_uvn_pkt_drop_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ net_dev->vsi_id,
+ eth_dev->data->nb_rx_queues, uvn_stat_pkt_drop);
+ if (ret)
+ goto get_uvn_pkt_drop_stats_fail;
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ net_dev->hw_stats.total_uvn_stat_pkt_drop[i] += uvn_stat_pkt_drop[i];
+ rte_free(uvn_stat_pkt_drop);
+ uvn_stat_pkt_drop = NULL;
+
+ return 0;
+
+get_uvn_pkt_drop_stats_fail:
+ rte_free(uvn_stat_pkt_drop);
+alloc_uvn_stat_pkt_drop_fail:
+ return ret;
+}
+
+static void nbl_dev_update_hw_stats_handler(void *param)
+{
+ struct rte_eth_dev *eth_dev = param;
+
+ nbl_dev_update_hw_stats(eth_dev);
+
+ rte_eal_alarm_set(NBL_ALARM_INTERNAL, nbl_dev_update_hw_stats_handler, eth_dev);
+}
+
+static int nbl_dev_hw_stats_start(struct rte_eth_dev *eth_dev)
+{
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+ struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
+ struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+ struct nbl_dev_net_mgt *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt);
+ struct nbl_ustore_stats ustore_stats = {0};
+ int ret;
+
+ net_dev->hw_stats.total_uvn_stat_pkt_drop =
+ rte_zmalloc("nbl_total_uvn_stat_pkt_drop",
+ sizeof(u64) * (eth_dev->data->nb_rx_queues), 0);
+ if (!net_dev->hw_stats.total_uvn_stat_pkt_drop) {
+ ret = -ENOMEM;
+ goto alloc_total_uvn_stat_pkt_drop_fail;
+ }
+ if (!common->is_vf) {
+ net_dev->hw_stats.start_ustore_stats =
+ rte_zmalloc("nbl_start_ustore_stats", sizeof(struct nbl_ustore_stats), 0);
+ if (!net_dev->hw_stats.start_ustore_stats) {
+ ret = -ENOMEM;
+ goto alloc_start_ustore_stats_fail;
+ }
+ }
+ if (!common->is_vf) {
+ ret = disp_ops->get_ustore_total_pkt_drop_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ common->eth_id, &ustore_stats);
+ if (ret) {
+ net_dev->hw_stats_inited = false;
+ return 0;
+ }
+ net_dev->hw_stats_inited = true;
+ net_dev->hw_stats.start_ustore_stats->rx_drop_packets =
+ ustore_stats.rx_drop_packets;
+ net_dev->hw_stats.start_ustore_stats->rx_trun_packets =
+ ustore_stats.rx_trun_packets;
+ }
+
+ rte_eal_alarm_set(NBL_ALARM_INTERNAL, nbl_dev_update_hw_stats_handler, eth_dev);
+
+ return 0;
+
+alloc_start_ustore_stats_fail:
+ rte_free(net_dev->hw_stats.total_uvn_stat_pkt_drop);
+ net_dev->hw_stats.total_uvn_stat_pkt_drop = NULL;
+alloc_total_uvn_stat_pkt_drop_fail:
+ return ret;
+}
+
int nbl_dev_port_start(struct rte_eth_dev *eth_dev)
{
struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
@@ -152,6 +247,10 @@ int nbl_dev_port_start(struct rte_eth_dev *eth_dev)
return ret;
}
+ ret = nbl_dev_hw_stats_start(eth_dev);
+ if (ret)
+ return ret;
+
common->pf_start = 1;
return 0;
}
@@ -181,6 +280,24 @@ static void nbl_dev_txrx_stop(struct rte_eth_dev *eth_dev)
disp_ops->remove_all_queues(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), dev_mgt->net_dev->vsi_id);
}
+static int nbl_dev_hw_stats_stop(struct rte_eth_dev *eth_dev)
+{
+ struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
+ struct nbl_common_info *common = NBL_ADAPTER_TO_COMMON(adapter);
+ struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
+ struct nbl_dev_net_mgt *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt);
+
+ rte_free(net_dev->hw_stats.total_uvn_stat_pkt_drop);
+ net_dev->hw_stats.total_uvn_stat_pkt_drop = NULL;
+ if (!common->is_vf) {
+ rte_free(net_dev->hw_stats.start_ustore_stats);
+ net_dev->hw_stats.start_ustore_stats = NULL;
+ }
+ rte_eal_alarm_cancel(nbl_dev_update_hw_stats_handler, eth_dev);
+
+ return 0;
+}
+
int nbl_dev_port_stop(struct rte_eth_dev *eth_dev)
{
struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
@@ -188,6 +305,7 @@ int nbl_dev_port_stop(struct rte_eth_dev *eth_dev)
common->pf_start = 0;
rte_delay_ms(NBL_SAFE_THREADS_WAIT_TIME);
+ nbl_dev_hw_stats_stop(eth_dev);
nbl_clear_queues(eth_dev);
nbl_dev_txrx_stop(eth_dev);
nbl_userdev_port_config(adapter, NBL_KERNEL_NETWORK);
@@ -376,8 +494,50 @@ int nbl_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats,
struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
+ struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+ struct nbl_dev_net_mgt *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt);
+ struct nbl_ustore_stats ustore_stats = {0};
+ int i = 0;
+ int ret = 0;
+
+ ret = disp_ops->get_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), rte_stats, qstats);
+ if (ret)
+ goto get_stats_fail;
- return disp_ops->get_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt), rte_stats, qstats);
+ if (!net_dev->hw_stats_inited)
+ return 0;
+
+ rte_eal_alarm_cancel(nbl_dev_update_hw_stats_handler, eth_dev);
+ ret = nbl_dev_update_hw_stats(eth_dev);
+ if (ret)
+ goto update_hw_stats_fail;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ if (qstats && i < RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ qstats->q_errors[i] = net_dev->hw_stats.total_uvn_stat_pkt_drop[i];
+ rte_stats->imissed += net_dev->hw_stats.total_uvn_stat_pkt_drop[i];
+ }
+
+ if (!common->is_vf) {
+ ret = disp_ops->get_ustore_total_pkt_drop_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ common->eth_id, &ustore_stats);
+ if (ret)
+ goto get_ustore_total_pkt_drop_stats_fail;
+ rte_stats->imissed += ustore_stats.rx_drop_packets -
+ net_dev->hw_stats.start_ustore_stats->rx_drop_packets;
+ rte_stats->imissed += ustore_stats.rx_trun_packets -
+ net_dev->hw_stats.start_ustore_stats->rx_trun_packets;
+ }
+
+ rte_eal_alarm_set(NBL_ALARM_INTERNAL, nbl_dev_update_hw_stats_handler, eth_dev);
+
+ return 0;
+
+get_ustore_total_pkt_drop_stats_fail:
+update_hw_stats_fail:
+ rte_eal_alarm_set(NBL_ALARM_INTERNAL, nbl_dev_update_hw_stats_handler, eth_dev);
+get_stats_fail:
+ return ret;
}
int nbl_stats_reset(struct rte_eth_dev *eth_dev)
@@ -385,8 +545,57 @@ int nbl_stats_reset(struct rte_eth_dev *eth_dev)
struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
struct nbl_dispatch_ops *disp_ops = NBL_DEV_MGT_TO_DISP_OPS(dev_mgt);
+ struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
+ struct nbl_dev_net_mgt *net_dev = NBL_DEV_MGT_TO_NET_DEV(dev_mgt);
+ u32 *uvn_stat_pkt_drop;
+ struct nbl_ustore_stats ustore_stats = {0};
+ int i = 0;
+ int ret = 0;
+
+ ret = disp_ops->reset_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt));
+
+ if (!net_dev->hw_stats_inited || ret)
+ return ret;
+
+ rte_eal_alarm_cancel(nbl_dev_update_hw_stats_handler, eth_dev);
- return disp_ops->reset_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt));
+ uvn_stat_pkt_drop = rte_zmalloc("nbl_uvn_stat_pkt_drop",
+ sizeof(*uvn_stat_pkt_drop) * (eth_dev->data->nb_rx_queues), 0);
+ if (!uvn_stat_pkt_drop) {
+ ret = -ENOMEM;
+ goto alloc_uvn_stat_pkt_drop_fail;
+ }
+ ret = disp_ops->get_uvn_pkt_drop_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ net_dev->vsi_id,
+ eth_dev->data->nb_rx_queues, uvn_stat_pkt_drop);
+ if (ret)
+ goto get_uvn_pkt_drop_stats_fail;
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ net_dev->hw_stats.total_uvn_stat_pkt_drop[i] = 0;
+ if (!common->is_vf) {
+ ret = disp_ops->get_ustore_total_pkt_drop_stats(NBL_DEV_MGT_TO_DISP_PRIV(dev_mgt),
+ common->eth_id, &ustore_stats);
+ if (ret)
+ goto get_ustore_total_pkt_drop_stats_fail;
+ net_dev->hw_stats.start_ustore_stats->rx_drop_packets =
+ ustore_stats.rx_drop_packets;
+ net_dev->hw_stats.start_ustore_stats->rx_trun_packets =
+ ustore_stats.rx_trun_packets;
+ }
+ rte_free(uvn_stat_pkt_drop);
+ uvn_stat_pkt_drop = NULL;
+
+ rte_eal_alarm_set(NBL_ALARM_INTERNAL, nbl_dev_update_hw_stats_handler, eth_dev);
+
+ return 0;
+
+get_ustore_total_pkt_drop_stats_fail:
+get_uvn_pkt_drop_stats_fail:
+ rte_free(uvn_stat_pkt_drop);
+ uvn_stat_pkt_drop = NULL;
+alloc_uvn_stat_pkt_drop_fail:
+ rte_eal_alarm_set(NBL_ALARM_INTERNAL, nbl_dev_update_hw_stats_handler, eth_dev);
+ return ret;
}
static int nbl_dev_update_hw_xstats(struct nbl_dev_mgt *dev_mgt, struct rte_eth_xstat *xstats,
diff --git a/drivers/net/nbl/nbl_dev/nbl_dev.h b/drivers/net/nbl/nbl_dev/nbl_dev.h
index 46683d0aff..5053f442a8 100644
--- a/drivers/net/nbl/nbl_dev/nbl_dev.h
+++ b/drivers/net/nbl/nbl_dev/nbl_dev.h
@@ -20,6 +20,8 @@
#define NBL_FRAME_SIZE_MAX (9600)
#define NBL_DEV_MIN_RX_BUFSIZE (2048)
+#define NBL_ALARM_INTERNAL (10000000)
+
struct nbl_dev_ring {
u16 index;
u64 dma;
@@ -38,6 +40,11 @@ struct nbl_dev_ring_mgt {
bool rx_hash_en;
};
+struct nbl_hw_stats {
+ u64 *total_uvn_stat_pkt_drop;
+ struct nbl_ustore_stats *start_ustore_stats;
+};
+
struct nbl_dev_net_mgt {
const struct rte_eth_dev *eth_dev;
struct nbl_dev_ring_mgt ring_mgt;
@@ -52,6 +59,8 @@ struct nbl_dev_net_mgt {
u8 trust:1;
u8 promisc:1;
u8 rsv:6;
+ struct nbl_hw_stats hw_stats;
+ bool hw_stats_inited;
};
struct nbl_dev_mgt {
diff --git a/drivers/net/nbl/nbl_dispatch.c b/drivers/net/nbl/nbl_dispatch.c
index 96d2c84c40..52d37ba7fe 100644
--- a/drivers/net/nbl/nbl_dispatch.c
+++ b/drivers/net/nbl/nbl_dispatch.c
@@ -816,6 +816,60 @@ static int nbl_disp_get_stats(void *priv, struct rte_eth_stats *rte_stats,
return res_ops->get_stats(NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), rte_stats, qstats);
}
+static int nbl_disp_get_uvn_pkt_drop_stats(void *priv, u16 vsi_id,
+ u16 num_queues, u32 *uvn_stat_pkt_drop)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ int ret = 0;
+
+ ret = NBL_OPS_CALL(res_ops->get_uvn_pkt_drop_stats,
+ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt),
+ vsi_id, num_queues, uvn_stat_pkt_drop));
+ return ret;
+}
+
+static int nbl_disp_chan_get_uvn_pkt_drop_stats_req(void *priv, u16 vsi_id,
+ u16 num_queues, u32 *uvn_stat_pkt_drop)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ const struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_param_get_uvn_pkt_drop_stats param = {0};
+ struct nbl_chan_send_info chan_send = {0};
+
+ param.vsi_id = vsi_id;
+ param.num_queues = num_queues;
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_GET_UVN_PKT_DROP_STATS,
+ &param, sizeof(param),
+ uvn_stat_pkt_drop, num_queues * sizeof(*uvn_stat_pkt_drop), 1);
+ return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
+static int nbl_disp_get_ustore_total_pkt_drop_stats(void *priv, u8 eth_id,
+ struct nbl_ustore_stats *ustore_stats)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ struct nbl_resource_ops *res_ops = NBL_DISP_MGT_TO_RES_OPS(disp_mgt);
+ int ret = 0;
+
+ ret = NBL_OPS_CALL(res_ops->get_ustore_total_pkt_drop_stats,
+ (NBL_DISP_MGT_TO_RES_PRIV(disp_mgt), eth_id, ustore_stats));
+ return ret;
+}
+
+static int nbl_disp_chan_get_ustore_total_pkt_drop_stats_req(void *priv, u8 eth_id,
+ struct nbl_ustore_stats *ustore_stats)
+{
+ struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
+ const struct nbl_channel_ops *chan_ops = NBL_DISP_MGT_TO_CHAN_OPS(disp_mgt);
+ struct nbl_chan_send_info chan_send = {0};
+
+ NBL_CHAN_SEND(chan_send, 0, NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS,
+ ð_id, sizeof(eth_id), ustore_stats, sizeof(*ustore_stats), 1);
+ return chan_ops->send_msg(NBL_DISP_MGT_TO_CHAN_PRIV(disp_mgt), &chan_send);
+}
+
static int nbl_disp_reset_stats(void *priv)
{
struct nbl_dispatch_mgt *disp_mgt = (struct nbl_dispatch_mgt *)priv;
@@ -1111,6 +1165,17 @@ do { \
NBL_DISP_SET_OPS(get_stats, nbl_disp_get_stats, \
NBL_DISP_CTRL_LVL_ALWAYS, -1, \
NULL, NULL); \
+ NBL_DISP_SET_OPS(get_uvn_pkt_drop_stats, \
+ nbl_disp_get_uvn_pkt_drop_stats, \
+ NBL_DISP_CTRL_LVL_MGT, \
+ NBL_CHAN_GET_UVN_PKT_DROP_STATS, \
+ nbl_disp_chan_get_uvn_pkt_drop_stats_req, NULL);\
+ NBL_DISP_SET_OPS(get_ustore_total_pkt_drop_stats, \
+ nbl_disp_get_ustore_total_pkt_drop_stats, \
+ NBL_DISP_CTRL_LVL_MGT, \
+ NBL_CHAN_GET_USTORE_TOTAL_PKT_DROP_STATS, \
+ nbl_disp_chan_get_ustore_total_pkt_drop_stats_req,\
+ NULL); \
NBL_DISP_SET_OPS(reset_stats, nbl_disp_reset_stats, \
NBL_DISP_CTRL_LVL_ALWAYS, -1, \
NULL, NULL); \
diff --git a/drivers/net/nbl/nbl_include/nbl_def_channel.h b/drivers/net/nbl/nbl_include/nbl_def_channel.h
index 6b150ed715..55880737f1 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_channel.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_channel.h
@@ -395,6 +395,11 @@ struct nbl_chan_param_get_private_stat_data {
u32 data_len;
};
+struct nbl_chan_param_get_uvn_pkt_drop_stats {
+ u16 vsi_id;
+ u16 num_queues;
+};
+
struct nbl_chan_send_info {
uint16_t dstid;
uint16_t msg_type;
diff --git a/drivers/net/nbl/nbl_include/nbl_def_dispatch.h b/drivers/net/nbl/nbl_include/nbl_def_dispatch.h
index 5284e9a929..45e7504a07 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_dispatch.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_dispatch.h
@@ -76,6 +76,10 @@ struct nbl_dispatch_ops {
void (*get_link_state)(void *priv, u8 eth_id, struct nbl_eth_link_info *eth_link_info);
int (*get_stats)(void *priv, struct rte_eth_stats *rte_stats,
struct eth_queue_stats *qstats);
+ int (*get_uvn_pkt_drop_stats)(void *priv, u16 vsi_id,
+ u16 num_queues, u32 *uvn_stat_pkt_drop);
+ int (*get_ustore_total_pkt_drop_stats)(void *priv, u8 eth_id,
+ struct nbl_ustore_stats *ustore_stats);
int (*reset_stats)(void *priv);
int (*get_txrx_xstats_cnt)(void *priv, u16 *xstats_cnt);
int (*get_txrx_xstats)(void *priv, struct rte_eth_xstat *xstats,
diff --git a/drivers/net/nbl/nbl_include/nbl_def_resource.h b/drivers/net/nbl/nbl_include/nbl_def_resource.h
index b8f19f81b5..6935598789 100644
--- a/drivers/net/nbl/nbl_include/nbl_def_resource.h
+++ b/drivers/net/nbl/nbl_include/nbl_def_resource.h
@@ -46,6 +46,10 @@ struct nbl_resource_ops {
void (*release_rx_ring)(void *priv, u16 queue_idx);
int (*get_stats)(void *priv, struct rte_eth_stats *rte_stats,
struct eth_queue_stats *qstats);
+ int (*get_uvn_pkt_drop_stats)(void *priv, u16 vsi_id,
+ u16 num_queues, u32 *uvn_stat_pkt_drop);
+ int (*get_ustore_total_pkt_drop_stats)(void *priv, u8 eth_id,
+ struct nbl_ustore_stats *ustore_stats);
int (*reset_stats)(void *priv);
int (*get_txrx_xstats_cnt)(void *priv, u16 *xstats_cnt);
int (*get_txrx_xstats)(void *priv, struct rte_eth_xstat *xstats,
diff --git a/drivers/net/nbl/nbl_include/nbl_include.h b/drivers/net/nbl/nbl_include/nbl_include.h
index f565e321d4..e095799503 100644
--- a/drivers/net/nbl/nbl_include/nbl_include.h
+++ b/drivers/net/nbl/nbl_include/nbl_include.h
@@ -25,6 +25,7 @@
#include <unistd.h>
#include <net/if.h>
+#include <rte_alarm.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
@@ -203,4 +204,9 @@ struct nbl_txq_stats {
uint64_t tx_tso_packets;
};
+struct nbl_ustore_stats {
+ u64 rx_drop_packets;
+ u64 rx_trun_packets;
+};
+
#endif
--
2.34.1