From: Wenbo Cao <caowenbo@mucse.com>
To: thomas@monjalon.net, Wenbo Cao <caowenbo@mucse.com>
Cc: stephen@networkplumber.org, dev@dpdk.org, ferruh.yigit@amd.com,
andrew.rybchenko@oktetlabs.ru, yaojun@mucse.com
Subject: [PATCH v7 19/28] net/rnp: add support basic stats operation
Date: Sat, 8 Feb 2025 10:43:56 +0800 [thread overview]
Message-ID: <1738982645-34550-20-git-send-email-caowenbo@mucse.com> (raw)
In-Reply-To: <1738982645-34550-1-git-send-email-caowenbo@mucse.com>
Add support for basic stats operations, including hardware-missed packet
counters and per-queue rx/tx packet and byte counts.
Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
doc/guides/nics/features/rnp.ini | 2 +
doc/guides/nics/rnp.rst | 1 +
drivers/net/rnp/base/rnp_eth_regs.h | 3 +
drivers/net/rnp/rnp.h | 10 ++-
drivers/net/rnp/rnp_ethdev.c | 147 ++++++++++++++++++++++++++++++++++++
drivers/net/rnp/rnp_rxtx.c | 9 +++
drivers/net/rnp/rnp_rxtx.h | 10 +++
7 files changed, 181 insertions(+), 1 deletion(-)
diff --git a/doc/guides/nics/features/rnp.ini b/doc/guides/nics/features/rnp.ini
index c68d6fb..45dae3b 100644
--- a/doc/guides/nics/features/rnp.ini
+++ b/doc/guides/nics/features/rnp.ini
@@ -7,6 +7,8 @@
Speed capabilities = Y
Link status = Y
Link status event = Y
+Basic stats = Y
+Stats per queue = Y
Queue start/stop = Y
Promiscuous mode = Y
Allmulticast mode = Y
diff --git a/doc/guides/nics/rnp.rst b/doc/guides/nics/rnp.rst
index db64104..ec6f3f9 100644
--- a/doc/guides/nics/rnp.rst
+++ b/doc/guides/nics/rnp.rst
@@ -19,6 +19,7 @@ Features
- MTU update
- Jumbo frames
- Scatter-Gather IO support
+- Port hardware statistics
Prerequisites
-------------
diff --git a/drivers/net/rnp/base/rnp_eth_regs.h b/drivers/net/rnp/base/rnp_eth_regs.h
index 91a18dd..391688b 100644
--- a/drivers/net/rnp/base/rnp_eth_regs.h
+++ b/drivers/net/rnp/base/rnp_eth_regs.h
@@ -23,6 +23,9 @@
#define RNP_RX_FC_ENABLE _ETH_(0x8520)
#define RNP_RING_FC_EN(n) _ETH_(0x8524 + ((0x4) * ((n) / 32)))
#define RNP_RING_FC_THRESH(n) _ETH_(0x8a00 + ((0x4) * (n)))
+/* ETH Statistic */
+#define RNP_ETH_RXTRANS_DROP _ETH_(0x8904)
+#define RNP_ETH_RXTRUNC_DROP _ETH_(0x8928)
/* Mac Host Filter */
#define RNP_MAC_FCTRL _ETH_(0x9110)
#define RNP_MAC_FCTRL_MPE RTE_BIT32(8) /* Multicast Promiscuous En */
diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h
index 054382e..b4f4f28 100644
--- a/drivers/net/rnp/rnp.h
+++ b/drivers/net/rnp/rnp.h
@@ -10,7 +10,7 @@
#include "base/rnp_hw.h"
#define PCI_VENDOR_ID_MUCSE (0x8848)
-#define RNP_DEV_ID_N10G (0x1000)
+#define RNP_DEV_ID_N10G (0x1020)
#define RNP_MAX_VF_NUM (64)
#define RNP_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
/* maximum frame size supported */
@@ -105,6 +105,11 @@ struct rnp_proc_priv {
const struct rnp_mbx_ops *mbx_ops;
};
+struct rnp_hw_eth_stats {
+ uint64_t rx_trans_drop; /* rx eth to dma fifo full drop */
+ uint64_t rx_trunc_drop; /* rx mac to eth to host copy fifo full drop */
+};
+
struct rnp_eth_port {
struct rnp_proc_priv *proc_priv;
struct rte_ether_addr mac_addr;
@@ -113,6 +118,9 @@ struct rnp_eth_port {
struct rnp_tx_queue *tx_queues[RNP_MAX_RX_QUEUE_NUM];
struct rnp_hw *hw;
+ struct rnp_hw_eth_stats eth_stats_old;
+ struct rnp_hw_eth_stats eth_stats;
+
struct rte_eth_rss_conf rss_conf;
uint16_t last_rx_num;
bool rxq_num_changed;
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index 0fcb256..fa2617b 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -770,6 +770,150 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
return 0;
}
+struct rte_rnp_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t offset;
+ uint32_t reg_base;
+ bool hi_addr_en;
+};
+
+static const struct rte_rnp_xstats_name_off rte_rnp_rx_eth_stats_str[] = {
+	{"eth_rx_trans_drop", offsetof(struct rnp_hw_eth_stats,
+			rx_trans_drop), RNP_ETH_RXTRANS_DROP, false},
+	{"eth_rx_fifo_drop", offsetof(struct rnp_hw_eth_stats,
+			rx_trunc_drop), RNP_ETH_RXTRUNC_DROP, false},
+};
+#define RNP_NB_RX_HW_ETH_STATS (RTE_DIM(rte_rnp_rx_eth_stats_str))
+#define RNP_GET_E_HW_COUNT(stats, offset) \
+ ((uint64_t *)(((char *)stats) + (offset)))
+#define RNP_ADD_INCL_COUNT(stats, offset, val) \
+ ((*(RNP_GET_E_HW_COUNT(stats, (offset)))) += val)
+
+static inline void
+rnp_update_eth_stats_32bit(struct rnp_hw_eth_stats *new,
+			   struct rnp_hw_eth_stats *old,
+			   uint32_t offset, uint32_t val)
+{
+	uint64_t *last_count = NULL;
+
+	last_count = RNP_GET_E_HW_COUNT(old, offset);
+	if (val >= *last_count)
+		RNP_ADD_INCL_COUNT(new, offset, val - (*last_count));
+	else /* 32-bit hw counter wrapped: delta = val + 2^32 - last */
+		RNP_ADD_INCL_COUNT(new, offset, (uint64_t)val + UINT32_MAX + 1 - (*last_count));
+	*last_count = val;
+}
+
+static void rnp_get_eth_count(struct rnp_hw *hw,
+ uint16_t lane,
+ struct rnp_hw_eth_stats *new,
+ struct rnp_hw_eth_stats *old,
+ const struct rte_rnp_xstats_name_off *ptr)
+{
+ uint64_t val = 0;
+
+ if (ptr->reg_base) {
+ val = RNP_E_REG_RD(hw, ptr->reg_base + 0x40 * lane);
+ rnp_update_eth_stats_32bit(new, old, ptr->offset, val);
+ }
+}
+
+static void rnp_get_hw_stats(struct rte_eth_dev *dev)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ struct rnp_hw_eth_stats *old = &port->eth_stats_old;
+ struct rnp_hw_eth_stats *new = &port->eth_stats;
+ const struct rte_rnp_xstats_name_off *ptr;
+ uint16_t lane = port->attr.nr_lane;
+ struct rnp_hw *hw = port->hw;
+ uint16_t i;
+
+ for (i = 0; i < RNP_NB_RX_HW_ETH_STATS; i++) {
+ ptr = &rte_rnp_rx_eth_stats_str[i];
+ rnp_get_eth_count(hw, lane, new, old, ptr);
+ }
+}
+
+static int
+rnp_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ struct rnp_hw_eth_stats *eth_stats = &port->eth_stats;
+ struct rte_eth_dev_data *data = dev->data;
+ int i = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ rnp_get_hw_stats(dev);
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ if (!data->rx_queues[i])
+ continue;
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = ((struct rnp_rx_queue **)
+ (data->rx_queues))[i]->stats.ipackets;
+ stats->q_ibytes[i] = ((struct rnp_rx_queue **)
+ (data->rx_queues))[i]->stats.ibytes;
+ stats->ipackets += stats->q_ipackets[i];
+ stats->ibytes += stats->q_ibytes[i];
+ } else {
+ stats->ipackets += ((struct rnp_rx_queue **)
+ (data->rx_queues))[i]->stats.ipackets;
+ stats->ibytes += ((struct rnp_rx_queue **)
+ (data->rx_queues))[i]->stats.ibytes;
+ }
+ }
+
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ if (!data->tx_queues[i])
+ continue;
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = ((struct rnp_tx_queue **)
+ (data->tx_queues))[i]->stats.opackets;
+ stats->q_obytes[i] = ((struct rnp_tx_queue **)
+ (data->tx_queues))[i]->stats.obytes;
+ stats->opackets += stats->q_opackets[i];
+ stats->obytes += stats->q_obytes[i];
+ } else {
+ stats->opackets += ((struct rnp_tx_queue **)
+ (data->tx_queues))[i]->stats.opackets;
+ stats->obytes += ((struct rnp_tx_queue **)
+ (data->tx_queues))[i]->stats.obytes;
+ }
+ }
+ stats->imissed = eth_stats->rx_trans_drop + eth_stats->rx_trunc_drop;
+
+ return 0;
+}
+
+static int
+rnp_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ struct rnp_hw_eth_stats *eth_stats = &port->eth_stats;
+ struct rnp_rx_queue *rxq;
+ struct rnp_tx_queue *txq;
+ uint16_t idx;
+
+ PMD_INIT_FUNC_TRACE();
+ memset(eth_stats, 0, sizeof(*eth_stats));
+ for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+ rxq = ((struct rnp_rx_queue **)
+ (dev->data->rx_queues))[idx];
+ if (!rxq)
+ continue;
+ memset(&rxq->stats, 0, sizeof(struct rnp_queue_stats));
+ }
+ for (idx = 0; idx < dev->data->nb_tx_queues; idx++) {
+ txq = ((struct rnp_tx_queue **)
+ (dev->data->tx_queues))[idx];
+ if (!txq)
+ continue;
+ memset(&txq->stats, 0, sizeof(struct rnp_queue_stats));
+ }
+
+ return 0;
+}
+
/* Features supported by this driver */
static const struct eth_dev_ops rnp_eth_dev_ops = {
.dev_configure = rnp_dev_configure,
@@ -793,6 +937,9 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
.reta_query = rnp_dev_rss_reta_query,
.rss_hash_update = rnp_dev_rss_hash_update,
.rss_hash_conf_get = rnp_dev_rss_hash_conf_get,
+ /* stats */
+ .stats_get = rnp_dev_stats_get,
+ .stats_reset = rnp_dev_stats_reset,
/* link impl */
.link_update = rnp_dev_link_update,
.dev_set_link_up = rnp_dev_set_link_up,
diff --git a/drivers/net/rnp/rnp_rxtx.c b/drivers/net/rnp/rnp_rxtx.c
index 777ce7b..c351fee 100644
--- a/drivers/net/rnp/rnp_rxtx.c
+++ b/drivers/net/rnp/rnp_rxtx.c
@@ -741,6 +741,8 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
nmb->packet_type = 0;
nmb->ol_flags = 0;
nmb->nb_segs = 1;
+
+ rxq->stats.ibytes += nmb->data_len;
}
for (j = 0; j < nb_dd; ++j) {
rx_pkts[i + j] = rx_swbd[j].mbuf;
@@ -752,6 +754,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
if (nb_dd != RNP_CACHE_FETCH_RX)
break;
}
+ rxq->stats.ipackets += nb_rx;
rxq->rx_tail = (rxq->rx_tail + nb_rx) & rxq->attr.nb_desc_mask;
rxq->rxrearm_nb = rxq->rxrearm_nb + nb_rx;
@@ -821,6 +824,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
txbd->d.blen = tx_swbd->mbuf->data_len;
txbd->d.cmd = RNP_CMD_EOP;
+ txq->stats.obytes += txbd->d.blen;
i = (i + 1) & txq->attr.nb_desc_mask;
}
txq->nb_tx_free -= start;
@@ -832,6 +836,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
if (txq->tx_next_rs > txq->attr.nb_desc)
txq->tx_next_rs = txq->tx_rs_thresh - 1;
}
+ txq->stats.opackets += start;
txq->tx_tail = i;
rte_wmb();
@@ -936,6 +941,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
}
rxm->next = NULL;
first_seg->port = rxq->attr.port_id;
+ rxq->stats.ibytes += first_seg->pkt_len;
/* this the end of packet the large pkt has been recv finish */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
first_seg->data_off));
@@ -944,6 +950,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
}
if (!nb_rx)
return 0;
+ rxq->stats.ipackets += nb_rx;
/* update sw record point */
rxq->rx_tail = rx_id;
rxq->pkt_first_seg = first_seg;
@@ -1033,6 +1040,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
tx_id = txe->next_id;
txe = txn;
} while (m_seg != NULL);
+ txq->stats.obytes += tx_pkt->pkt_len;
txbd->d.cmd |= RNP_CMD_EOP;
txq->nb_tx_used = (uint16_t)txq->nb_tx_used + nb_used_bd;
txq->nb_tx_free = (uint16_t)txq->nb_tx_free - nb_used_bd;
@@ -1044,6 +1052,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
}
if (!send_pkts)
return 0;
+ txq->stats.opackets += send_pkts;
txq->tx_tail = tx_id;
rte_wmb();
diff --git a/drivers/net/rnp/rnp_rxtx.h b/drivers/net/rnp/rnp_rxtx.h
index f631285..d26497a 100644
--- a/drivers/net/rnp/rnp_rxtx.h
+++ b/drivers/net/rnp/rnp_rxtx.h
@@ -47,6 +47,14 @@ struct rnp_rxsw_entry {
struct rte_mbuf *mbuf;
};
+struct rnp_queue_stats {
+ uint64_t obytes;
+ uint64_t opackets;
+
+ uint64_t ibytes;
+ uint64_t ipackets;
+};
+
struct rnp_rx_queue {
struct rte_mempool *mb_pool; /* mbuf pool to populate rx ring. */
const struct rte_memzone *rz; /* rx hw ring base alloc memzone */
@@ -73,6 +81,7 @@ struct rnp_rx_queue {
uint8_t pthresh; /* rx desc prefetch threshold */
uint8_t pburst; /* rx desc prefetch burst */
+ struct rnp_queue_stats stats;
uint64_t rx_offloads; /* user set hw offload features */
struct rte_mbuf **free_mbufs; /* rx bulk alloc reserve of free mbufs */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
@@ -113,6 +122,7 @@ struct rnp_tx_queue {
uint8_t pthresh; /* rx desc prefetch threshold */
uint8_t pburst; /* rx desc burst*/
+ struct rnp_queue_stats stats;
uint64_t tx_offloads; /* tx offload features */
struct rte_mbuf **free_mbufs; /* tx bulk free reserve of free mbufs */
};
--
1.8.3.1
next prev parent reply other threads:[~2025-02-08 2:46 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-08 2:43 [PATCH v7 00/28] [v6]drivers/net Add Support mucse N10 Pmd Driver Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 01/28] net/rnp: add skeleton Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 02/28] net/rnp: add ethdev probe and remove Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 03/28] net/rnp: add log Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 04/28] net/rnp: support mailbox basic operate Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 05/28] net/rnp: add device init and uninit Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 06/28] net/rnp: add get device information operation Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 07/28] net/rnp: add support mac promisc mode Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 08/28] net/rnp: add queue setup and release operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 09/28] net/rnp: add queue stop and start operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 10/28] net/rnp: add support device start stop operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 11/28] net/rnp: add RSS support operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 12/28] net/rnp: add support link update operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 13/28] net/rnp: add support link setup operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 14/28] net/rnp: add Rx burst simple support Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 15/28] net/rnp: add Tx " Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 16/28] net/rnp: add MTU set operation Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 17/28] net/rnp: add Rx scatter segment version Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 18/28] net/rnp: add Tx multiple " Wenbo Cao
2025-02-08 2:43 ` Wenbo Cao [this message]
2025-02-08 2:43 ` [PATCH v7 20/28] net/rnp: add support xstats operation Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 21/28] net/rnp: add unicast MAC filter operation Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 22/28] net/rnp: add supported packet types Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 23/28] net/rnp: add support Rx checksum offload Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 24/28] net/rnp: add support Tx TSO offload Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 25/28] net/rnp: support VLAN offloads Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 26/28] net/rnp: add support VLAN filters operations Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 27/28] net/rnp: add queue info operation Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 28/28] net/rnp: support Rx/Tx burst mode info Wenbo Cao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1738982645-34550-20-git-send-email-caowenbo@mucse.com \
--to=caowenbo@mucse.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@amd.com \
--cc=stephen@networkplumber.org \
--cc=thomas@monjalon.net \
--cc=yaojun@mucse.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).