From: Alejandro Lucero <alejandro.lucero@netronome.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v9 4/9] nfp: adding stats
Date: Thu, 26 Nov 2015 09:49:24 +0000
Message-ID: <1448531369-8808-5-git-send-email-alejandro.lucero@netronome.com>
In-Reply-To: <1448531369-8808-1-git-send-email-alejandro.lucero@netronome.com>
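
The NFP exposes packet, byte, error and discard counters per ring and
per device; read them with nn_cfg_readq() to implement the ethdev
stats_get and stats_reset callbacks. The driver never clears the
hardware counters: stats_reset records the current values in
hw->eth_stats_base, and stats_get reports the difference between the
current hardware counters and that baseline.
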
Signed-off-by: Alejandro Lucero <alejandro.lucero@netronome.com>
Signed-off-by: Rolf Neugebauer <rolf.neugebauer@netronome.com>
---
 drivers/net/nfp/nfp_net.c | 174 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 174 insertions(+)
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 8451a49..fc02916 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -90,6 +90,9 @@ static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 				  uint16_t nb_desc, unsigned int socket_id,
 				  const struct rte_eth_txconf *tx_conf);
 static int nfp_net_start(struct rte_eth_dev *dev);
+static void nfp_net_stats_get(struct rte_eth_dev *dev,
+			      struct rte_eth_stats *stats);
+static void nfp_net_stats_reset(struct rte_eth_dev *dev);
 static void nfp_net_stop(struct rte_eth_dev *dev);
 static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
@@ -679,6 +682,172 @@ nfp_net_close(struct rte_eth_dev *dev)
 	 */
 }
 
+static void
+nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	int i;
+	struct nfp_net_hw *hw;
+	struct rte_eth_stats nfp_dev_stats;
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Zero the copy; not every queue counter is written below */
+	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
+	/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
+
+	/* reading per RX ring stats */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+			break;
+
+		nfp_dev_stats.q_ipackets[i] =
+			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+
+		nfp_dev_stats.q_ipackets[i] -=
+			hw->eth_stats_base.q_ipackets[i];
+
+		nfp_dev_stats.q_ibytes[i] =
+			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+
+		nfp_dev_stats.q_ibytes[i] -=
+			hw->eth_stats_base.q_ibytes[i];
+	}
+
+	/* reading per TX ring stats */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+			break;
+
+		nfp_dev_stats.q_opackets[i] =
+			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+
+		nfp_dev_stats.q_opackets[i] -=
+			hw->eth_stats_base.q_opackets[i];
+
+		nfp_dev_stats.q_obytes[i] =
+			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+
+		nfp_dev_stats.q_obytes[i] -=
+			hw->eth_stats_base.q_obytes[i];
+	}
+
+	nfp_dev_stats.ipackets =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+
+	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
+
+	nfp_dev_stats.ibytes =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+
+	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
+
+	nfp_dev_stats.opackets =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+
+	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
+
+	nfp_dev_stats.obytes =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+
+	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
+
+	/* reading general device stats */
+	nfp_dev_stats.ierrors =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+
+	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
+
+	nfp_dev_stats.oerrors =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+
+	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
+
+	/* Multicast frames received */
+	nfp_dev_stats.imcasts =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
+
+	nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
+
+	/* RX ring mbuf allocation failures */
+	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+
+	nfp_dev_stats.imissed =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+
+	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
+
+	if (stats)
+		memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+}
+
+static void
+nfp_net_stats_reset(struct rte_eth_dev *dev)
+{
+	int i;
+	struct nfp_net_hw *hw;
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * hw->eth_stats_base records the per-counter starting point.
+	 * Let's update it now.
+	 */
+
+	/* reading per RX ring stats */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+			break;
+
+		hw->eth_stats_base.q_ipackets[i] =
+			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+
+		hw->eth_stats_base.q_ibytes[i] =
+			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+	}
+
+	/* reading per TX ring stats */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+			break;
+
+		hw->eth_stats_base.q_opackets[i] =
+			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+
+		hw->eth_stats_base.q_obytes[i] =
+			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+	}
+
+	hw->eth_stats_base.ipackets =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+
+	hw->eth_stats_base.ibytes =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+
+	hw->eth_stats_base.opackets =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+
+	hw->eth_stats_base.obytes =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+
+	/* reading general device stats */
+	hw->eth_stats_base.ierrors =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+
+	hw->eth_stats_base.oerrors =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+
+	/* Multicast frames received */
+	hw->eth_stats_base.imcasts =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
+
+	/* RX ring mbuf allocation failures */
+	dev->data->rx_mbuf_alloc_failed = 0;
+
+	hw->eth_stats_base.imissed =
+		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+}
+
 static uint32_t
 nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
@@ -1721,6 +1890,8 @@ static struct eth_dev_ops nfp_net_eth_dev_ops = {
 	.dev_start = nfp_net_start,
 	.dev_stop = nfp_net_stop,
 	.dev_close = nfp_net_close,
+	.stats_get = nfp_net_stats_get,
+	.stats_reset = nfp_net_stats_reset,
 	.reta_update = nfp_net_reta_update,
 	.reta_query = nfp_net_reta_query,
 	.rss_hash_update = nfp_net_rss_hash_update,
@@ -1852,6 +2023,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
+	/* Recording current stats counters values */
+	nfp_net_stats_reset(eth_dev);
+
 	return 0;
 }
 
--
1.7.9.5
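
For reference, here is a minimal sketch (not part of the patch) of how
an application would exercise these callbacks through the generic
ethdev API. It assumes the DPDK 2.x era signatures, where
rte_eth_stats_get() and rte_eth_stats_reset() return void and take a
uint8_t port id; the function name and port number are illustrative:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Print cumulative counters for a port, then rebase them.
 * For an NFP port, rte_eth_stats_get() dispatches to
 * nfp_net_stats_get() and rte_eth_stats_reset() to
 * nfp_net_stats_reset() via the eth_dev_ops table above. */
static void
dump_and_rebase_stats(uint8_t port_id)
{
	struct rte_eth_stats stats;

	rte_eth_stats_get(port_id, &stats);
	printf("port %u: rx %" PRIu64 " pkts/%" PRIu64 " bytes, "
	       "tx %" PRIu64 " pkts/%" PRIu64 " bytes, "
	       "ierrors %" PRIu64 ", imissed %" PRIu64 "\n",
	       (unsigned)port_id, stats.ipackets, stats.ibytes,
	       stats.opackets, stats.obytes,
	       stats.ierrors, stats.imissed);

	/* After this, hw->eth_stats_base holds the current hardware
	 * counters, so the next stats_get() reports deltas from here. */
	rte_eth_stats_reset(port_id);
}

The same baseline mechanism is why nfp_net_init() calls
nfp_net_stats_reset() once at probe time: it captures whatever the
hardware counters already hold, so the first stats_get() after init
starts from zero.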