DPDK patches and discussions
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v2 09/26] net/ngbe: support basic statistics
Date: Thu, 21 Oct 2021 17:50:06 +0800
Message-ID: <20211021095023.18288-10-jiawenwu@trustnetic.com>
In-Reply-To: <20211021095023.18288-1-jiawenwu@trustnetic.com>

Add support for reading and clearing basic statistics.
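
For context, applications consume these counters through the generic ethdev
API; the new stats_get/stats_reset ops back rte_eth_stats_get() and
rte_eth_stats_reset(). A minimal application-side sketch, assuming port_id
refers to a started ngbe port:

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_ethdev.h>

	/* Dump, then clear, the basic counters for one port. */
	static void
	dump_and_clear_stats(uint16_t port_id)
	{
		struct rte_eth_stats stats;

		if (rte_eth_stats_get(port_id, &stats) == 0)
			printf("rx=%" PRIu64 " tx=%" PRIu64
			       " imissed=%" PRIu64 " ierrors=%" PRIu64 "\n",
			       stats.ipackets, stats.opackets,
			       stats.imissed, stats.ierrors);

		/* Lands in ngbe_dev_stats_reset(): clears the clear-on-read
		 * hardware counters and zeroes the software totals. */
		rte_eth_stats_reset(port_id);
	}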

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/ngbe.ini      |   2 +
 doc/guides/nics/ngbe.rst               |   1 +
 doc/guides/rel_notes/release_21_11.rst |   1 +
 drivers/net/ngbe/base/ngbe_dummy.h     |   5 +
 drivers/net/ngbe/base/ngbe_hw.c        | 101 ++++++++++
 drivers/net/ngbe/base/ngbe_hw.h        |   1 +
 drivers/net/ngbe/base/ngbe_type.h      | 134 +++++++++++++
 drivers/net/ngbe/ngbe_ethdev.c         | 250 +++++++++++++++++++++++++
 drivers/net/ngbe/ngbe_ethdev.h         |  18 ++
 9 files changed, 513 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
index 17c5e034e6..dc17938104 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -21,6 +21,8 @@ L4 checksum offload  = Y
 Inner L3 checksum    = Y
 Inner L4 checksum    = Y
 Packet type parsing  = Y
+Basic stats          = Y
+Stats per queue      = Y
 Multiprocess aware   = Y
 Linux                = Y
 ARMv8                = Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index be70b0f51c..a180acbea3 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -16,6 +16,7 @@ Features
 - Checksum offload
 - VLAN/QinQ stripping and inserting
 - TSO offload
+- Port hardware statistics
 - Jumbo frames
 - Link state information
 - Scatter and gather for TX and RX
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 3047452d1c..7d11248342 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -149,6 +149,7 @@ New Features
 
   * Added offloads and packet type on RxTx.
   * Added VLAN filters.
+  * Added device basic statistics.
 
 * **Added multi-process support for testpmd.**
 
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h b/drivers/net/ngbe/base/ngbe_dummy.h
index fe0596887c..59c8097241 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -55,6 +55,10 @@ static inline s32 ngbe_mac_stop_hw_dummy(struct ngbe_hw *TUP0)
 {
 	return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_clear_hw_cntrs_dummy(struct ngbe_hw *TUP0)
+{
+	return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_get_mac_addr_dummy(struct ngbe_hw *TUP0, u8 *TUP1)
 {
 	return NGBE_ERR_OPS_DUMMY;
@@ -182,6 +186,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
 	hw->mac.reset_hw = ngbe_mac_reset_hw_dummy;
 	hw->mac.start_hw = ngbe_mac_start_hw_dummy;
 	hw->mac.stop_hw = ngbe_mac_stop_hw_dummy;
+	hw->mac.clear_hw_cntrs = ngbe_mac_clear_hw_cntrs_dummy;
 	hw->mac.get_mac_addr = ngbe_mac_get_mac_addr_dummy;
 	hw->mac.enable_rx_dma = ngbe_mac_enable_rx_dma_dummy;
 	hw->mac.disable_sec_rx_path = ngbe_mac_disable_sec_rx_path_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index bfd744fa66..c12e6e6dfd 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -22,6 +22,9 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
 	/* Clear the VLAN filter table */
 	hw->mac.clear_vfta(hw);
 
+	/* Clear statistics registers */
+	hw->mac.clear_hw_cntrs(hw);
+
 	/* Clear adapter stopped flag */
 	hw->adapter_stopped = false;
 
@@ -162,6 +165,7 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
 	msec_delay(50);
 
 	ngbe_reset_misc_em(hw);
+	hw->mac.clear_hw_cntrs(hw);
 
 	msec_delay(50);
 
@@ -178,6 +182,102 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
 	return status;
 }
 
+/**
+ *  ngbe_clear_hw_cntrs - Generic clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware.
+ *  Statistics counters are cleared on read.
+ **/
+s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
+{
+	u16 i = 0;
+
+	DEBUGFUNC("ngbe_clear_hw_cntrs");
+
+	/* QP Stats */
+	/* don't write clear queue stats */
+	for (i = 0; i < NGBE_MAX_QP; i++) {
+		hw->qp_last[i].rx_qp_packets = 0;
+		hw->qp_last[i].tx_qp_packets = 0;
+		hw->qp_last[i].rx_qp_bytes = 0;
+		hw->qp_last[i].tx_qp_bytes = 0;
+		hw->qp_last[i].rx_qp_mc_packets = 0;
+		hw->qp_last[i].tx_qp_mc_packets = 0;
+		hw->qp_last[i].rx_qp_bc_packets = 0;
+		hw->qp_last[i].tx_qp_bc_packets = 0;
+	}
+
+	/* PB Stats */
+	rd32(hw, NGBE_PBRXLNKXON);
+	rd32(hw, NGBE_PBRXLNKXOFF);
+	rd32(hw, NGBE_PBTXLNKXON);
+	rd32(hw, NGBE_PBTXLNKXOFF);
+
+	/* DMA Stats */
+	rd32(hw, NGBE_DMARXPKT);
+	rd32(hw, NGBE_DMATXPKT);
+
+	rd64(hw, NGBE_DMARXOCTL);
+	rd64(hw, NGBE_DMATXOCTL);
+
+	/* MAC Stats */
+	rd64(hw, NGBE_MACRXERRCRCL);
+	rd64(hw, NGBE_MACRXMPKTL);
+	rd64(hw, NGBE_MACTXMPKTL);
+
+	rd64(hw, NGBE_MACRXPKTL);
+	rd64(hw, NGBE_MACTXPKTL);
+	rd64(hw, NGBE_MACRXGBOCTL);
+
+	rd64(hw, NGBE_MACRXOCTL);
+	rd32(hw, NGBE_MACTXOCTL);
+
+	rd64(hw, NGBE_MACRX1TO64L);
+	rd64(hw, NGBE_MACRX65TO127L);
+	rd64(hw, NGBE_MACRX128TO255L);
+	rd64(hw, NGBE_MACRX256TO511L);
+	rd64(hw, NGBE_MACRX512TO1023L);
+	rd64(hw, NGBE_MACRX1024TOMAXL);
+	rd64(hw, NGBE_MACTX1TO64L);
+	rd64(hw, NGBE_MACTX65TO127L);
+	rd64(hw, NGBE_MACTX128TO255L);
+	rd64(hw, NGBE_MACTX256TO511L);
+	rd64(hw, NGBE_MACTX512TO1023L);
+	rd64(hw, NGBE_MACTX1024TOMAXL);
+
+	rd64(hw, NGBE_MACRXERRLENL);
+	rd32(hw, NGBE_MACRXOVERSIZE);
+	rd32(hw, NGBE_MACRXJABBER);
+
+	/* MACsec Stats */
+	rd32(hw, NGBE_LSECTX_UTPKT);
+	rd32(hw, NGBE_LSECTX_ENCPKT);
+	rd32(hw, NGBE_LSECTX_PROTPKT);
+	rd32(hw, NGBE_LSECTX_ENCOCT);
+	rd32(hw, NGBE_LSECTX_PROTOCT);
+	rd32(hw, NGBE_LSECRX_UTPKT);
+	rd32(hw, NGBE_LSECRX_BTPKT);
+	rd32(hw, NGBE_LSECRX_NOSCIPKT);
+	rd32(hw, NGBE_LSECRX_UNSCIPKT);
+	rd32(hw, NGBE_LSECRX_DECOCT);
+	rd32(hw, NGBE_LSECRX_VLDOCT);
+	rd32(hw, NGBE_LSECRX_UNCHKPKT);
+	rd32(hw, NGBE_LSECRX_DLYPKT);
+	rd32(hw, NGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		rd32(hw, NGBE_LSECRX_OKPKT(i));
+		rd32(hw, NGBE_LSECRX_INVPKT(i));
+		rd32(hw, NGBE_LSECRX_BADPKT(i));
+	}
+	for (i = 0; i < 4; i++) {
+		rd32(hw, NGBE_LSECRX_INVSAPKT(i));
+		rd32(hw, NGBE_LSECRX_BADSAPKT(i));
+	}
+
+	return 0;
+}
+
 /**
  *  ngbe_get_mac_addr - Generic get MAC address
  *  @hw: pointer to hardware structure
@@ -1015,6 +1115,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
 	mac->init_hw = ngbe_init_hw;
 	mac->reset_hw = ngbe_reset_hw_em;
 	mac->start_hw = ngbe_start_hw;
+	mac->clear_hw_cntrs = ngbe_clear_hw_cntrs;
 	mac->enable_rx_dma = ngbe_enable_rx_dma;
 	mac->get_mac_addr = ngbe_get_mac_addr;
 	mac->stop_hw = ngbe_stop_hw;
diff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h
index 3f9eee84e9..c4b94beb40 100644
--- a/drivers/net/ngbe/base/ngbe_hw.h
+++ b/drivers/net/ngbe/base/ngbe_hw.h
@@ -18,6 +18,7 @@ s32 ngbe_init_hw(struct ngbe_hw *hw);
 s32 ngbe_start_hw(struct ngbe_hw *hw);
 s32 ngbe_reset_hw_em(struct ngbe_hw *hw);
 s32 ngbe_stop_hw(struct ngbe_hw *hw);
+s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw);
 s32 ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr);
 
 void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw);
diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
index 68f82e1efb..93d0799852 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -10,6 +10,7 @@
 
 #define NGBE_FRAME_SIZE_DFT       (1522) /* Default frame size, +FCS */
 #define NGBE_NUM_POOL             (32)
+#define NGBE_MAX_QP               (8)
 
 #define NGBE_ALIGN		128 /* as intel did */
 #define NGBE_ISB_SIZE		16
@@ -78,6 +79,127 @@ struct ngbe_bus_info {
 	u8 lan_id;
 };
 
+/* Statistics counters collected by the MAC */
+/* PB[] RxTx */
+struct ngbe_pb_stats {
+	u64 tx_pb_xon_packets;
+	u64 rx_pb_xon_packets;
+	u64 tx_pb_xoff_packets;
+	u64 rx_pb_xoff_packets;
+	u64 rx_pb_dropped;
+	u64 rx_pb_mbuf_alloc_errors;
+	u64 tx_pb_xon2off_packets;
+};
+
+/* QP[] RxTx */
+struct ngbe_qp_stats {
+	u64 rx_qp_packets;
+	u64 tx_qp_packets;
+	u64 rx_qp_bytes;
+	u64 tx_qp_bytes;
+	u64 rx_qp_mc_packets;
+};
+
+struct ngbe_hw_stats {
+	/* MNG RxTx */
+	u64 mng_bmc2host_packets;
+	u64 mng_host2bmc_packets;
+	/* Basic RxTx */
+	u64 rx_drop_packets;
+	u64 tx_drop_packets;
+	u64 rx_dma_drop;
+	u64 tx_secdrp_packets;
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+	u64 rx_total_bytes;
+	u64 rx_total_packets;
+	u64 tx_total_packets;
+	u64 rx_total_missed_packets;
+	u64 rx_broadcast_packets;
+	u64 tx_broadcast_packets;
+	u64 rx_multicast_packets;
+	u64 tx_multicast_packets;
+	u64 rx_management_packets;
+	u64 tx_management_packets;
+	u64 rx_management_dropped;
+
+	/* Basic Error */
+	u64 rx_crc_errors;
+	u64 rx_illegal_byte_errors;
+	u64 rx_error_bytes;
+	u64 rx_mac_short_packet_dropped;
+	u64 rx_length_errors;
+	u64 rx_undersize_errors;
+	u64 rx_fragment_errors;
+	u64 rx_oversize_errors;
+	u64 rx_jabber_errors;
+	u64 rx_l3_l4_xsum_error;
+	u64 mac_local_errors;
+	u64 mac_remote_errors;
+
+	/* MACSEC */
+	u64 tx_macsec_pkts_untagged;
+	u64 tx_macsec_pkts_encrypted;
+	u64 tx_macsec_pkts_protected;
+	u64 tx_macsec_octets_encrypted;
+	u64 tx_macsec_octets_protected;
+	u64 rx_macsec_pkts_untagged;
+	u64 rx_macsec_pkts_badtag;
+	u64 rx_macsec_pkts_nosci;
+	u64 rx_macsec_pkts_unknownsci;
+	u64 rx_macsec_octets_decrypted;
+	u64 rx_macsec_octets_validated;
+	u64 rx_macsec_sc_pkts_unchecked;
+	u64 rx_macsec_sc_pkts_delayed;
+	u64 rx_macsec_sc_pkts_late;
+	u64 rx_macsec_sa_pkts_ok;
+	u64 rx_macsec_sa_pkts_invalid;
+	u64 rx_macsec_sa_pkts_notvalid;
+	u64 rx_macsec_sa_pkts_unusedsa;
+	u64 rx_macsec_sa_pkts_notusingsa;
+
+	/* MAC RxTx */
+	u64 rx_size_64_packets;
+	u64 rx_size_65_to_127_packets;
+	u64 rx_size_128_to_255_packets;
+	u64 rx_size_256_to_511_packets;
+	u64 rx_size_512_to_1023_packets;
+	u64 rx_size_1024_to_max_packets;
+	u64 tx_size_64_packets;
+	u64 tx_size_65_to_127_packets;
+	u64 tx_size_128_to_255_packets;
+	u64 tx_size_256_to_511_packets;
+	u64 tx_size_512_to_1023_packets;
+	u64 tx_size_1024_to_max_packets;
+
+	/* Flow Control */
+	u64 tx_xon_packets;
+	u64 rx_xon_packets;
+	u64 tx_xoff_packets;
+	u64 rx_xoff_packets;
+
+	u64 rx_up_dropped;
+
+	u64 rdb_pkt_cnt;
+	u64 rdb_repli_cnt;
+	u64 rdb_drp_cnt;
+
+	/* QP[] RxTx */
+	struct {
+		u64 rx_qp_packets;
+		u64 tx_qp_packets;
+		u64 rx_qp_bytes;
+		u64 tx_qp_bytes;
+		u64 rx_qp_mc_packets;
+		u64 tx_qp_mc_packets;
+		u64 rx_qp_bc_packets;
+		u64 tx_qp_bc_packets;
+	} qp[NGBE_MAX_QP];
+
+};
+
 struct ngbe_rom_info {
 	s32 (*init_params)(struct ngbe_hw *hw);
 	s32 (*validate_checksum)(struct ngbe_hw *hw, u16 *checksum_val);
@@ -97,6 +219,7 @@ struct ngbe_mac_info {
 	s32 (*reset_hw)(struct ngbe_hw *hw);
 	s32 (*start_hw)(struct ngbe_hw *hw);
 	s32 (*stop_hw)(struct ngbe_hw *hw);
+	s32 (*clear_hw_cntrs)(struct ngbe_hw *hw);
 	s32 (*get_mac_addr)(struct ngbe_hw *hw, u8 *mac_addr);
 	s32 (*enable_rx_dma)(struct ngbe_hw *hw, u32 regval);
 	s32 (*disable_sec_rx_path)(struct ngbe_hw *hw);
@@ -198,7 +321,18 @@ struct ngbe_hw {
 
 	u32 q_rx_regs[8 * 4];
 	u32 q_tx_regs[8 * 4];
+	bool offset_loaded;
 	bool is_pf;
+	struct {
+		u64 rx_qp_packets;
+		u64 tx_qp_packets;
+		u64 rx_qp_bytes;
+		u64 tx_qp_bytes;
+		u64 rx_qp_mc_packets;
+		u64 tx_qp_mc_packets;
+		u64 rx_qp_bc_packets;
+		u64 tx_qp_bc_packets;
+	} qp_last[NGBE_MAX_QP];
 };
 
 #include "ngbe_regs.h"
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index b8320a641c..3f1dac74e8 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -17,6 +17,7 @@
 static int ngbe_dev_close(struct rte_eth_dev *dev);
 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
 				int wait_to_complete);
+static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
 					uint16_t queue);
@@ -190,6 +191,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	}
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 	/* Vendor and Device ID need to be set before init of shared code */
 	hw->device_id = pci_dev->id.device_id;
@@ -236,6 +238,9 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 		return -EIO;
 	}
 
+	/* Reset the hw statistics */
+	ngbe_dev_stats_reset(eth_dev);
+
 	/* disable interrupt */
 	ngbe_disable_intr(hw);
 
@@ -741,6 +746,7 @@ static int
 ngbe_dev_start(struct rte_eth_dev *dev)
 {
 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
@@ -905,6 +911,9 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	 */
 	ngbe_dev_link_update(dev, 0);
 
+	ngbe_read_stats_registers(hw, hw_stats);
+	hw->offset_loaded = 1;
+
 	return 0;
 
 error:
@@ -1041,6 +1050,245 @@ ngbe_dev_reset(struct rte_eth_dev *dev)
 	return ret;
 }
 
+#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
+	{                                                       \
+		uint32_t current_counter = rd32(hw, reg);       \
+		if (current_counter < last_counter)             \
+			current_counter += 0x100000000LL;       \
+		if (!hw->offset_loaded)                         \
+			last_counter = current_counter;         \
+		counter = current_counter - last_counter;       \
+		counter &= 0xFFFFFFFFLL;                        \
+	}
+
+#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+	{                                                                \
+		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
+		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
+		uint64_t current_counter = (current_counter_msb << 32) | \
+			current_counter_lsb;                             \
+		if (current_counter < last_counter)                      \
+			current_counter += 0x1000000000LL;               \
+		if (!hw->offset_loaded)                                  \
+			last_counter = current_counter;                  \
+		counter = current_counter - last_counter;                \
+		counter &= 0xFFFFFFFFFLL;                                \
+	}
+
+void
+ngbe_read_stats_registers(struct ngbe_hw *hw,
+			   struct ngbe_hw_stats *hw_stats)
+{
+	unsigned int i;
+
+	/* QP Stats */
+	for (i = 0; i < hw->nb_rx_queues; i++) {
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
+			hw->qp_last[i].rx_qp_packets,
+			hw_stats->qp[i].rx_qp_packets);
+		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
+			hw->qp_last[i].rx_qp_bytes,
+			hw_stats->qp[i].rx_qp_bytes);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
+			hw->qp_last[i].rx_qp_mc_packets,
+			hw_stats->qp[i].rx_qp_mc_packets);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
+			hw->qp_last[i].rx_qp_bc_packets,
+			hw_stats->qp[i].rx_qp_bc_packets);
+	}
+
+	for (i = 0; i < hw->nb_tx_queues; i++) {
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
+			hw->qp_last[i].tx_qp_packets,
+			hw_stats->qp[i].tx_qp_packets);
+		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
+			hw->qp_last[i].tx_qp_bytes,
+			hw_stats->qp[i].tx_qp_bytes);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
+			hw->qp_last[i].tx_qp_mc_packets,
+			hw_stats->qp[i].tx_qp_mc_packets);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
+			hw->qp_last[i].tx_qp_bc_packets,
+			hw_stats->qp[i].tx_qp_bc_packets);
+	}
+
+	/* PB Stats */
+	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
+	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
+	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
+	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
+	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
+	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
+
+	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
+	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
+
+	/* DMA Stats */
+	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
+	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
+	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
+	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
+	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
+	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
+	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
+	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
+
+	/* MAC Stats */
+	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
+	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
+	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
+
+	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
+	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
+	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
+
+	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
+	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
+
+	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
+	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
+	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
+	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
+	hw_stats->rx_size_512_to_1023_packets +=
+			rd64(hw, NGBE_MACRX512TO1023L);
+	hw_stats->rx_size_1024_to_max_packets +=
+			rd64(hw, NGBE_MACRX1024TOMAXL);
+	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
+	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
+	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
+	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
+	hw_stats->tx_size_512_to_1023_packets +=
+			rd64(hw, NGBE_MACTX512TO1023L);
+	hw_stats->tx_size_1024_to_max_packets +=
+			rd64(hw, NGBE_MACTX1024TOMAXL);
+
+	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
+	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
+	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
+
+	/* MNG Stats */
+	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
+	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
+	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
+	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
+
+	/* MACsec Stats */
+	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
+	hw_stats->tx_macsec_pkts_encrypted +=
+			rd32(hw, NGBE_LSECTX_ENCPKT);
+	hw_stats->tx_macsec_pkts_protected +=
+			rd32(hw, NGBE_LSECTX_PROTPKT);
+	hw_stats->tx_macsec_octets_encrypted +=
+			rd32(hw, NGBE_LSECTX_ENCOCT);
+	hw_stats->tx_macsec_octets_protected +=
+			rd32(hw, NGBE_LSECTX_PROTOCT);
+	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
+	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
+	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
+	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
+	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
+	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
+	hw_stats->rx_macsec_sc_pkts_unchecked +=
+			rd32(hw, NGBE_LSECRX_UNCHKPKT);
+	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
+	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		hw_stats->rx_macsec_sa_pkts_ok +=
+			rd32(hw, NGBE_LSECRX_OKPKT(i));
+		hw_stats->rx_macsec_sa_pkts_invalid +=
+			rd32(hw, NGBE_LSECRX_INVPKT(i));
+		hw_stats->rx_macsec_sa_pkts_notvalid +=
+			rd32(hw, NGBE_LSECRX_BADPKT(i));
+	}
+	for (i = 0; i < 4; i++) {
+		hw_stats->rx_macsec_sa_pkts_unusedsa +=
+			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
+		hw_stats->rx_macsec_sa_pkts_notusingsa +=
+			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
+	}
+	hw_stats->rx_total_missed_packets =
+			hw_stats->rx_up_dropped;
+}
+
+static int
+ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+	struct ngbe_stat_mappings *stat_mappings =
+			NGBE_DEV_STAT_MAPPINGS(dev);
+	uint32_t i, j;
+
+	ngbe_read_stats_registers(hw, hw_stats);
+
+	if (stats == NULL)
+		return -EINVAL;
+
+	/* Fill out the rte_eth_stats statistics structure */
+	stats->ipackets = hw_stats->rx_packets;
+	stats->ibytes = hw_stats->rx_bytes;
+	stats->opackets = hw_stats->tx_packets;
+	stats->obytes = hw_stats->tx_bytes;
+
+	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
+	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
+	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
+	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
+	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
+	for (i = 0; i < NGBE_MAX_QP; i++) {
+		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
+		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
+		uint32_t q_map;
+
+		q_map = (stat_mappings->rqsm[n] >> offset)
+				& QMAP_FIELD_RESERVED_BITS_MASK;
+		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
+		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
+
+		q_map = (stat_mappings->tqsm[n] >> offset)
+				& QMAP_FIELD_RESERVED_BITS_MASK;
+		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
+		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
+	}
+
+	/* Rx Errors */
+	stats->imissed  = hw_stats->rx_total_missed_packets +
+			  hw_stats->rx_dma_drop;
+	stats->ierrors  = hw_stats->rx_crc_errors +
+			  hw_stats->rx_mac_short_packet_dropped +
+			  hw_stats->rx_length_errors +
+			  hw_stats->rx_undersize_errors +
+			  hw_stats->rx_oversize_errors +
+			  hw_stats->rx_illegal_byte_errors +
+			  hw_stats->rx_error_bytes +
+			  hw_stats->rx_fragment_errors;
+
+	/* Tx Errors */
+	stats->oerrors  = 0;
+	return 0;
+}
+
+static int
+ngbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+
+	/* HW registers are cleared on read */
+	hw->offset_loaded = 0;
+	ngbe_dev_stats_get(dev, NULL);
+	hw->offset_loaded = 1;
+
+	/* Reset software totals */
+	memset(hw_stats, 0, sizeof(*hw_stats));
+
+	return 0;
+}
+
 static int
 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -1587,6 +1835,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
 	.dev_close                  = ngbe_dev_close,
 	.dev_reset                  = ngbe_dev_reset,
 	.link_update                = ngbe_dev_link_update,
+	.stats_get                  = ngbe_dev_stats_get,
+	.stats_reset                = ngbe_dev_stats_reset,
 	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
 	.vlan_filter_set            = ngbe_vlan_filter_set,
 	.vlan_tpid_set              = ngbe_vlan_tpid_set,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 5ca093ab4c..cb8dc5e5f5 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -43,6 +43,14 @@ struct ngbe_interrupt {
 	uint64_t mask_orig; /* save mask during delayed handler */
 };
 
+#define NGBE_NB_STAT_MAPPING  32
+#define NB_QMAP_FIELDS_PER_QSM_REG 4
+#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
+struct ngbe_stat_mappings {
+	uint32_t tqsm[NGBE_NB_STAT_MAPPING];
+	uint32_t rqsm[NGBE_NB_STAT_MAPPING];
+};
+
 struct ngbe_vfta {
 	uint32_t vfta[NGBE_VFTA_SIZE];
 };
@@ -56,7 +64,9 @@ struct ngbe_hwstrip {
  */
 struct ngbe_adapter {
 	struct ngbe_hw             hw;
+	struct ngbe_hw_stats       stats;
 	struct ngbe_interrupt      intr;
+	struct ngbe_stat_mappings  stat_mappings;
 	struct ngbe_vfta           shadow_vfta;
 	struct ngbe_hwstrip        hwstrip;
 	bool                       rx_bulk_alloc_allowed;
@@ -79,6 +89,9 @@ ngbe_dev_hw(struct rte_eth_dev *dev)
 	return hw;
 }
 
+#define NGBE_DEV_STATS(dev) \
+	(&((struct ngbe_adapter *)(dev)->data->dev_private)->stats)
+
 static inline struct ngbe_interrupt *
 ngbe_dev_intr(struct rte_eth_dev *dev)
 {
@@ -88,6 +101,9 @@ ngbe_dev_intr(struct rte_eth_dev *dev)
 	return intr;
 }
 
+#define NGBE_DEV_STAT_MAPPINGS(dev) \
+	(&((struct ngbe_adapter *)(dev)->data->dev_private)->stat_mappings)
+
 #define NGBE_DEV_VFTA(dev) \
 	(&((struct ngbe_adapter *)(dev)->data->dev_private)->shadow_vfta)
 
@@ -200,5 +216,7 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 		uint16_t queue, bool on);
 void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
 						  int mask);
+void ngbe_read_stats_registers(struct ngbe_hw *hw,
+			   struct ngbe_hw_stats *hw_stats);
 
 #endif /* _NGBE_ETHDEV_H_ */
-- 
2.21.0.windows.1
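
Note on the counter handling: the QP (per-queue) registers are free-running
rather than clear-on-read, so UPDATE_QP_COUNTER_32bit/36bit keep a raw
snapshot in hw->qp_last[] (taken while hw->offset_loaded is 0, i.e. at reset
and dev_start) and report the difference from that baseline, masked to the
counter width. A standalone sketch of the same idea for the 32-bit case,
with read_reg32() as a hypothetical stand-in for the driver's rd32():

	#include <stdint.h>

	/* Hypothetical register accessor standing in for rd32(hw, reg). */
	extern uint32_t read_reg32(uint32_t reg);

	/*
	 * Events counted since the baseline sample of a free-running 32-bit
	 * counter. The unsigned subtraction yields (current - baseline)
	 * mod 2^32, the same result the macro's final 0xFFFFFFFF mask
	 * produces. A counter that wraps more than once between samples
	 * still loses the extra laps; the macros share that limitation.
	 */
	static uint64_t
	counter_since_baseline(uint32_t reg, uint32_t baseline)
	{
		uint32_t current = read_reg32(reg);

		return (uint64_t)(current - baseline);
	}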




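The queue-to-slot folding in ngbe_dev_stats_get() works off the stat-mapping
words added in ngbe_ethdev.h: each 32-bit rqsm/tqsm entry carries four 8-bit
fields (only the low 4 bits are meaningful, per
QMAP_FIELD_RESERVED_BITS_MASK), and the field for queue i names the
rte_eth_stats slot that accumulates that queue's packets and bytes. A small
sketch of just the index math, mirroring the loop body:

	#include <stdint.h>

	#define NB_QMAP_FIELDS_PER_QSM_REG 4
	#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	/* Stats slot for hardware queue 'qid', given the mapping words and
	 * the number of exported slots (RTE_ETHDEV_QUEUE_STAT_CNTRS). */
	static uint32_t
	stats_slot_for_queue(const uint32_t *qsm, uint32_t qid,
			     uint32_t nb_slots)
	{
		uint32_t reg = qid / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t shift = (qid % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map = (qsm[reg] >> shift)
				& QMAP_FIELD_RESERVED_BITS_MASK;

		return q_map % nb_slots;  /* clamp into q_ipackets[] etc. */
	}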
Thread overview: 33+ messages
2021-10-21  9:49 [dpdk-dev] [PATCH v2 00/26] net/ngbe: add many features Jiawen Wu
2021-10-21  9:49 ` [dpdk-dev] [PATCH v2 01/26] net/ngbe: add packet type Jiawen Wu
2021-10-21  9:49 ` [dpdk-dev] [PATCH v2 02/26] net/ngbe: support scattered Rx Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 03/26] net/ngbe: support Rx checksum offload Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 04/26] net/ngbe: support TSO Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 05/26] net/ngbe: support Rx/Tx burst mode info Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 06/26] net/ngbe: support CRC offload Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 07/26] net/ngbe: support jumbo frame Jiawen Wu
2021-10-29 22:17   ` Ferruh Yigit
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 08/26] net/ngbe: support VLAN offload and VLAN filter Jiawen Wu
2021-10-21  9:50 ` Jiawen Wu [this message]
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 10/26] net/ngbe: support device xstats Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 11/26] net/ngbe: support MTU set Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 12/26] net/ngbe: add device promiscuous and allmulticast mode Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 13/26] net/ngbe: support getting FW version Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 14/26] net/ngbe: add loopback mode Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 15/26] net/ngbe: support MAC filters Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 16/26] net/ngbe: support RSS hash Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 17/26] net/ngbe: support SRIOV Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 18/26] net/ngbe: add mailbox process operations Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 19/26] net/ngbe: support flow control Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 20/26] net/ngbe: support device LED on and off Jiawen Wu
2021-10-29 22:20   ` Ferruh Yigit
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 21/26] net/ngbe: support EEPROM dump Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 22/26] net/ngbe: support register dump Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 23/26] net/ngbe: support timesync Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 24/26] net/ngbe: add Rx and Tx queue info get Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 25/26] net/ngbe: add Rx and Tx descriptor status Jiawen Wu
2021-10-21  9:50 ` [dpdk-dev] [PATCH v2 26/26] net/ngbe: add Tx done cleanup Jiawen Wu
2021-10-29 22:24   ` Ferruh Yigit
2021-10-29 22:15 ` [dpdk-dev] [PATCH v2 00/26] net/ngbe: add many features Ferruh Yigit
2021-10-29 22:55   ` Ferruh Yigit
2021-11-01  2:08   ` Jiawen Wu
