DPDK patches and discussions
 help / color / mirror / Atom feed
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH 08/32] net/ngbe: support basic statistics
Date: Wed,  8 Sep 2021 16:37:34 +0800
Message-ID: <20210908083758.312055-9-jiawenwu@trustnetic.com> (raw)
In-Reply-To: <20210908083758.312055-1-jiawenwu@trustnetic.com>

Add support for reading and clearing basic statistics, and for
configuring the per-queue stats counter mapping.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/ngbe.ini  |   2 +
 doc/guides/nics/ngbe.rst           |   1 +
 drivers/net/ngbe/base/ngbe_dummy.h |   5 +
 drivers/net/ngbe/base/ngbe_hw.c    | 101 ++++++++++
 drivers/net/ngbe/base/ngbe_hw.h    |   1 +
 drivers/net/ngbe/base/ngbe_type.h  | 134 +++++++++++++
 drivers/net/ngbe/ngbe_ethdev.c     | 300 +++++++++++++++++++++++++++++
 drivers/net/ngbe/ngbe_ethdev.h     |  19 ++
 8 files changed, 563 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
index 4ae2d66d15..f310fb102a 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -19,6 +19,8 @@ L4 checksum offload  = P
 Inner L3 checksum    = P
 Inner L4 checksum    = P
 Packet type parsing  = Y
+Basic stats          = Y
+Stats per queue      = Y
 Multiprocess aware   = Y
 Linux                = Y
 ARMv8                = Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 9518a59443..64c07e4741 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -15,6 +15,7 @@ Features
 - Checksum offload
 - VLAN/QinQ stripping and inserting
 - TSO offload
+- Port hardware statistics
 - Jumbo frames
 - Link state information
 - Scattered and gather for TX and RX
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h b/drivers/net/ngbe/base/ngbe_dummy.h
index 8863acef0d..0def116c53 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -55,6 +55,10 @@ static inline s32 ngbe_mac_stop_hw_dummy(struct ngbe_hw *TUP0)
 {
 	return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_clear_hw_cntrs_dummy(struct ngbe_hw *TUP0)
+{
+	return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_get_mac_addr_dummy(struct ngbe_hw *TUP0, u8 *TUP1)
 {
 	return NGBE_ERR_OPS_DUMMY;
@@ -178,6 +182,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
 	hw->mac.reset_hw = ngbe_mac_reset_hw_dummy;
 	hw->mac.start_hw = ngbe_mac_start_hw_dummy;
 	hw->mac.stop_hw = ngbe_mac_stop_hw_dummy;
+	hw->mac.clear_hw_cntrs = ngbe_mac_clear_hw_cntrs_dummy;
 	hw->mac.get_mac_addr = ngbe_mac_get_mac_addr_dummy;
 	hw->mac.enable_rx_dma = ngbe_mac_enable_rx_dma_dummy;
 	hw->mac.disable_sec_rx_path = ngbe_mac_disable_sec_rx_path_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 6b575fc67b..f302df5d9d 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -19,6 +19,9 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
 {
 	DEBUGFUNC("ngbe_start_hw");
 
+	/* Clear statistics registers */
+	hw->mac.clear_hw_cntrs(hw);
+
 	/* Clear adapter stopped flag */
 	hw->adapter_stopped = false;
 
@@ -159,6 +162,7 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
 	msec_delay(50);
 
 	ngbe_reset_misc_em(hw);
+	hw->mac.clear_hw_cntrs(hw);
 
 	msec_delay(50);
 
@@ -175,6 +179,102 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
 	return status;
 }
 
+/**
+ *  ngbe_clear_hw_cntrs - Generic clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware.
+ *  Statistics counters are cleared on read.
+ **/
+s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
+{
+	u16 i = 0;
+
+	DEBUGFUNC("ngbe_clear_hw_cntrs");
+
+	/* QP Stats */
+	/* don't write clear queue stats */
+	for (i = 0; i < NGBE_MAX_QP; i++) {
+		hw->qp_last[i].rx_qp_packets = 0;
+		hw->qp_last[i].tx_qp_packets = 0;
+		hw->qp_last[i].rx_qp_bytes = 0;
+		hw->qp_last[i].tx_qp_bytes = 0;
+		hw->qp_last[i].rx_qp_mc_packets = 0;
+		hw->qp_last[i].tx_qp_mc_packets = 0;
+		hw->qp_last[i].rx_qp_bc_packets = 0;
+		hw->qp_last[i].tx_qp_bc_packets = 0;
+	}
+
+	/* PB Stats */
+	rd32(hw, NGBE_PBRXLNKXON);
+	rd32(hw, NGBE_PBRXLNKXOFF);
+	rd32(hw, NGBE_PBTXLNKXON);
+	rd32(hw, NGBE_PBTXLNKXOFF);
+
+	/* DMA Stats */
+	rd32(hw, NGBE_DMARXPKT);
+	rd32(hw, NGBE_DMATXPKT);
+
+	rd64(hw, NGBE_DMARXOCTL);
+	rd64(hw, NGBE_DMATXOCTL);
+
+	/* MAC Stats */
+	rd64(hw, NGBE_MACRXERRCRCL);
+	rd64(hw, NGBE_MACRXMPKTL);
+	rd64(hw, NGBE_MACTXMPKTL);
+
+	rd64(hw, NGBE_MACRXPKTL);
+	rd64(hw, NGBE_MACTXPKTL);
+	rd64(hw, NGBE_MACRXGBOCTL);
+
+	rd64(hw, NGBE_MACRXOCTL);
+	rd32(hw, NGBE_MACTXOCTL);
+
+	rd64(hw, NGBE_MACRX1TO64L);
+	rd64(hw, NGBE_MACRX65TO127L);
+	rd64(hw, NGBE_MACRX128TO255L);
+	rd64(hw, NGBE_MACRX256TO511L);
+	rd64(hw, NGBE_MACRX512TO1023L);
+	rd64(hw, NGBE_MACRX1024TOMAXL);
+	rd64(hw, NGBE_MACTX1TO64L);
+	rd64(hw, NGBE_MACTX65TO127L);
+	rd64(hw, NGBE_MACTX128TO255L);
+	rd64(hw, NGBE_MACTX256TO511L);
+	rd64(hw, NGBE_MACTX512TO1023L);
+	rd64(hw, NGBE_MACTX1024TOMAXL);
+
+	rd64(hw, NGBE_MACRXERRLENL);
+	rd32(hw, NGBE_MACRXOVERSIZE);
+	rd32(hw, NGBE_MACRXJABBER);
+
+	/* MACsec Stats */
+	rd32(hw, NGBE_LSECTX_UTPKT);
+	rd32(hw, NGBE_LSECTX_ENCPKT);
+	rd32(hw, NGBE_LSECTX_PROTPKT);
+	rd32(hw, NGBE_LSECTX_ENCOCT);
+	rd32(hw, NGBE_LSECTX_PROTOCT);
+	rd32(hw, NGBE_LSECRX_UTPKT);
+	rd32(hw, NGBE_LSECRX_BTPKT);
+	rd32(hw, NGBE_LSECRX_NOSCIPKT);
+	rd32(hw, NGBE_LSECRX_UNSCIPKT);
+	rd32(hw, NGBE_LSECRX_DECOCT);
+	rd32(hw, NGBE_LSECRX_VLDOCT);
+	rd32(hw, NGBE_LSECRX_UNCHKPKT);
+	rd32(hw, NGBE_LSECRX_DLYPKT);
+	rd32(hw, NGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		rd32(hw, NGBE_LSECRX_OKPKT(i));
+		rd32(hw, NGBE_LSECRX_INVPKT(i));
+		rd32(hw, NGBE_LSECRX_BADPKT(i));
+	}
+	for (i = 0; i < 4; i++) {
+		rd32(hw, NGBE_LSECRX_INVSAPKT(i));
+		rd32(hw, NGBE_LSECRX_BADSAPKT(i));
+	}
+
+	return 0;
+}
+
 /**
  *  ngbe_get_mac_addr - Generic get MAC address
  *  @hw: pointer to hardware structure
@@ -988,6 +1088,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
 	mac->init_hw = ngbe_init_hw;
 	mac->reset_hw = ngbe_reset_hw_em;
 	mac->start_hw = ngbe_start_hw;
+	mac->clear_hw_cntrs = ngbe_clear_hw_cntrs;
 	mac->enable_rx_dma = ngbe_enable_rx_dma;
 	mac->get_mac_addr = ngbe_get_mac_addr;
 	mac->stop_hw = ngbe_stop_hw;
diff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h
index 17a0a03c88..6a08c02bee 100644
--- a/drivers/net/ngbe/base/ngbe_hw.h
+++ b/drivers/net/ngbe/base/ngbe_hw.h
@@ -17,6 +17,7 @@ s32 ngbe_init_hw(struct ngbe_hw *hw);
 s32 ngbe_start_hw(struct ngbe_hw *hw);
 s32 ngbe_reset_hw_em(struct ngbe_hw *hw);
 s32 ngbe_stop_hw(struct ngbe_hw *hw);
+s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw);
 s32 ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr);
 
 void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw);
diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
index 28540e4ba0..c13f0208fd 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -9,6 +9,7 @@
 #define NGBE_LINK_UP_TIME	90 /* 9.0 Seconds */
 
 #define NGBE_FRAME_SIZE_DFT       (1522) /* Default frame size, +FCS */
+#define NGBE_MAX_QP               (8)
 
 #define NGBE_ALIGN		128 /* as intel did */
 #define NGBE_ISB_SIZE		16
@@ -77,6 +78,127 @@ struct ngbe_bus_info {
 	u8 lan_id;
 };
 
+/* Statistics counters collected by the MAC */
+/* PB[] RxTx */
+struct ngbe_pb_stats {
+	u64 tx_pb_xon_packets;
+	u64 rx_pb_xon_packets;
+	u64 tx_pb_xoff_packets;
+	u64 rx_pb_xoff_packets;
+	u64 rx_pb_dropped;
+	u64 rx_pb_mbuf_alloc_errors;
+	u64 tx_pb_xon2off_packets;
+};
+
+/* QP[] RxTx */
+struct ngbe_qp_stats {
+	u64 rx_qp_packets;
+	u64 tx_qp_packets;
+	u64 rx_qp_bytes;
+	u64 tx_qp_bytes;
+	u64 rx_qp_mc_packets;
+};
+
+struct ngbe_hw_stats {
+	/* MNG RxTx */
+	u64 mng_bmc2host_packets;
+	u64 mng_host2bmc_packets;
+	/* Basic RxTx */
+	u64 rx_drop_packets;
+	u64 tx_drop_packets;
+	u64 rx_dma_drop;
+	u64 tx_secdrp_packets;
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+	u64 rx_total_bytes;
+	u64 rx_total_packets;
+	u64 tx_total_packets;
+	u64 rx_total_missed_packets;
+	u64 rx_broadcast_packets;
+	u64 tx_broadcast_packets;
+	u64 rx_multicast_packets;
+	u64 tx_multicast_packets;
+	u64 rx_management_packets;
+	u64 tx_management_packets;
+	u64 rx_management_dropped;
+
+	/* Basic Error */
+	u64 rx_crc_errors;
+	u64 rx_illegal_byte_errors;
+	u64 rx_error_bytes;
+	u64 rx_mac_short_packet_dropped;
+	u64 rx_length_errors;
+	u64 rx_undersize_errors;
+	u64 rx_fragment_errors;
+	u64 rx_oversize_errors;
+	u64 rx_jabber_errors;
+	u64 rx_l3_l4_xsum_error;
+	u64 mac_local_errors;
+	u64 mac_remote_errors;
+
+	/* MACSEC */
+	u64 tx_macsec_pkts_untagged;
+	u64 tx_macsec_pkts_encrypted;
+	u64 tx_macsec_pkts_protected;
+	u64 tx_macsec_octets_encrypted;
+	u64 tx_macsec_octets_protected;
+	u64 rx_macsec_pkts_untagged;
+	u64 rx_macsec_pkts_badtag;
+	u64 rx_macsec_pkts_nosci;
+	u64 rx_macsec_pkts_unknownsci;
+	u64 rx_macsec_octets_decrypted;
+	u64 rx_macsec_octets_validated;
+	u64 rx_macsec_sc_pkts_unchecked;
+	u64 rx_macsec_sc_pkts_delayed;
+	u64 rx_macsec_sc_pkts_late;
+	u64 rx_macsec_sa_pkts_ok;
+	u64 rx_macsec_sa_pkts_invalid;
+	u64 rx_macsec_sa_pkts_notvalid;
+	u64 rx_macsec_sa_pkts_unusedsa;
+	u64 rx_macsec_sa_pkts_notusingsa;
+
+	/* MAC RxTx */
+	u64 rx_size_64_packets;
+	u64 rx_size_65_to_127_packets;
+	u64 rx_size_128_to_255_packets;
+	u64 rx_size_256_to_511_packets;
+	u64 rx_size_512_to_1023_packets;
+	u64 rx_size_1024_to_max_packets;
+	u64 tx_size_64_packets;
+	u64 tx_size_65_to_127_packets;
+	u64 tx_size_128_to_255_packets;
+	u64 tx_size_256_to_511_packets;
+	u64 tx_size_512_to_1023_packets;
+	u64 tx_size_1024_to_max_packets;
+
+	/* Flow Control */
+	u64 tx_xon_packets;
+	u64 rx_xon_packets;
+	u64 tx_xoff_packets;
+	u64 rx_xoff_packets;
+
+	u64 rx_up_dropped;
+
+	u64 rdb_pkt_cnt;
+	u64 rdb_repli_cnt;
+	u64 rdb_drp_cnt;
+
+	/* QP[] RxTx */
+	struct {
+		u64 rx_qp_packets;
+		u64 tx_qp_packets;
+		u64 rx_qp_bytes;
+		u64 tx_qp_bytes;
+		u64 rx_qp_mc_packets;
+		u64 tx_qp_mc_packets;
+		u64 rx_qp_bc_packets;
+		u64 tx_qp_bc_packets;
+	} qp[NGBE_MAX_QP];
+
+};
+
 struct ngbe_rom_info {
 	s32 (*init_params)(struct ngbe_hw *hw);
 	s32 (*validate_checksum)(struct ngbe_hw *hw, u16 *checksum_val);
@@ -96,6 +218,7 @@ struct ngbe_mac_info {
 	s32 (*reset_hw)(struct ngbe_hw *hw);
 	s32 (*start_hw)(struct ngbe_hw *hw);
 	s32 (*stop_hw)(struct ngbe_hw *hw);
+	s32 (*clear_hw_cntrs)(struct ngbe_hw *hw);
 	s32 (*get_mac_addr)(struct ngbe_hw *hw, u8 *mac_addr);
 	s32 (*enable_rx_dma)(struct ngbe_hw *hw, u32 regval);
 	s32 (*disable_sec_rx_path)(struct ngbe_hw *hw);
@@ -195,7 +318,18 @@ struct ngbe_hw {
 
 	u32 q_rx_regs[8 * 4];
 	u32 q_tx_regs[8 * 4];
+	bool offset_loaded;
 	bool is_pf;
+	struct {
+		u64 rx_qp_packets;
+		u64 tx_qp_packets;
+		u64 rx_qp_bytes;
+		u64 tx_qp_bytes;
+		u64 rx_qp_mc_packets;
+		u64 tx_qp_mc_packets;
+		u64 rx_qp_bc_packets;
+		u64 tx_qp_bc_packets;
+	} qp_last[NGBE_MAX_QP];
 };
 
 #include "ngbe_regs.h"
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3903eb0a2c..3d459718b1 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -17,6 +17,7 @@
 static int ngbe_dev_close(struct rte_eth_dev *dev);
 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
 				int wait_to_complete);
+static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
 					uint16_t queue);
@@ -122,6 +123,56 @@ ngbe_disable_intr(struct ngbe_hw *hw)
 	ngbe_flush(hw);
 }
 
+static int
+ngbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				  uint16_t queue_id,
+				  uint8_t stat_idx,
+				  uint8_t is_rx)
+{
+	struct ngbe_stat_mappings *stat_mappings =
+		NGBE_DEV_STAT_MAPPINGS(eth_dev);
+	uint32_t qsmr_mask = 0;
+	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+	uint32_t q_map;
+	uint8_t n, offset;
+
+	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
+		return -EIO;
+
+	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+
+	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+	if (n >= NGBE_NB_STAT_MAPPING) {
+		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+		return -EIO;
+	}
+	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
+
+	/* Now clear any previous stat_idx set */
+	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] &= ~clearing_mask;
+	else
+		stat_mappings->rqsm[n] &= ~clearing_mask;
+
+	q_map = (uint32_t)stat_idx;
+	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] |= qsmr_mask;
+	else
+		stat_mappings->rqsm[n] |= qsmr_mask;
+
+	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
+	return 0;
+}
+
 /*
  * Ensure that all locks are released before first NVM or PHY access
  */
@@ -236,6 +287,9 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 		return -EIO;
 	}
 
+	/* Reset the hw statistics */
+	ngbe_dev_stats_reset(eth_dev);
+
 	/* disable interrupt */
 	ngbe_disable_intr(hw);
 
@@ -616,6 +670,7 @@ static int
 ngbe_dev_start(struct rte_eth_dev *dev)
 {
 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
@@ -780,6 +835,9 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	 */
 	ngbe_dev_link_update(dev, 0);
 
+	ngbe_read_stats_registers(hw, hw_stats);
+	hw->offset_loaded = 1;
+
 	return 0;
 
 error:
@@ -916,6 +974,245 @@ ngbe_dev_reset(struct rte_eth_dev *dev)
 	return ret;
 }
 
+#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
+	{                                                       \
+		uint32_t current_counter = rd32(hw, reg);       \
+		if (current_counter < last_counter)             \
+			current_counter += 0x100000000LL;       \
+		if (!hw->offset_loaded)                         \
+			last_counter = current_counter;         \
+		counter = current_counter - last_counter;       \
+		counter &= 0xFFFFFFFFLL;                        \
+	}
+
+#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+	{                                                                \
+		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
+		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
+		uint64_t current_counter = (current_counter_msb << 32) | \
+			current_counter_lsb;                             \
+		if (current_counter < last_counter)                      \
+			current_counter += 0x1000000000LL;               \
+		if (!hw->offset_loaded)                                  \
+			last_counter = current_counter;                  \
+		counter = current_counter - last_counter;                \
+		counter &= 0xFFFFFFFFFLL;                                \
+	}
+
+void
+ngbe_read_stats_registers(struct ngbe_hw *hw,
+			   struct ngbe_hw_stats *hw_stats)
+{
+	unsigned int i;
+
+	/* QP Stats */
+	for (i = 0; i < hw->nb_rx_queues; i++) {
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
+			hw->qp_last[i].rx_qp_packets,
+			hw_stats->qp[i].rx_qp_packets);
+		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
+			hw->qp_last[i].rx_qp_bytes,
+			hw_stats->qp[i].rx_qp_bytes);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
+			hw->qp_last[i].rx_qp_mc_packets,
+			hw_stats->qp[i].rx_qp_mc_packets);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
+			hw->qp_last[i].rx_qp_bc_packets,
+			hw_stats->qp[i].rx_qp_bc_packets);
+	}
+
+	for (i = 0; i < hw->nb_tx_queues; i++) {
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
+			hw->qp_last[i].tx_qp_packets,
+			hw_stats->qp[i].tx_qp_packets);
+		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
+			hw->qp_last[i].tx_qp_bytes,
+			hw_stats->qp[i].tx_qp_bytes);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
+			hw->qp_last[i].tx_qp_mc_packets,
+			hw_stats->qp[i].tx_qp_mc_packets);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
+			hw->qp_last[i].tx_qp_bc_packets,
+			hw_stats->qp[i].tx_qp_bc_packets);
+	}
+
+	/* PB Stats */
+	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
+	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
+	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
+	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
+	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
+	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
+
+	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
+	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
+
+	/* DMA Stats */
+	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
+	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
+	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
+	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
+	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
+	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
+	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
+	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
+
+	/* MAC Stats */
+	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
+	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
+	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
+
+	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
+	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
+	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
+
+	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
+	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
+
+	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
+	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
+	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
+	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
+	hw_stats->rx_size_512_to_1023_packets +=
+			rd64(hw, NGBE_MACRX512TO1023L);
+	hw_stats->rx_size_1024_to_max_packets +=
+			rd64(hw, NGBE_MACRX1024TOMAXL);
+	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
+	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
+	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
+	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
+	hw_stats->tx_size_512_to_1023_packets +=
+			rd64(hw, NGBE_MACTX512TO1023L);
+	hw_stats->tx_size_1024_to_max_packets +=
+			rd64(hw, NGBE_MACTX1024TOMAXL);
+
+	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
+	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
+	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
+
+	/* MNG Stats */
+	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
+	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
+	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
+	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
+
+	/* MACsec Stats */
+	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
+	hw_stats->tx_macsec_pkts_encrypted +=
+			rd32(hw, NGBE_LSECTX_ENCPKT);
+	hw_stats->tx_macsec_pkts_protected +=
+			rd32(hw, NGBE_LSECTX_PROTPKT);
+	hw_stats->tx_macsec_octets_encrypted +=
+			rd32(hw, NGBE_LSECTX_ENCOCT);
+	hw_stats->tx_macsec_octets_protected +=
+			rd32(hw, NGBE_LSECTX_PROTOCT);
+	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
+	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
+	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
+	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
+	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
+	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
+	hw_stats->rx_macsec_sc_pkts_unchecked +=
+			rd32(hw, NGBE_LSECRX_UNCHKPKT);
+	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
+	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		hw_stats->rx_macsec_sa_pkts_ok +=
+			rd32(hw, NGBE_LSECRX_OKPKT(i));
+		hw_stats->rx_macsec_sa_pkts_invalid +=
+			rd32(hw, NGBE_LSECRX_INVPKT(i));
+		hw_stats->rx_macsec_sa_pkts_notvalid +=
+			rd32(hw, NGBE_LSECRX_BADPKT(i));
+	}
+	for (i = 0; i < 4; i++) {
+		hw_stats->rx_macsec_sa_pkts_unusedsa +=
+			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
+		hw_stats->rx_macsec_sa_pkts_notusingsa +=
+			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
+	}
+	hw_stats->rx_total_missed_packets =
+			hw_stats->rx_up_dropped;
+}
+
+static int
+ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+	struct ngbe_stat_mappings *stat_mappings =
+			NGBE_DEV_STAT_MAPPINGS(dev);
+	uint32_t i, j;
+
+	ngbe_read_stats_registers(hw, hw_stats);
+
+	if (stats == NULL)
+		return -EINVAL;
+
+	/* Fill out the rte_eth_stats statistics structure */
+	stats->ipackets = hw_stats->rx_packets;
+	stats->ibytes = hw_stats->rx_bytes;
+	stats->opackets = hw_stats->tx_packets;
+	stats->obytes = hw_stats->tx_bytes;
+
+	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
+	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
+	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
+	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
+	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
+	for (i = 0; i < NGBE_MAX_QP; i++) {
+		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
+		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
+		uint32_t q_map;
+
+		q_map = (stat_mappings->rqsm[n] >> offset)
+				& QMAP_FIELD_RESERVED_BITS_MASK;
+		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
+		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
+
+		q_map = (stat_mappings->tqsm[n] >> offset)
+				& QMAP_FIELD_RESERVED_BITS_MASK;
+		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
+		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
+	}
+
+	/* Rx Errors */
+	stats->imissed  = hw_stats->rx_total_missed_packets +
+			  hw_stats->rx_dma_drop;
+	stats->ierrors  = hw_stats->rx_crc_errors +
+			  hw_stats->rx_mac_short_packet_dropped +
+			  hw_stats->rx_length_errors +
+			  hw_stats->rx_undersize_errors +
+			  hw_stats->rx_oversize_errors +
+			  hw_stats->rx_illegal_byte_errors +
+			  hw_stats->rx_error_bytes +
+			  hw_stats->rx_fragment_errors;
+
+	/* Tx Errors */
+	stats->oerrors  = 0;
+	return 0;
+}
+
+static int
+ngbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+
+	/* HW registers are cleared on read */
+	hw->offset_loaded = 0;
+	ngbe_dev_stats_get(dev, NULL);
+	hw->offset_loaded = 1;
+
+	/* Reset software totals */
+	memset(hw_stats, 0, sizeof(*hw_stats));
+
+	return 0;
+}
+
 static int
 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -1462,6 +1759,9 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
 	.dev_close                  = ngbe_dev_close,
 	.dev_reset                  = ngbe_dev_reset,
 	.link_update                = ngbe_dev_link_update,
+	.stats_get                  = ngbe_dev_stats_get,
+	.stats_reset                = ngbe_dev_stats_reset,
+	.queue_stats_mapping_set    = ngbe_dev_queue_stats_mapping_set,
 	.vlan_offload_set           = ngbe_vlan_offload_set,
 	.rx_queue_start	            = ngbe_dev_rx_queue_start,
 	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 8b3a1cdc3d..c0f1a50c66 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -40,6 +40,15 @@ struct ngbe_interrupt {
 	uint64_t mask_orig; /* save mask during delayed handler */
 };
 
+#define NGBE_NB_STAT_MAPPING  32
+#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
+#define NB_QMAP_FIELDS_PER_QSM_REG 4
+#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
+struct ngbe_stat_mappings {
+	uint32_t tqsm[NGBE_NB_STAT_MAPPING];
+	uint32_t rqsm[NGBE_NB_STAT_MAPPING];
+};
+
 struct ngbe_vfta {
 	uint32_t vfta[NGBE_VFTA_SIZE];
 };
@@ -53,7 +62,9 @@ struct ngbe_hwstrip {
  */
 struct ngbe_adapter {
 	struct ngbe_hw             hw;
+	struct ngbe_hw_stats       stats;
 	struct ngbe_interrupt      intr;
+	struct ngbe_stat_mappings  stat_mappings;
 	struct ngbe_vfta           shadow_vfta;
 	struct ngbe_hwstrip        hwstrip;
 	bool                       rx_bulk_alloc_allowed;
@@ -76,6 +87,9 @@ ngbe_dev_hw(struct rte_eth_dev *dev)
 	return hw;
 }
 
+#define NGBE_DEV_STATS(dev) \
+	(&((struct ngbe_adapter *)(dev)->data->dev_private)->stats)
+
 static inline struct ngbe_interrupt *
 ngbe_dev_intr(struct rte_eth_dev *dev)
 {
@@ -85,6 +99,9 @@ ngbe_dev_intr(struct rte_eth_dev *dev)
 	return intr;
 }
 
+#define NGBE_DEV_STAT_MAPPINGS(dev) \
+	(&((struct ngbe_adapter *)(dev)->data->dev_private)->stat_mappings)
+
 #define NGBE_DEV_VFTA(dev) \
 	(&((struct ngbe_adapter *)(dev)->data->dev_private)->shadow_vfta)
 
@@ -190,5 +207,7 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 		uint16_t queue, bool on);
 void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
 						  int mask);
+void ngbe_read_stats_registers(struct ngbe_hw *hw,
+			   struct ngbe_hw_stats *hw_stats);
 
 #endif /* _NGBE_ETHDEV_H_ */
-- 
2.21.0.windows.1




  parent reply	other threads:[~2021-09-08  8:37 UTC|newest]

Thread overview: 54+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-09-08  8:37 [dpdk-dev] [PATCH 00/32] net/ngbe: add many features Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 01/32] net/ngbe: add packet type Jiawen Wu
2021-09-15 16:47   ` Ferruh Yigit
2021-09-22  8:01     ` Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 02/32] net/ngbe: support scattered Rx Jiawen Wu
2021-09-15 13:22   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 03/32] net/ngbe: support Rx checksum offload Jiawen Wu
2021-09-15 16:48   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 04/32] net/ngbe: support TSO Jiawen Wu
2021-09-15 16:57   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 05/32] net/ngbe: support CRC offload Jiawen Wu
2021-09-15 16:48   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 06/32] net/ngbe: support jumbo frame Jiawen Wu
2021-09-15 16:48   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 07/32] net/ngbe: support VLAN and QinQ offload Jiawen Wu
2021-09-08  8:37 ` Jiawen Wu [this message]
2021-09-15 16:50   ` [dpdk-dev] [PATCH 08/32] net/ngbe: support basic statistics Ferruh Yigit
2021-10-14  2:51     ` Jiawen Wu
2021-10-14  7:59       ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 09/32] net/ngbe: support device xstats Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 10/32] net/ngbe: support MTU set Jiawen Wu
2021-09-15 16:52   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 11/32] net/ngbe: add device promiscuous and allmulticast mode Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 12/32] net/ngbe: support getting FW version Jiawen Wu
2021-09-15 16:53   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 13/32] net/ngbe: add loopback mode Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 14/32] net/ngbe: support Rx interrupt Jiawen Wu
2021-09-15 16:53   ` Ferruh Yigit
2021-10-14 10:11     ` Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 15/32] net/ngbe: support MAC filters Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 16/32] net/ngbe: support VLAN filter Jiawen Wu
2021-09-15 16:54   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 17/32] net/ngbe: support RSS hash Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 18/32] net/ngbe: support SRIOV Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 19/32] net/ngbe: add mailbox process operations Jiawen Wu
2021-09-15 16:56   ` Ferruh Yigit
2021-09-08  8:37 ` [dpdk-dev] [PATCH 20/32] net/ngbe: support flow control Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 21/32] net/ngbe: support device LED on and off Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 22/32] net/ngbe: support EEPROM dump Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 23/32] net/ngbe: support register dump Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 24/32] net/ngbe: support timesync Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 25/32] net/ngbe: add Rx and Tx queue info get Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 26/32] net/ngbe: add Rx and Tx descriptor status Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 27/32] net/ngbe: add Tx done cleanup Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 28/32] net/ngbe: add IPsec context creation Jiawen Wu
2021-09-15 16:58   ` Ferruh Yigit
2021-09-16  9:00     ` Hemant Agrawal
2021-09-16 17:15       ` Ferruh Yigit
2021-09-16  9:04   ` Hemant Agrawal
2021-09-08  8:37 ` [dpdk-dev] [PATCH 29/32] net/ngbe: create and destroy security session Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 30/32] net/ngbe: support security operations Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 31/32] net/ngbe: add security offload in Rx and Tx Jiawen Wu
2021-09-08  8:37 ` [dpdk-dev] [PATCH 32/32] doc: update for ngbe Jiawen Wu
2021-09-15 16:58   ` Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210908083758.312055-9-jiawenwu@trustnetic.com \
    --to=jiawenwu@trustnetic.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror http://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ http://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git