From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: Damodharam Ammepalli <damodharam.ammepalli@broadcom.com>
Subject: [PATCH 16/18] net/bnxt: query extended stats from firmware
Date: Thu, 21 Dec 2023 10:05:27 -0800
Message-ID: <20231221180529.18687-17-ajit.khaparde@broadcom.com>
In-Reply-To: <20231221180529.18687-1-ajit.khaparde@broadcom.com>
From: Damodharam Ammepalli <damodharam.ammepalli@broadcom.com>
Add driver support for the HWRM_STAT_EXT_CTX_QUERY HWRM
message. In this patch only the P7 chipset is enabled for this HWRM,
while P5 and earlier generations remain on HWRM_STAT_CTX_QUERY.
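A minimal sketch of the resulting dispatch (the wrapper below is
hypothetical, for illustration only; the real call sites are in
bnxt_stats.c in this patch):

	static int
	bnxt_query_ring_stats(struct bnxt *bp, uint32_t cid, int idx, bool rx)
	{
		if (BNXT_TPA_V2_P7(bp)) {
			/* P7 with TPA v2: extended stats context query */
			struct bnxt_ring_stats_ext stats_ext = { 0 };

			return bnxt_hwrm_ring_stats_ext(bp, cid, idx,
							&stats_ext, rx);
		} else {
			/* P5 and earlier: legacy HWRM_STAT_CTX_QUERY */
			struct bnxt_ring_stats stats = { 0 };

			return bnxt_hwrm_ring_stats(bp, cid, idx,
						    &stats, rx);
		}
	}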
Signed-off-by: Damodharam Ammepalli <damodharam.ammepalli@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 49 ++++++
drivers/net/bnxt/bnxt_cpr.h | 3 +-
drivers/net/bnxt/bnxt_ethdev.c | 36 ++++-
drivers/net/bnxt/bnxt_hwrm.c | 117 ++++++++++++++
drivers/net/bnxt/bnxt_hwrm.h | 12 +-
drivers/net/bnxt/bnxt_ring.c | 6 +-
drivers/net/bnxt/bnxt_rxq.c | 8 +-
drivers/net/bnxt/bnxt_stats.c | 279 ++++++++++++++++++++++++++++++---
8 files changed, 483 insertions(+), 27 deletions(-)
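Note: the per-ring stats DMA buffer must now be sized for whichever
layout the firmware writes back. A condensed view of the sizing logic
(simplified from the bnxt_ring.c hunk below, for review convenience):

	/* ctx_hw_stats_ext on P7 (TPA v2), legacy ctx_hw_stats otherwise */
	int stats_len = RTE_CACHE_LINE_ROUNDUP(BNXT_HWRM_CTX_GET_SIZE(bp));

	stats_len = RTE_ALIGN(stats_len, 128);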
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 2d871933e9..5919d219f7 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -714,6 +714,53 @@ struct bnxt_ring_stats {
uint64_t rx_agg_aborts;
};
+struct bnxt_ring_stats_ext {
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of discarded packets on receive path */
+ uint64_t rx_discard_pkts;
+ /* Number of packets with errors on the receive path */
+ uint64_t rx_error_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of packets with errors on the transmit path */
+ uint64_t tx_error_pkts;
+ /* Number of discarded packets on transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of TPA eligible packets */
+ uint64_t rx_tpa_eligible_pkt;
+ /* Number of TPA eligible bytes */
+ uint64_t rx_tpa_eligible_bytes;
+ /* Number of TPA packets */
+ uint64_t rx_tpa_pkt;
+ /* Number of TPA bytes */
+ uint64_t rx_tpa_bytes;
+ /* Number of TPA errors */
+ uint64_t rx_tpa_errors;
+ /* Number of TPA events */
+ uint64_t rx_tpa_events;
+};
+
enum bnxt_session_type {
BNXT_SESSION_TYPE_REGULAR = 0,
BNXT_SESSION_TYPE_SHARED_COMMON,
@@ -991,6 +1038,8 @@ struct bnxt {
uint16_t tx_cfa_action;
struct bnxt_ring_stats *prev_rx_ring_stats;
struct bnxt_ring_stats *prev_tx_ring_stats;
+ struct bnxt_ring_stats_ext *prev_rx_ring_stats_ext;
+ struct bnxt_ring_stats_ext *prev_tx_ring_stats_ext;
struct bnxt_vnic_queue_db vnic_queue_db;
#define BNXT_MAX_MC_ADDRS ((bp)->max_mcast_addr)
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index 26e81a6a7e..c7b3480dc9 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -68,7 +68,8 @@ struct bnxt_cp_ring_info {
struct bnxt_db_info cp_db;
rte_iova_t cp_desc_mapping;
- struct ctx_hw_stats *hw_stats;
+ char *hw_stats;
+ uint16_t hw_ring_stats_size;
rte_iova_t hw_stats_map;
uint32_t hw_stats_ctx_id;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 625e5f1f9a..031028eda1 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -732,15 +732,49 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
static void bnxt_free_prev_ring_stats(struct bnxt *bp)
{
+ /* TPA v2 devices use the extended variant of the ring stats struct */
+ if (BNXT_TPA_V2_P7(bp)) {
+ rte_free(bp->prev_rx_ring_stats_ext);
+ rte_free(bp->prev_tx_ring_stats_ext);
+ bp->prev_rx_ring_stats_ext = NULL;
+ bp->prev_tx_ring_stats_ext = NULL;
+ return;
+ }
rte_free(bp->prev_rx_ring_stats);
rte_free(bp->prev_tx_ring_stats);
-
bp->prev_rx_ring_stats = NULL;
bp->prev_tx_ring_stats = NULL;
}
+static int bnxt_alloc_prev_ring_ext_stats(struct bnxt *bp)
+{
+ bp->prev_rx_ring_stats_ext = rte_zmalloc("bnxt_prev_rx_ring_stats_ext",
+ sizeof(struct bnxt_ring_stats_ext) *
+ bp->rx_cp_nr_rings,
+ 0);
+ if (bp->prev_rx_ring_stats_ext == NULL)
+ return -ENOMEM;
+
+ bp->prev_tx_ring_stats_ext = rte_zmalloc("bnxt_prev_tx_ring_stats_ext",
+ sizeof(struct bnxt_ring_stats_ext) *
+ bp->tx_cp_nr_rings,
+ 0);
+
+ if (bp->tx_cp_nr_rings > 0 && bp->prev_tx_ring_stats_ext == NULL)
+ goto error;
+
+ return 0;
+
+error:
+ bnxt_free_prev_ring_stats(bp);
+ return -ENOMEM;
+}
+
static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
{
+ if (BNXT_TPA_V2_P7(bp))
+ return bnxt_alloc_prev_ring_ext_stats(bp);
+
bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
sizeof(struct bnxt_ring_stats) *
bp->rx_cp_nr_rings,
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 98cb130fb2..d61446dd7c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2386,6 +2386,8 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
+ req.stats_dma_length = rte_cpu_to_le_16(BNXT_HWRM_CTX_GET_SIZE(bp));
+
req.update_period_ms = rte_cpu_to_le_32(0);
req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
@@ -5184,6 +5186,8 @@ static void bnxt_update_prev_stat(uint64_t *cntr, uint64_t *prev_cntr)
* returned by HW in this iteration, so use the previous
* iteration's counter value
*/
+ if (!cntr || !prev_cntr)
+ return;
if (*prev_cntr && *cntr == 0)
*cntr = *prev_cntr;
else
@@ -5292,6 +5296,119 @@ int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
return rc;
}
+int bnxt_hwrm_ring_stats_ext(struct bnxt *bp, uint32_t cid, int idx,
+ struct bnxt_ring_stats_ext *ring_stats, bool rx)
+{
+ int rc = 0;
+ struct hwrm_stat_ext_ctx_query_input req = {.req_type = 0};
+ struct hwrm_stat_ext_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(&req, HWRM_STAT_EXT_CTX_QUERY, BNXT_USE_CHIMP_MB);
+
+ req.stat_ctx_id = rte_cpu_to_le_32(cid);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+
+ if (rx) {
+ struct bnxt_ring_stats_ext *prev_stats = &bp->prev_rx_ring_stats_ext[idx];
+
+ ring_stats->rx_ucast_pkts = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_ucast_pkts,
+ &prev_stats->rx_ucast_pkts);
+
+ ring_stats->rx_mcast_pkts = rte_le_to_cpu_64(resp->rx_mcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_mcast_pkts,
+ &prev_stats->rx_mcast_pkts);
+
+ ring_stats->rx_bcast_pkts = rte_le_to_cpu_64(resp->rx_bcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_bcast_pkts,
+ &prev_stats->rx_bcast_pkts);
+
+ ring_stats->rx_ucast_bytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_ucast_bytes,
+ &prev_stats->rx_ucast_bytes);
+
+ ring_stats->rx_mcast_bytes = rte_le_to_cpu_64(resp->rx_mcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_mcast_bytes,
+ &prev_stats->rx_mcast_bytes);
+
+ ring_stats->rx_bcast_bytes = rte_le_to_cpu_64(resp->rx_bcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_bcast_bytes,
+ &prev_stats->rx_bcast_bytes);
+
+ ring_stats->rx_discard_pkts = rte_le_to_cpu_64(resp->rx_discard_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_discard_pkts,
+ &prev_stats->rx_discard_pkts);
+
+ ring_stats->rx_error_pkts = rte_le_to_cpu_64(resp->rx_error_pkts);
+ bnxt_update_prev_stat(&ring_stats->rx_error_pkts,
+ &prev_stats->rx_error_pkts);
+
+ ring_stats->rx_tpa_eligible_pkt = rte_le_to_cpu_64(resp->rx_tpa_eligible_pkt);
+ bnxt_update_prev_stat(&ring_stats->rx_tpa_eligible_pkt,
+ &prev_stats->rx_tpa_eligible_pkt);
+
+ ring_stats->rx_tpa_eligible_bytes = rte_le_to_cpu_64(resp->rx_tpa_eligible_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_tpa_eligible_bytes,
+ &prev_stats->rx_tpa_eligible_bytes);
+
+ ring_stats->rx_tpa_pkt = rte_le_to_cpu_64(resp->rx_tpa_pkt);
+ bnxt_update_prev_stat(&ring_stats->rx_tpa_pkt,
+ &prev_stats->rx_tpa_pkt);
+
+ ring_stats->rx_tpa_bytes = rte_le_to_cpu_64(resp->rx_tpa_bytes);
+ bnxt_update_prev_stat(&ring_stats->rx_tpa_bytes,
+ &prev_stats->rx_tpa_bytes);
+
+ ring_stats->rx_tpa_errors = rte_le_to_cpu_64(resp->rx_tpa_errors);
+ bnxt_update_prev_stat(&ring_stats->rx_tpa_errors,
+ &prev_stats->rx_tpa_errors);
+
+ ring_stats->rx_tpa_events = rte_le_to_cpu_64(resp->rx_tpa_events);
+ bnxt_update_prev_stat(&ring_stats->rx_tpa_events,
+ &prev_stats->rx_tpa_events);
+ } else {
+ struct bnxt_ring_stats_ext *prev_stats = &bp->prev_tx_ring_stats_ext[idx];
+
+ ring_stats->tx_ucast_pkts = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_ucast_pkts,
+ &prev_stats->tx_ucast_pkts);
+
+ ring_stats->tx_mcast_pkts = rte_le_to_cpu_64(resp->tx_mcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_mcast_pkts,
+ &prev_stats->tx_mcast_pkts);
+
+ ring_stats->tx_bcast_pkts = rte_le_to_cpu_64(resp->tx_bcast_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_bcast_pkts,
+ &prev_stats->tx_bcast_pkts);
+
+ ring_stats->tx_ucast_bytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_ucast_bytes,
+ &prev_stats->tx_ucast_bytes);
+
+ ring_stats->tx_mcast_bytes = rte_le_to_cpu_64(resp->tx_mcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_mcast_bytes,
+ &prev_stats->tx_mcast_bytes);
+
+ ring_stats->tx_bcast_bytes = rte_le_to_cpu_64(resp->tx_bcast_bytes);
+ bnxt_update_prev_stat(&ring_stats->tx_bcast_bytes,
+ &prev_stats->tx_bcast_bytes);
+
+ ring_stats->tx_discard_pkts = rte_le_to_cpu_64(resp->tx_discard_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_discard_pkts,
+ &prev_stats->tx_discard_pkts);
+
+ ring_stats->tx_error_pkts = rte_le_to_cpu_64(resp->tx_error_pkts);
+ bnxt_update_prev_stat(&ring_stats->tx_error_pkts,
+ &prev_stats->tx_error_pkts);
+ }
+
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
struct hwrm_port_qstats_input req = {0};
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 179d5dc1f0..19fb35f223 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -167,8 +167,14 @@ struct bnxt_pf_resource_info {
BNXT_TUNNELED_OFFLOADS_CAP_GRE_EN(bp) && \
BNXT_TUNNELED_OFFLOADS_CAP_IPINIP_EN(bp))
-#define BNXT_SIG_MODE_NRZ HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_NRZ
-#define BNXT_SIG_MODE_PAM4 HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4
+/* True if the device supports TPA v2 and is a P7 chipset.
+ * Add P5 here once validated against Thor firmware.
+ */
+#define BNXT_TPA_V2_P7(bp) ((bp)->max_tpa_v2 && BNXT_CHIP_P7(bp))
+/* Get the size of the stat context size for DMA from HW */
+#define BNXT_HWRM_CTX_GET_SIZE(bp) (BNXT_TPA_V2_P7(bp) ? \
+ sizeof(struct ctx_hw_stats_ext) : \
+ sizeof(struct ctx_hw_stats))
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
@@ -352,6 +358,8 @@ int bnxt_hwrm_poll_ver_get(struct bnxt *bp);
int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int queue_index);
int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
struct bnxt_ring_stats *stats, bool rx);
+int bnxt_hwrm_ring_stats_ext(struct bnxt *bp, uint32_t cid, int idx,
+ struct bnxt_ring_stats_ext *ring_stats, bool rx);
int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr,
uint16_t page_number, uint16_t start_addr,
uint16_t data_length, uint8_t *buf);
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 4bf0b9c6ed..9e512321d9 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -119,8 +119,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
int ag_ring_len = 0;
int stats_len = (tx_ring_info || rx_ring_info) ?
- RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
- sizeof (struct hwrm_resp_hdr)) : 0;
+ RTE_CACHE_LINE_ROUNDUP(BNXT_HWRM_CTX_GET_SIZE(bp)) : 0;
stats_len = RTE_ALIGN(stats_len, 128);
int cp_vmem_start = stats_len;
@@ -305,8 +304,9 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
*cp_ring->vmem = ((char *)mz->addr + stats_len);
if (stats_len) {
cp_ring_info->hw_stats = mz->addr;
- cp_ring_info->hw_stats_map = mz_phys_addr;
}
+ cp_ring_info->hw_stats_map = mz_phys_addr;
+
cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
if (nq_ring_info) {
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 575e7f193f..913856e6eb 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -483,8 +483,12 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/* reset the previous stats for the rx_queue since the counters
* will be cleared when the queue is started.
*/
- memset(&bp->prev_rx_ring_stats[rx_queue_id], 0,
- sizeof(struct bnxt_ring_stats));
+ if (BNXT_TPA_V2_P7(bp))
+ memset(&bp->prev_rx_ring_stats_ext[rx_queue_id], 0,
+ sizeof(struct bnxt_ring_stats_ext));
+ else
+ memset(&bp->prev_rx_ring_stats[rx_queue_id], 0,
+ sizeof(struct bnxt_ring_stats));
/* Set the queue state to started here.
* We check the status of the queue while posting buffer.
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 0e25207fc3..ee10fe0360 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -258,6 +258,53 @@ static const struct bnxt_xstats_name_off bnxt_tx_stats_strings[] = {
tx_stat_error)},
};
+static const struct bnxt_xstats_name_off bnxt_func_stats_ext_strings[] = {
+ {"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ tx_ucast_pkts)},
+ {"tx_mcast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ tx_mcast_pkts)},
+ {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ tx_bcast_pkts)},
+ {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ tx_discard_pkts)},
+ {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ tx_error_pkts)},
+ {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
+ tx_ucast_bytes)},
+ {"tx_mcast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
+ tx_mcast_bytes)},
+ {"tx_bcast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
+ tx_bcast_bytes)},
+ {"rx_ucast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_ucast_pkts)},
+ {"rx_mcast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_mcast_pkts)},
+ {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_bcast_pkts)},
+ {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_discard_pkts)},
+ {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_error_pkts)},
+ {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_ucast_bytes)},
+ {"rx_mcast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_mcast_bytes)},
+ {"rx_bcast_bytes", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_bcast_bytes)},
+ {"rx_tpa_eligible_pkt", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_tpa_eligible_pkt)},
+ {"rx_tpa_eligible_bytes", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_tpa_eligible_bytes)},
+ {"rx_tpa_pkt", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_tpa_pkt)},
+ {"rx_tpa_bytes", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_tpa_bytes)},
+ {"rx_tpa_errors", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_tpa_errors)},
+ {"rx_tpa_events", offsetof(struct hwrm_func_qstats_ext_output,
+ rx_tpa_events)},
+};
+
static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
{"tx_ucast_pkts", offsetof(struct hwrm_func_qstats_output,
tx_ucast_pkts)},
@@ -417,6 +464,12 @@ static const struct bnxt_xstats_name_off bnxt_rx_ext_stats_strings[] = {
rx_discard_packets_cos6)},
{"rx_discard_packets_cos7", offsetof(struct rx_port_stats_ext,
rx_discard_packets_cos7)},
+ {"rx_fec_corrected_blocks", offsetof(struct rx_port_stats_ext,
+ rx_fec_corrected_blocks)},
+ {"rx_fec_uncorrectable_blocks", offsetof(struct rx_port_stats_ext,
+ rx_fec_uncorrectable_blocks)},
+ {"rx_filter_miss", offsetof(struct rx_port_stats_ext,
+ rx_filter_miss)},
};
static const struct bnxt_xstats_name_off bnxt_tx_ext_stats_strings[] = {
@@ -506,6 +559,45 @@ void bnxt_free_stats(struct bnxt *bp)
}
}
+static void bnxt_fill_rte_eth_stats_ext(struct rte_eth_stats *stats,
+ struct bnxt_ring_stats_ext *ring_stats,
+ unsigned int i, bool rx)
+{
+ if (rx) {
+ stats->q_ipackets[i] = ring_stats->rx_ucast_pkts;
+ stats->q_ipackets[i] += ring_stats->rx_mcast_pkts;
+ stats->q_ipackets[i] += ring_stats->rx_bcast_pkts;
+
+ stats->ipackets += stats->q_ipackets[i];
+
+ stats->q_ibytes[i] = ring_stats->rx_ucast_bytes;
+ stats->q_ibytes[i] += ring_stats->rx_mcast_bytes;
+ stats->q_ibytes[i] += ring_stats->rx_bcast_bytes;
+
+ stats->ibytes += stats->q_ibytes[i];
+
+ stats->q_errors[i] = ring_stats->rx_discard_pkts;
+ stats->q_errors[i] += ring_stats->rx_error_pkts;
+
+ stats->imissed += ring_stats->rx_discard_pkts;
+ stats->ierrors += ring_stats->rx_error_pkts;
+ } else {
+ stats->q_opackets[i] = ring_stats->tx_ucast_pkts;
+ stats->q_opackets[i] += ring_stats->tx_mcast_pkts;
+ stats->q_opackets[i] += ring_stats->tx_bcast_pkts;
+
+ stats->opackets += stats->q_opackets[i];
+
+ stats->q_obytes[i] = ring_stats->tx_ucast_bytes;
+ stats->q_obytes[i] += ring_stats->tx_mcast_bytes;
+ stats->q_obytes[i] += ring_stats->tx_bcast_bytes;
+
+ stats->obytes += stats->q_obytes[i];
+
+ stats->oerrors += ring_stats->tx_discard_pkts;
+ }
+}
+
static void bnxt_fill_rte_eth_stats(struct rte_eth_stats *stats,
struct bnxt_ring_stats *ring_stats,
unsigned int i, bool rx)
@@ -545,6 +637,57 @@ static void bnxt_fill_rte_eth_stats(struct rte_eth_stats *stats,
}
}
+static int bnxt_stats_get_op_ext(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *bnxt_stats)
+{
+ int rc = 0;
+ unsigned int i;
+ struct bnxt *bp = eth_dev->data->dev_private;
+ unsigned int num_q_stats;
+
+ num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
+ (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ for (i = 0; i < num_q_stats; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring_stats_ext ring_stats = {0};
+
+ if (!rxq->rx_started)
+ continue;
+
+ rc = bnxt_hwrm_ring_stats_ext(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats, true);
+ if (unlikely(rc))
+ return rc;
+
+ bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
+ bnxt_stats->rx_nombuf +=
+ __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ }
+
+ num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
+ (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ for (i = 0; i < num_q_stats; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring_stats_ext ring_stats = {0};
+
+ if (!txq->tx_started)
+ continue;
+
+ rc = bnxt_hwrm_ring_stats_ext(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats, false);
+ if (unlikely(rc))
+ return rc;
+
+ bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, false);
+ }
+
+ return rc;
+}
+
int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *bnxt_stats)
{
@@ -560,6 +703,9 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
if (!eth_dev->data->dev_started)
return -EIO;
+ if (BNXT_TPA_V2_P7(bp))
+ return bnxt_stats_get_op_ext(eth_dev, bnxt_stats);
+
num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
(unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
@@ -609,8 +755,17 @@ static void bnxt_clear_prev_stat(struct bnxt *bp)
* Clear the cached values of stats returned by HW in the previous
* get operation.
*/
- memset(bp->prev_rx_ring_stats, 0, sizeof(struct bnxt_ring_stats) * bp->rx_cp_nr_rings);
- memset(bp->prev_tx_ring_stats, 0, sizeof(struct bnxt_ring_stats) * bp->tx_cp_nr_rings);
+ if (BNXT_TPA_V2_P7(bp)) {
+ memset(bp->prev_rx_ring_stats_ext, 0,
+ sizeof(struct bnxt_ring_stats_ext) * bp->rx_cp_nr_rings);
+ memset(bp->prev_tx_ring_stats_ext, 0,
+ sizeof(struct bnxt_ring_stats_ext) * bp->tx_cp_nr_rings);
+ } else {
+ memset(bp->prev_rx_ring_stats, 0,
+ sizeof(struct bnxt_ring_stats) * bp->rx_cp_nr_rings);
+ memset(bp->prev_tx_ring_stats, 0,
+ sizeof(struct bnxt_ring_stats) * bp->tx_cp_nr_rings);
+ }
}
int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
@@ -640,6 +795,42 @@ int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
return ret;
}
+static void bnxt_fill_func_qstats_ext(struct hwrm_func_qstats_ext_output *func_qstats,
+ struct bnxt_ring_stats_ext *ring_stats,
+ bool rx)
+{
+ if (rx) {
+ func_qstats->rx_ucast_pkts += ring_stats->rx_ucast_pkts;
+ func_qstats->rx_mcast_pkts += ring_stats->rx_mcast_pkts;
+ func_qstats->rx_bcast_pkts += ring_stats->rx_bcast_pkts;
+
+ func_qstats->rx_ucast_bytes += ring_stats->rx_ucast_bytes;
+ func_qstats->rx_mcast_bytes += ring_stats->rx_mcast_bytes;
+ func_qstats->rx_bcast_bytes += ring_stats->rx_bcast_bytes;
+
+ func_qstats->rx_discard_pkts += ring_stats->rx_discard_pkts;
+ func_qstats->rx_error_pkts += ring_stats->rx_error_pkts;
+
+ func_qstats->rx_tpa_eligible_pkt += ring_stats->rx_tpa_eligible_pkt;
+ func_qstats->rx_tpa_eligible_bytes += ring_stats->rx_tpa_eligible_bytes;
+ func_qstats->rx_tpa_pkt += ring_stats->rx_tpa_pkt;
+ func_qstats->rx_tpa_bytes += ring_stats->rx_tpa_bytes;
+ func_qstats->rx_tpa_errors += ring_stats->rx_tpa_errors;
+ func_qstats->rx_tpa_events += ring_stats->rx_tpa_events;
+ } else {
+ func_qstats->tx_ucast_pkts += ring_stats->tx_ucast_pkts;
+ func_qstats->tx_mcast_pkts += ring_stats->tx_mcast_pkts;
+ func_qstats->tx_bcast_pkts += ring_stats->tx_bcast_pkts;
+
+ func_qstats->tx_ucast_bytes += ring_stats->tx_ucast_bytes;
+ func_qstats->tx_mcast_bytes += ring_stats->tx_mcast_bytes;
+ func_qstats->tx_bcast_bytes += ring_stats->tx_bcast_bytes;
+
+ func_qstats->tx_error_pkts += ring_stats->tx_error_pkts;
+ func_qstats->tx_discard_pkts += ring_stats->tx_discard_pkts;
+ }
+}
+
static void bnxt_fill_func_qstats(struct hwrm_func_qstats_output *func_qstats,
struct bnxt_ring_stats *ring_stats,
bool rx)
@@ -683,16 +874,21 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
unsigned int tx_port_stats_ext_cnt;
unsigned int stat_size = sizeof(uint64_t);
struct hwrm_func_qstats_output func_qstats = {0};
- unsigned int stat_count;
+ struct hwrm_func_qstats_ext_output func_qstats_ext = {0};
+ unsigned int stat_count, sz;
int rc;
rc = is_bnxt_in_error(bp);
if (rc)
return rc;
+ if (BNXT_TPA_V2_P7(bp))
+ sz = RTE_DIM(bnxt_func_stats_ext_strings);
+ else
+ sz = RTE_DIM(bnxt_func_stats_strings);
+
stat_count = RTE_DIM(bnxt_rx_stats_strings) +
- RTE_DIM(bnxt_tx_stats_strings) +
- RTE_DIM(bnxt_func_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) + sz +
RTE_DIM(bnxt_rx_ext_stats_strings) +
RTE_DIM(bnxt_tx_ext_stats_strings) +
bnxt_flow_stats_cnt(bp);
@@ -704,32 +900,51 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
struct bnxt_ring_stats ring_stats = {0};
+ struct bnxt_ring_stats_ext ring_stats_ext = {0};
if (!rxq->rx_started)
continue;
- rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
- &ring_stats, true);
+ if (BNXT_TPA_V2_P7(bp))
+ rc = bnxt_hwrm_ring_stats_ext(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats_ext, true);
+ else
+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats, true);
+
if (unlikely(rc))
return rc;
- bnxt_fill_func_qstats(&func_qstats, &ring_stats, true);
+ if (BNXT_TPA_V2_P7(bp))
+ bnxt_fill_func_qstats_ext(&func_qstats_ext,
+ &ring_stats_ext, true);
+ else
+ bnxt_fill_func_qstats(&func_qstats, &ring_stats, true);
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
struct bnxt_ring_stats ring_stats = {0};
+ struct bnxt_ring_stats_ext ring_stats_ext = {0};
if (!txq->tx_started)
continue;
- rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
- &ring_stats, false);
+ if (BNXT_TPA_V2_P7(bp))
+ rc = bnxt_hwrm_ring_stats_ext(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats_ext, false);
+ else
+ rc = bnxt_hwrm_ring_stats(bp, cpr->hw_stats_ctx_id, i,
+ &ring_stats, false);
if (unlikely(rc))
return rc;
- bnxt_fill_func_qstats(&func_qstats, &ring_stats, false);
+ if (BNXT_TPA_V2_P7(bp))
+ bnxt_fill_func_qstats_ext(&func_qstats_ext,
+ &ring_stats_ext, false);
+ else
+ bnxt_fill_func_qstats(&func_qstats, &ring_stats, false);
}
bnxt_hwrm_port_qstats(bp);
@@ -762,6 +977,15 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
count++;
}
+ if (BNXT_TPA_V2_P7(bp)) {
+ for (i = 0; i < RTE_DIM(bnxt_func_stats_ext_strings); i++) {
+ xstats[count].id = count;
+ xstats[count].value = *(uint64_t *)((char *)&func_qstats_ext +
+ bnxt_func_stats_ext_strings[i].offset);
+ count++;
+ }
+ goto skip_func_stats;
+ }
for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
xstats[count].id = count;
xstats[count].value = *(uint64_t *)((char *)&func_qstats +
@@ -769,6 +993,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
count++;
}
+skip_func_stats:
for (i = 0; i < rx_port_stats_ext_cnt; i++) {
uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext;
@@ -849,19 +1074,26 @@ int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
unsigned int size)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
- const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
- RTE_DIM(bnxt_tx_stats_strings) +
- RTE_DIM(bnxt_func_stats_strings) +
- RTE_DIM(bnxt_rx_ext_stats_strings) +
- RTE_DIM(bnxt_tx_ext_stats_strings) +
- bnxt_flow_stats_cnt(bp);
- unsigned int i, count = 0;
+ unsigned int stat_cnt;
+ unsigned int i, count = 0, sz;
int rc;
rc = is_bnxt_in_error(bp);
if (rc)
return rc;
+ if (BNXT_TPA_V2_P7(bp))
+ sz = RTE_DIM(bnxt_func_stats_ext_strings);
+ else
+ sz = RTE_DIM(bnxt_func_stats_strings);
+
+ stat_cnt = RTE_DIM(bnxt_rx_stats_strings) +
+ RTE_DIM(bnxt_tx_stats_strings) +
+ sz +
+ RTE_DIM(bnxt_rx_ext_stats_strings) +
+ RTE_DIM(bnxt_tx_ext_stats_strings) +
+ bnxt_flow_stats_cnt(bp);
+
if (xstats_names == NULL || size < stat_cnt)
return stat_cnt;
@@ -879,6 +1111,16 @@ int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
count++;
}
+ if (BNXT_TPA_V2_P7(bp)) {
+ for (i = 0; i < RTE_DIM(bnxt_func_stats_ext_strings); i++) {
+ strlcpy(xstats_names[count].name,
+ bnxt_func_stats_ext_strings[i].name,
+ sizeof(xstats_names[count].name));
+ count++;
+ }
+ goto skip_func_stats;
+ }
+
for (i = 0; i < RTE_DIM(bnxt_func_stats_strings); i++) {
strlcpy(xstats_names[count].name,
bnxt_func_stats_strings[i].name,
@@ -886,6 +1128,7 @@ int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
count++;
}
+skip_func_stats:
for (i = 0; i < RTE_DIM(bnxt_rx_ext_stats_strings); i++) {
strlcpy(xstats_names[count].name,
bnxt_rx_ext_stats_strings[i].name,
--
2.39.2 (Apple Git-143)