From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id ADEB1461C1; Sat, 8 Feb 2025 03:46:41 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id B383D40A72; Sat, 8 Feb 2025 03:45:08 +0100 (CET) Received: from localhost.localdomain (unknown [103.233.162.252]) by mails.dpdk.org (Postfix) with ESMTP id C1B9640684 for ; Sat, 8 Feb 2025 03:45:02 +0100 (CET) Received: by localhost.localdomain (Postfix, from userid 0) id 0578BA324A; Sat, 8 Feb 2025 10:44:22 +0800 (CST) From: Wenbo Cao To: thomas@monjalon.net, Wenbo Cao Cc: stephen@networkplumber.org, dev@dpdk.org, ferruh.yigit@amd.com, andrew.rybchenko@oktetlabs.ru, yaojun@mucse.com Subject: [PATCH v7 19/28] net/rnp: add support for basic stats operations Date: Sat, 8 Feb 2025 10:43:56 +0800 Message-Id: <1738982645-34550-20-git-send-email-caowenbo@mucse.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1738982645-34550-1-git-send-email-caowenbo@mucse.com> References: <1738982645-34550-1-git-send-email-caowenbo@mucse.com> X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add support for hardware-missed counters and per-queue Rx/Tx packet and byte statistics. 
Signed-off-by: Wenbo Cao --- doc/guides/nics/features/rnp.ini | 2 + doc/guides/nics/rnp.rst | 1 + drivers/net/rnp/base/rnp_eth_regs.h | 3 + drivers/net/rnp/rnp.h | 10 ++- drivers/net/rnp/rnp_ethdev.c | 147 ++++++++++++++++++++++++++++++++++++ drivers/net/rnp/rnp_rxtx.c | 9 +++ drivers/net/rnp/rnp_rxtx.h | 10 +++ 7 files changed, 181 insertions(+), 1 deletion(-) diff --git a/doc/guides/nics/features/rnp.ini b/doc/guides/nics/features/rnp.ini index c68d6fb..45dae3b 100644 --- a/doc/guides/nics/features/rnp.ini +++ b/doc/guides/nics/features/rnp.ini @@ -7,6 +7,8 @@ Speed capabilities = Y Link status = Y Link status event = Y +Basic stats = Y +Stats per queue = Y Queue start/stop = Y Promiscuous mode = Y Allmulticast mode = Y diff --git a/doc/guides/nics/rnp.rst b/doc/guides/nics/rnp.rst index db64104..ec6f3f9 100644 --- a/doc/guides/nics/rnp.rst +++ b/doc/guides/nics/rnp.rst @@ -19,6 +19,7 @@ Features - MTU update - Jumbo frames - Scatter-Gather IO support +- Port hardware statistic Prerequisites ------------- diff --git a/drivers/net/rnp/base/rnp_eth_regs.h b/drivers/net/rnp/base/rnp_eth_regs.h index 91a18dd..391688b 100644 --- a/drivers/net/rnp/base/rnp_eth_regs.h +++ b/drivers/net/rnp/base/rnp_eth_regs.h @@ -23,6 +23,9 @@ #define RNP_RX_FC_ENABLE _ETH_(0x8520) #define RNP_RING_FC_EN(n) _ETH_(0x8524 + ((0x4) * ((n) / 32))) #define RNP_RING_FC_THRESH(n) _ETH_(0x8a00 + ((0x4) * (n))) +/* ETH Statistic */ +#define RNP_ETH_RXTRANS_DROP _ETH_(0x8904) +#define RNP_ETH_RXTRUNC_DROP _ETH_(0x8928) /* Mac Host Filter */ #define RNP_MAC_FCTRL _ETH_(0x9110) #define RNP_MAC_FCTRL_MPE RTE_BIT32(8) /* Multicast Promiscuous En */ diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h index 054382e..b4f4f28 100644 --- a/drivers/net/rnp/rnp.h +++ b/drivers/net/rnp/rnp.h @@ -10,7 +10,7 @@ #include "base/rnp_hw.h" #define PCI_VENDOR_ID_MUCSE (0x8848) -#define RNP_DEV_ID_N10G (0x1000) +#define RNP_DEV_ID_N10G (0x1020) #define RNP_MAX_VF_NUM (64) #define RNP_MISC_VEC_ID 
RTE_INTR_VEC_ZERO_OFFSET /* maximum frame size supported */ @@ -105,6 +105,11 @@ struct rnp_proc_priv { const struct rnp_mbx_ops *mbx_ops; }; +struct rnp_hw_eth_stats { + uint64_t rx_trans_drop; /* rx eth to dma fifo full drop */ + uint64_t rx_trunc_drop; /* rx mac to eth to host copy fifo full drop */ +}; + struct rnp_eth_port { struct rnp_proc_priv *proc_priv; struct rte_ether_addr mac_addr; @@ -113,6 +118,9 @@ struct rnp_eth_port { struct rnp_tx_queue *tx_queues[RNP_MAX_RX_QUEUE_NUM]; struct rnp_hw *hw; + struct rnp_hw_eth_stats eth_stats_old; + struct rnp_hw_eth_stats eth_stats; + struct rte_eth_rss_conf rss_conf; uint16_t last_rx_num; bool rxq_num_changed; diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c index 0fcb256..fa2617b 100644 --- a/drivers/net/rnp/rnp_ethdev.c +++ b/drivers/net/rnp/rnp_ethdev.c @@ -770,6 +770,150 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev) return 0; } +struct rte_rnp_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t offset; + uint32_t reg_base; + bool hi_addr_en; +}; + +static const struct rte_rnp_xstats_name_off rte_rnp_rx_eth_stats_str[] = { + {"eth rx full drop", offsetof(struct rnp_hw_eth_stats, + rx_trans_drop), RNP_ETH_RXTRANS_DROP, false}, + {"eth_rx_fifo_drop", offsetof(struct rnp_hw_eth_stats, + rx_trunc_drop), RNP_ETH_RXTRUNC_DROP, false}, +}; +#define RNP_NB_RX_HW_ETH_STATS (RTE_DIM(rte_rnp_rx_eth_stats_str)) +#define RNP_GET_E_HW_COUNT(stats, offset) \ + ((uint64_t *)(((char *)stats) + (offset))) +#define RNP_ADD_INCL_COUNT(stats, offset, val) \ + ((*(RNP_GET_E_HW_COUNT(stats, (offset)))) += val) + +static inline void +rnp_update_eth_stats_32bit(struct rnp_hw_eth_stats *new, + struct rnp_hw_eth_stats *old, + uint32_t offset, uint32_t val) +{ + uint64_t *last_count = NULL; + + last_count = RNP_GET_E_HW_COUNT(old, offset); + if (val >= *last_count) + RNP_ADD_INCL_COUNT(new, offset, val - (*last_count)); + else + RNP_ADD_INCL_COUNT(new, offset, val + 
UINT32_MAX); + *last_count = val; +} + +static void rnp_get_eth_count(struct rnp_hw *hw, + uint16_t lane, + struct rnp_hw_eth_stats *new, + struct rnp_hw_eth_stats *old, + const struct rte_rnp_xstats_name_off *ptr) +{ + uint64_t val = 0; + + if (ptr->reg_base) { + val = RNP_E_REG_RD(hw, ptr->reg_base + 0x40 * lane); + rnp_update_eth_stats_32bit(new, old, ptr->offset, val); + } +} + +static void rnp_get_hw_stats(struct rte_eth_dev *dev) +{ + struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev); + struct rnp_hw_eth_stats *old = &port->eth_stats_old; + struct rnp_hw_eth_stats *new = &port->eth_stats; + const struct rte_rnp_xstats_name_off *ptr; + uint16_t lane = port->attr.nr_lane; + struct rnp_hw *hw = port->hw; + uint16_t i; + + for (i = 0; i < RNP_NB_RX_HW_ETH_STATS; i++) { + ptr = &rte_rnp_rx_eth_stats_str[i]; + rnp_get_eth_count(hw, lane, new, old, ptr); + } +} + +static int +rnp_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev); + struct rnp_hw_eth_stats *eth_stats = &port->eth_stats; + struct rte_eth_dev_data *data = dev->data; + int i = 0; + + PMD_INIT_FUNC_TRACE(); + rnp_get_hw_stats(dev); + for (i = 0; i < data->nb_rx_queues; i++) { + if (!data->rx_queues[i]) + continue; + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_ipackets[i] = ((struct rnp_rx_queue **) + (data->rx_queues))[i]->stats.ipackets; + stats->q_ibytes[i] = ((struct rnp_rx_queue **) + (data->rx_queues))[i]->stats.ibytes; + stats->ipackets += stats->q_ipackets[i]; + stats->ibytes += stats->q_ibytes[i]; + } else { + stats->ipackets += ((struct rnp_rx_queue **) + (data->rx_queues))[i]->stats.ipackets; + stats->ibytes += ((struct rnp_rx_queue **) + (data->rx_queues))[i]->stats.ibytes; + } + } + + for (i = 0; i < data->nb_tx_queues; i++) { + if (!data->tx_queues[i]) + continue; + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = ((struct rnp_tx_queue **) + (data->tx_queues))[i]->stats.opackets; + 
stats->q_obytes[i] = ((struct rnp_tx_queue **) + (data->tx_queues))[i]->stats.obytes; + stats->opackets += stats->q_opackets[i]; + stats->obytes += stats->q_obytes[i]; + } else { + stats->opackets += ((struct rnp_tx_queue **) + (data->tx_queues))[i]->stats.opackets; + stats->obytes += ((struct rnp_tx_queue **) + (data->tx_queues))[i]->stats.obytes; + } + } + stats->imissed = eth_stats->rx_trans_drop + eth_stats->rx_trunc_drop; + + return 0; +} + +static int +rnp_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev); + struct rnp_hw_eth_stats *eth_stats = &port->eth_stats; + struct rnp_rx_queue *rxq; + struct rnp_tx_queue *txq; + uint16_t idx; + + PMD_INIT_FUNC_TRACE(); + memset(eth_stats, 0, sizeof(*eth_stats)); + for (idx = 0; idx < dev->data->nb_rx_queues; idx++) { + rxq = ((struct rnp_rx_queue **) + (dev->data->rx_queues))[idx]; + if (!rxq) + continue; + memset(&rxq->stats, 0, sizeof(struct rnp_queue_stats)); + } + for (idx = 0; idx < dev->data->nb_tx_queues; idx++) { + txq = ((struct rnp_tx_queue **) + (dev->data->tx_queues))[idx]; + if (!txq) + continue; + memset(&txq->stats, 0, sizeof(struct rnp_queue_stats)); + } + + return 0; +} + /* Features supported by this driver */ static const struct eth_dev_ops rnp_eth_dev_ops = { .dev_configure = rnp_dev_configure, @@ -793,6 +937,9 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev) .reta_query = rnp_dev_rss_reta_query, .rss_hash_update = rnp_dev_rss_hash_update, .rss_hash_conf_get = rnp_dev_rss_hash_conf_get, + /* stats */ + .stats_get = rnp_dev_stats_get, + .stats_reset = rnp_dev_stats_reset, /* link impl */ .link_update = rnp_dev_link_update, .dev_set_link_up = rnp_dev_set_link_up, diff --git a/drivers/net/rnp/rnp_rxtx.c b/drivers/net/rnp/rnp_rxtx.c index 777ce7b..c351fee 100644 --- a/drivers/net/rnp/rnp_rxtx.c +++ b/drivers/net/rnp/rnp_rxtx.c @@ -741,6 +741,8 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) nmb->packet_type = 0; 
nmb->ol_flags = 0; nmb->nb_segs = 1; + + rxq->stats.ibytes += nmb->data_len; } for (j = 0; j < nb_dd; ++j) { rx_pkts[i + j] = rx_swbd[j].mbuf; @@ -752,6 +754,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) if (nb_dd != RNP_CACHE_FETCH_RX) break; } + rxq->stats.ipackets += nb_rx; rxq->rx_tail = (rxq->rx_tail + nb_rx) & rxq->attr.nb_desc_mask; rxq->rxrearm_nb = rxq->rxrearm_nb + nb_rx; @@ -821,6 +824,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) txbd->d.blen = tx_swbd->mbuf->data_len; txbd->d.cmd = RNP_CMD_EOP; + txq->stats.obytes += txbd->d.blen; i = (i + 1) & txq->attr.nb_desc_mask; } txq->nb_tx_free -= start; @@ -832,6 +836,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) if (txq->tx_next_rs > txq->attr.nb_desc) txq->tx_next_rs = txq->tx_rs_thresh - 1; } + txq->stats.opackets += start; txq->tx_tail = i; rte_wmb(); @@ -936,6 +941,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) } rxm->next = NULL; first_seg->port = rxq->attr.port_id; + rxq->stats.ibytes += first_seg->pkt_len; /* this the end of packet the large pkt has been recv finish */ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, first_seg->data_off)); @@ -944,6 +950,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) } if (!nb_rx) return 0; + rxq->stats.ipackets += nb_rx; /* update sw record point */ rxq->rx_tail = rx_id; rxq->pkt_first_seg = first_seg; @@ -1033,6 +1040,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) tx_id = txe->next_id; txe = txn; } while (m_seg != NULL); + txq->stats.obytes += tx_pkt->pkt_len; txbd->d.cmd |= RNP_CMD_EOP; txq->nb_tx_used = (uint16_t)txq->nb_tx_used + nb_used_bd; txq->nb_tx_free = (uint16_t)txq->nb_tx_free - nb_used_bd; @@ -1044,6 +1052,7 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx) } if (!send_pkts) return 0; + txq->stats.opackets += send_pkts; txq->tx_tail = tx_id; rte_wmb(); diff --git 
a/drivers/net/rnp/rnp_rxtx.h b/drivers/net/rnp/rnp_rxtx.h index f631285..d26497a 100644 --- a/drivers/net/rnp/rnp_rxtx.h +++ b/drivers/net/rnp/rnp_rxtx.h @@ -47,6 +47,14 @@ struct rnp_rxsw_entry { struct rte_mbuf *mbuf; }; +struct rnp_queue_stats { + uint64_t obytes; + uint64_t opackets; + + uint64_t ibytes; + uint64_t ipackets; +}; + struct rnp_rx_queue { struct rte_mempool *mb_pool; /* mbuf pool to populate rx ring. */ const struct rte_memzone *rz; /* rx hw ring base alloc memzone */ @@ -73,6 +81,7 @@ struct rnp_rx_queue { uint8_t pthresh; /* rx desc prefetch threshold */ uint8_t pburst; /* rx desc prefetch burst */ + struct rnp_queue_stats stats; uint64_t rx_offloads; /* user set hw offload features */ struct rte_mbuf **free_mbufs; /* rx bulk alloc reserve of free mbufs */ struct rte_mbuf fake_mbuf; /* dummy mbuf */ @@ -113,6 +122,7 @@ struct rnp_tx_queue { uint8_t pthresh; /* rx desc prefetch threshold */ uint8_t pburst; /* rx desc burst*/ + struct rnp_queue_stats stats; uint64_t tx_offloads; /* tx offload features */ struct rte_mbuf **free_mbufs; /* tx bulk free reserve of free mbufs */ }; -- 1.8.3.1