[PATCH 19/19] net/xsc: add dev basic stats ops
From: WanRenyong @ 2024-09-06 12:14 UTC
To: dev; +Cc: ferruh.yigit, nana, WanRenyong
Implement xsc ethdev basic stats get and reset functions.
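
For illustration only, a minimal sketch of how an application would
exercise the new ops through the generic ethdev API (the port_id value
and the error handling here are placeholders, not part of this patch):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static void
    dump_basic_stats(uint16_t port_id)
    {
        struct rte_eth_stats stats;

        /* Dispatches to xsc_ethdev_stats_get() through dev_ops */
        if (rte_eth_stats_get(port_id, &stats) != 0)
            return;

        printf("rx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64
               " errors, %" PRIu64 " nombuf\n",
               stats.ipackets, stats.ibytes, stats.ierrors,
               stats.rx_nombuf);
        printf("tx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64
               " errors\n",
               stats.opackets, stats.obytes, stats.oerrors);

        /* Dispatches to xsc_ethdev_stats_reset() through dev_ops */
        rte_eth_stats_reset(port_id);
    }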
Signed-off-by: WanRenyong <wanry@yunsilicon.com>
---
 doc/guides/nics/features/xsc.ini |  1 +
 drivers/net/xsc/xsc_ethdev.c     | 76 ++++++++++++++++++++++++++++++++
 drivers/net/xsc/xsc_rxtx.c       | 11 ++++-
 drivers/net/xsc/xsc_rxtx.h       | 15 +++++++
 4 files changed, 102 insertions(+), 1 deletion(-)
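
A note for readers: as implemented in xsc_ethdev_stats_get() below,
per-queue counters are only exported for queue indexes below
RTE_ETHDEV_QUEUE_STAT_CNTRS (16 by default); queues past that limit
still feed the aggregate counters. An illustrative read-out of the
per-queue arrays (port 0 assumed) could look like:

    struct rte_eth_stats stats;
    unsigned int q;

    if (rte_eth_stats_get(0, &stats) == 0)
        for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
            printf("rxq %u: %" PRIu64 " pkts, txq %u: %" PRIu64 " pkts\n",
                   q, stats.q_ipackets[q], q, stats.q_opackets[q]);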
diff --git a/doc/guides/nics/features/xsc.ini b/doc/guides/nics/features/xsc.ini
index 84c5ff4b6b..d73cf9d136 100644
--- a/doc/guides/nics/features/xsc.ini
+++ b/doc/guides/nics/features/xsc.ini
@@ -12,6 +12,7 @@ L3 checksum offload = Y
 L4 checksum offload = Y
 Inner L3 checksum = Y
 Inner L4 checksum = Y
+Basic stats = Y
 Linux = Y
 ARMv8 = Y
 x86-64 = Y
diff --git a/drivers/net/xsc/xsc_ethdev.c b/drivers/net/xsc/xsc_ethdev.c
index 0c8a620d03..b3ae7ba75e 100644
--- a/drivers/net/xsc/xsc_ethdev.c
+++ b/drivers/net/xsc/xsc_ethdev.c
@@ -1089,6 +1089,80 @@ xsc_ethdev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	return 0;
 }
 
+static int
+xsc_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+	uint32_t rxqs_n = priv->num_rq;
+	uint32_t txqs_n = priv->num_sq;
+	uint32_t i, idx;
+	struct xsc_rxq_data *rxq;
+	struct xsc_txq_data *txq;
+
+	memset(stats, 0, sizeof(struct rte_eth_stats));
+	for (i = 0; i < rxqs_n; ++i) {
+		rxq = xsc_rxq_get(dev, i);
+		if (unlikely(rxq == NULL))
+			continue;
+
+		idx = rxq->idx;
+		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+			stats->q_ipackets[idx] += rxq->stats.rx_pkts;
+			stats->q_ibytes[idx] += rxq->stats.rx_bytes;
+			stats->q_errors[idx] += (rxq->stats.rx_errors +
+						 rxq->stats.rx_nombuf);
+		}
+		stats->ipackets += rxq->stats.rx_pkts;
+		stats->ibytes += rxq->stats.rx_bytes;
+		stats->ierrors += rxq->stats.rx_errors;
+		stats->rx_nombuf += rxq->stats.rx_nombuf;
+	}
+
+	for (i = 0; i < txqs_n; ++i) {
+		txq = xsc_txq_get(dev, i);
+		if (unlikely(txq == NULL))
+			continue;
+
+		idx = txq->idx;
+		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+			stats->q_opackets[idx] += txq->stats.tx_pkts;
+			stats->q_obytes[idx] += txq->stats.tx_bytes;
+			stats->q_errors[idx] += txq->stats.tx_errors;
+		}
+		stats->opackets += txq->stats.tx_pkts;
+		stats->obytes += txq->stats.tx_bytes;
+		stats->oerrors += txq->stats.tx_errors;
+	}
+
+	return 0;
+}
+
+static int
+xsc_ethdev_stats_reset(struct rte_eth_dev *dev)
+{
+	struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+	uint32_t rxqs_n = priv->num_rq;
+	uint32_t txqs_n = priv->num_sq;
+	uint32_t i;
+	struct xsc_rxq_data *rxq;
+	struct xsc_txq_data *txq;
+
+	for (i = 0; i < rxqs_n; ++i) {
+		rxq = xsc_rxq_get(dev, i);
+		if (unlikely(rxq == NULL))
+			continue;
+		memset(&rxq->stats, 0, sizeof(struct xsc_rxq_stats));
+	}
+	for (i = 0; i < txqs_n; ++i) {
+		txq = xsc_txq_get(dev, i);
+		if (unlikely(txq == NULL))
+			continue;
+		memset(&txq->stats, 0, sizeof(struct xsc_txq_stats));
+	}
+
+	return 0;
+}
+
 static int
 xsc_ethdev_link_update(__rte_unused struct rte_eth_dev *dev,
 		       __rte_unused int wait_to_complete)
@@ -1104,6 +1178,8 @@ const struct eth_dev_ops xsc_dev_ops = {
 	.dev_set_link_up = xsc_ethdev_set_link_up,
 	.dev_close = xsc_ethdev_close,
 	.link_update = xsc_ethdev_link_update,
+	.stats_get = xsc_ethdev_stats_get,
+	.stats_reset = xsc_ethdev_stats_reset,
 	.dev_infos_get = xsc_ethdev_infos_get,
 	.rx_queue_setup = xsc_ethdev_rx_queue_setup,
 	.tx_queue_setup = xsc_ethdev_tx_queue_setup,
diff --git a/drivers/net/xsc/xsc_rxtx.c b/drivers/net/xsc/xsc_rxtx.c
index 7a31cd428c..8aed8f4b12 100644
--- a/drivers/net/xsc/xsc_rxtx.c
+++ b/drivers/net/xsc/xsc_rxtx.c
@@ -62,6 +62,7 @@ xsc_rx_poll_len(struct xsc_rxq_data *rxq, volatile struct xsc_cqe *cqe)
 	ret = check_cqe_own(cqe, rxq->cqe_n, rxq->cq_ci);
 	if (unlikely(ret != XSC_CQE_OWNER_SW)) {
 		if (unlikely(ret == XSC_CQE_OWNER_ERR)) {
+			++rxq->stats.rx_errors;
 			/* TODO */
 			if (ret == XSC_CQE_OWNER_HW ||
 			    ret == -1)
@@ -116,8 +117,10 @@ xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rte_prefetch0(wqe);
 
 		rep = rte_mbuf_raw_alloc(seg->pool);
-		if (unlikely(rep == NULL))
+		if (unlikely(rep == NULL)) {
+			++rxq->stats.rx_nombuf;
 			break;
+		}
 
 		if (!pkt) {
 			if (read_cqe_num) {
@@ -166,6 +169,7 @@ xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Fill wqe */
 		wqe->va = rte_cpu_to_le_64(rte_pktmbuf_iova(rep));
 		rte_pktmbuf_data_len(seg) = len;
+		rxq->stats.rx_bytes += rte_pktmbuf_pkt_len(pkt);
 
 		*(pkts++) = pkt;
 		pkt = NULL;
@@ -200,6 +204,7 @@ xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rxq->nb_rx_hold = 0;
 	}
 
+	rxq->stats.rx_pkts += nb_pkts;
 	return nb_pkts;
 }
 
@@ -239,6 +244,7 @@ xsc_tx_cqes_handle(struct xsc_txq_data *__rte_restrict txq)
 			++txq->cq_ci;
 			txq->cq_pi = txq->cq_ci;
 			last_cqe = NULL;
+			++txq->stats.tx_errors;
 			continue;
 		}
 
@@ -348,6 +354,7 @@ xsc_tx_wqes_fill(struct xsc_txq_data *__rte_restrict txq,
 		/* init wqe data segs */
 		xsc_tx_wqe_data_seg_init(mbuf, wqe);
 		++txq->wqe_ci;
+		txq->stats.tx_bytes += rte_pktmbuf_pkt_len(mbuf);
 	}
 
 	return wqe;
@@ -432,5 +439,7 @@ xsc_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		goto loop;
 
 exit:
+
+	txq->stats.tx_pkts += (pkts_n - remain_n);
 	return pkts_n - remain_n;
 }
diff --git a/drivers/net/xsc/xsc_rxtx.h b/drivers/net/xsc/xsc_rxtx.h
index ce6e47ad4c..3d1e3ba83a 100644
--- a/drivers/net/xsc/xsc_rxtx.h
+++ b/drivers/net/xsc/xsc_rxtx.h
@@ -85,6 +85,12 @@ struct xsc_cqe {
 	uint8_t owner:1;
 };
 
+struct xsc_txq_stats {
+	uint64_t tx_pkts; /* Total number of tx packets */
+	uint64_t tx_bytes; /* Total number of tx bytes */
+	uint64_t tx_errors; /* Total number of tx error packets */
+};
+
 struct __rte_cache_aligned xsc_txq_data {
 	uint16_t idx; /*QP idx */
 	uint16_t port_id;
@@ -116,12 +122,20 @@ struct __rte_cache_aligned xsc_txq_data {
 	volatile uint32_t *qp_db;
 	volatile uint32_t *cq_db;
 	struct xsc_ethdev_priv *priv;
+	struct xsc_txq_stats stats;
 	uint32_t socket;
 	uint8_t tso_en:1; /* TSO enable 0-off 1-on */
 	uint16_t *fcqs; /* Free completion queue. */
 	struct rte_mbuf *elts[0]; /* Storage for queued packets, for free */
 };
 
+struct xsc_rxq_stats {
+	uint64_t rx_pkts; /* Total number of rx packets */
+	uint64_t rx_bytes; /* Total number of rx bytes */
+	uint64_t rx_errors; /* Total number of rx error packets */
+	uint64_t rx_nombuf; /* Total number of rx mbuf alloc failed */
+};
+
 struct xsc_cqe_u64 {
 	struct xsc_cqe cqe0;
 	struct xsc_cqe cqe1;
@@ -158,6 +172,7 @@ struct __rte_cache_aligned xsc_rxq_data {
 	const struct rte_memzone *rq_pas; /* Palist memory */
 	uint32_t socket;
 	struct xsc_ethdev_priv *priv;
+	struct xsc_rxq_stats stats;
 	/* attr */
 	uint32_t csum:1; /* Checksum offloading enable */
 	uint32_t hw_timestamp:1;
--
2.25.1