Implement the basic statistics ops (stats_get/stats_reset) for the zxdh PMD,
covering per-port VQM, MAC and NP hardware counters and per-queue counters.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 doc/guides/nics/features/zxdh.ini  |   2 +
 doc/guides/nics/zxdh.rst           |   1 +
 drivers/net/zxdh/zxdh_ethdev.c     |   2 +
 drivers/net/zxdh/zxdh_ethdev_ops.c | 353 +++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev_ops.h |  27 +++
 drivers/net/zxdh/zxdh_msg.h        |  16 ++
 drivers/net/zxdh/zxdh_np.c         | 341 ++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_np.h         |  30 +++
 drivers/net/zxdh/zxdh_queue.h      |   2 +
 drivers/net/zxdh/zxdh_rxtx.c       |  83 ++++++-
 drivers/net/zxdh/zxdh_tables.h     |   5 +
 11 files changed, 859 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 415ca547d0..98c141cf95 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -22,3 +22,5 @@ QinQ offload         = Y
 RSS hash             = Y
 RSS reta update      = Y
 Inner RSS            = Y
+Basic stats          = Y
+Stats per queue      = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index 3cc6a1d348..c8a52b587c 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -32,6 +32,7 @@ Features of the ZXDH PMD are:
 - VLAN stripping and inserting
 - QINQ stripping and inserting
 - Receive Side Scaling (RSS)
+- Port hardware statistics
 
 
 Driver compilation and testing
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 17fca8e909..0326d143ec 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -1150,6 +1150,8 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
     .reta_query                 = zxdh_dev_rss_reta_query,
     .rss_hash_update         = zxdh_rss_hash_update,
     .rss_hash_conf_get         = zxdh_rss_hash_conf_get,
+    .stats_get                 = zxdh_dev_stats_get,
+    .stats_reset             = zxdh_dev_stats_reset,
 };
 
 static int32_t
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index c12947cb4d..2377ff202d 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -11,6 +11,8 @@
 #include "zxdh_ethdev_ops.h"
 #include "zxdh_tables.h"
 #include "zxdh_logs.h"
+#include "zxdh_rxtx.h"
+#include "zxdh_np.h"
 
 #define ZXDH_VLAN_FILTER_GROUPS       64
 #define ZXDH_INVALID_LOGIC_QID        0xFFFFU
@@ -22,6 +24,108 @@
 #define ZXDH_HF_MAC_VLAN     4
 #define ZXDH_HF_ALL          0
 
+/*
+ * Per-port MAC counter block, copied verbatim out of BAR0 by
+ * zxdh_hw_mac_stats_get(); __rte_packed because the field layout must
+ * match the hardware register block exactly.
+ */
+struct zxdh_hw_mac_stats {
+    uint64_t rx_total;
+    uint64_t rx_pause;
+    uint64_t rx_unicast;
+    uint64_t rx_multicast;
+    uint64_t rx_broadcast;
+    uint64_t rx_vlan;
+    uint64_t rx_size_64;
+    uint64_t rx_size_65_127;
+    uint64_t rx_size_128_255;
+    uint64_t rx_size_256_511;
+    uint64_t rx_size_512_1023;
+    uint64_t rx_size_1024_1518;
+    uint64_t rx_size_1519_mru;
+    uint64_t rx_undersize;
+    uint64_t rx_oversize;
+    uint64_t rx_fragment;
+    uint64_t rx_jabber;
+    uint64_t rx_control;
+    uint64_t rx_eee;
+
+    uint64_t tx_total;
+    uint64_t tx_pause;
+    uint64_t tx_unicast;
+    uint64_t tx_multicast;
+    uint64_t tx_broadcast;
+    uint64_t tx_vlan;
+    uint64_t tx_size_64;
+    uint64_t tx_size_65_127;
+    uint64_t tx_size_128_255;
+    uint64_t tx_size_256_511;
+    uint64_t tx_size_512_1023;
+    uint64_t tx_size_1024_1518;
+    uint64_t tx_size_1519_mtu;
+    uint64_t tx_undersize;
+    uint64_t tx_oversize;
+    uint64_t tx_fragment;
+    uint64_t tx_jabber;
+    uint64_t tx_control;
+    uint64_t tx_eee;
+
+    uint64_t rx_error;
+    uint64_t rx_fcs_error;
+    uint64_t rx_drop;
+
+    uint64_t tx_error;
+    uint64_t tx_fcs_error;
+    uint64_t tx_drop;
+
+} __rte_packed;
+
+/* MAC byte counters; kept by hardware in a separate block at
+ * ZXDH_MAC_BYTES_OFFSET, hence a separate struct.
+ */
+struct zxdh_hw_mac_bytes {
+    uint64_t rx_total_bytes;
+    uint64_t rx_good_bytes;
+    uint64_t tx_total_bytes;
+    uint64_t tx_good_bytes;
+} __rte_packed;
+
+/* Drop packet/byte counter pair as returned by an NP DTB stats dump. */
+struct zxdh_np_stats_data {
+    uint64_t n_pkts_dropped;
+    uint64_t n_bytes_dropped;
+};
+
+/* Maps an xstats display name to a byte offset inside a rx/tx queue
+ * structure, so counters can be fetched generically by offset.
+ */
+struct zxdh_xstats_name_off {
+    char name[RTE_ETH_XSTATS_NAME_SIZE];
+    unsigned int offset;
+};
+
+/* Per-rx-queue counter name/offset table.  Order matters:
+ * zxdh_dev_stats_get() indexes entries [0] (packets), [1] (bytes),
+ * [2] (errors) and [5] (truncated_err) directly.
+ */
+static const struct zxdh_xstats_name_off zxdh_rxq_stat_strings[] = {
+    {"good_packets",           offsetof(struct zxdh_virtnet_rx, stats.packets)},
+    {"good_bytes",             offsetof(struct zxdh_virtnet_rx, stats.bytes)},
+    {"errors",                 offsetof(struct zxdh_virtnet_rx, stats.errors)},
+    {"multicast_packets",      offsetof(struct zxdh_virtnet_rx, stats.multicast)},
+    {"broadcast_packets",      offsetof(struct zxdh_virtnet_rx, stats.broadcast)},
+    {"truncated_err",          offsetof(struct zxdh_virtnet_rx, stats.truncated_err)},
+    {"undersize_packets",      offsetof(struct zxdh_virtnet_rx, stats.size_bins[0])},
+    {"size_64_packets",        offsetof(struct zxdh_virtnet_rx, stats.size_bins[1])},
+    {"size_65_127_packets",    offsetof(struct zxdh_virtnet_rx, stats.size_bins[2])},
+    {"size_128_255_packets",   offsetof(struct zxdh_virtnet_rx, stats.size_bins[3])},
+    {"size_256_511_packets",   offsetof(struct zxdh_virtnet_rx, stats.size_bins[4])},
+    {"size_512_1023_packets",  offsetof(struct zxdh_virtnet_rx, stats.size_bins[5])},
+    {"size_1024_1518_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[6])},
+    {"size_1519_max_packets",  offsetof(struct zxdh_virtnet_rx, stats.size_bins[7])},
+};
+
+/* Per-tx-queue counter table; same index contract as the rx table above. */
+static const struct zxdh_xstats_name_off zxdh_txq_stat_strings[] = {
+    {"good_packets",           offsetof(struct zxdh_virtnet_tx, stats.packets)},
+    {"good_bytes",             offsetof(struct zxdh_virtnet_tx, stats.bytes)},
+    {"errors",                 offsetof(struct zxdh_virtnet_tx, stats.errors)},
+    {"multicast_packets",      offsetof(struct zxdh_virtnet_tx, stats.multicast)},
+    {"broadcast_packets",      offsetof(struct zxdh_virtnet_tx, stats.broadcast)},
+    {"truncated_err",          offsetof(struct zxdh_virtnet_tx, stats.truncated_err)},
+    {"undersize_packets",      offsetof(struct zxdh_virtnet_tx, stats.size_bins[0])},
+    {"size_64_packets",        offsetof(struct zxdh_virtnet_tx, stats.size_bins[1])},
+    {"size_65_127_packets",    offsetof(struct zxdh_virtnet_tx, stats.size_bins[2])},
+    {"size_128_255_packets",   offsetof(struct zxdh_virtnet_tx, stats.size_bins[3])},
+    {"size_256_511_packets",   offsetof(struct zxdh_virtnet_tx, stats.size_bins[4])},
+    {"size_512_1023_packets",  offsetof(struct zxdh_virtnet_tx, stats.size_bins[5])},
+    {"size_1024_1518_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[6])},
+    {"size_1519_max_packets",  offsetof(struct zxdh_virtnet_tx, stats.size_bins[7])},
+};
+
 static int32_t zxdh_config_port_status(struct rte_eth_dev *dev, uint16_t link_status)
 {
     struct zxdh_hw *hw = dev->data->dev_private;
@@ -1162,3 +1266,252 @@ zxdh_rss_configure(struct rte_eth_dev *dev)
     }
     return 0;
 }
+
+/*
+ * Ask the RISC-V agent for a counter snapshot.  @opcode selects the
+ * counter set and also decides which BAR message module (VQM or MAC)
+ * the request is routed through.  On success the reply payload is
+ * copied into @hw_stats.
+ *
+ * Returns 0 on success, -1 for an unknown opcode, -EAGAIN when the
+ * message channel fails.
+ */
+static int32_t
+zxdh_hw_vqm_stats_get(struct rte_eth_dev *dev, enum zxdh_agent_msg_type opcode,
+            struct zxdh_hw_vqm_stats *hw_stats)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_msg_info msg_info = {0};
+    struct zxdh_msg_reply_info reply_info = {0};
+    enum ZXDH_BAR_MODULE_ID module_id;
+    int ret = 0;
+
+    switch (opcode) {
+    case ZXDH_VQM_DEV_STATS_GET:
+    case ZXDH_VQM_QUEUE_STATS_GET:
+    case ZXDH_VQM_QUEUE_STATS_RESET:
+        module_id = ZXDH_BAR_MODULE_VQM;
+        break;
+    case ZXDH_MAC_STATS_GET:
+    case ZXDH_MAC_STATS_RESET:
+        module_id = ZXDH_BAR_MODULE_MAC;
+        break;
+    default:
+        PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
+        return -1;
+    }
+
+    zxdh_agent_msg_build(hw, opcode, &msg_info);
+
+    ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
+                &reply_info, sizeof(struct zxdh_msg_reply_info), module_id);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "Failed to get hw stats");
+        return -EAGAIN;
+    }
+    struct zxdh_msg_reply_body *reply_body = &reply_info.reply_body;
+
+    /* NOTE(review): the vqm_stats union member is copied for every
+     * opcode, including the MAC ones -- confirm the agent replies with
+     * the same layout in that case.
+     */
+    rte_memcpy(hw_stats, &reply_body->vqm_stats, sizeof(struct zxdh_hw_vqm_stats));
+    return 0;
+}
+
+/*
+ * Copy the MAC counter and byte-counter blocks for this port straight
+ * out of BAR0.  Ports at <= 25G each own one slice selected by
+ * (phyport % 4); faster ports use the aggregate slot that follows the
+ * four per-lane slices -- TODO confirm slice layout against HW spec.
+ *
+ * The slice strides are the packed struct sizes themselves (352 and 32
+ * bytes), so use sizeof instead of repeating the magic numbers.
+ */
+static int zxdh_hw_mac_stats_get(struct rte_eth_dev *dev,
+                struct zxdh_hw_mac_stats *mac_stats,
+                struct zxdh_hw_mac_bytes *mac_bytes)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MAC_OFFSET);
+    uint64_t stats_addr = 0;
+    uint64_t bytes_addr = 0;
+
+    if (hw->speed <= RTE_ETH_SPEED_NUM_25G) {
+        stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET +
+            sizeof(struct zxdh_hw_mac_stats) * (hw->phyport % 4);
+        bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET +
+            sizeof(struct zxdh_hw_mac_bytes) * (hw->phyport % 4);
+    } else {
+        stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET +
+            sizeof(struct zxdh_hw_mac_stats) * 4;
+        bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET +
+            sizeof(struct zxdh_hw_mac_bytes) * 4;
+    }
+
+    rte_memcpy(mac_stats, (void *)stats_addr, sizeof(struct zxdh_hw_mac_stats));
+    rte_memcpy(mac_bytes, (void *)bytes_addr, sizeof(struct zxdh_hw_mac_bytes));
+    return 0;
+}
+
+/*
+ * Fix up a 64-bit counter read from hardware: the device stores the two
+ * 32-bit words in the opposite order, each word little-endian.  Swap
+ * the halves and byte-convert each one.
+ *
+ * The original code produced the same value but had the hi/lo local
+ * names swapped (n_data_lo held the high word), which was misleading.
+ */
+static void zxdh_data_hi_to_lo(uint64_t *data)
+{
+    uint32_t n_data_hi;
+    uint32_t n_data_lo;
+
+    n_data_hi = *data >> 32;
+    n_data_lo = *data;
+    *data =  (uint64_t)(rte_le_to_cpu_32(n_data_lo)) << 32 |
+                rte_le_to_cpu_32(n_data_hi);
+}
+
+/*
+ * Read this vport's NP counters from the DTB statistics RAM: broadcast
+ * packet counts (64-bit entries) and MTU-drop packet/byte pairs
+ * (128-bit entries).  Counters arrive with swapped 32-bit halves, hence
+ * the zxdh_data_hi_to_lo() fixups after each read.
+ *
+ * Dropped the dead memset()s of stats_data that preceded the two
+ * broadcast reads: those reads write directly into np_stats and never
+ * touch stats_data.
+ */
+static int zxdh_np_stats_get(struct rte_eth_dev *dev, struct zxdh_hw_np_stats *np_stats)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_np_stats_data stats_data;
+    uint32_t stats_id = zxdh_vport_to_vfid(hw->vport);
+    uint32_t idx = 0;
+    int ret = 0;
+
+    idx = stats_id + ZXDH_BROAD_STATS_EGRESS_BASE;
+    ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+                0, idx, (uint32_t *)&np_stats->np_tx_broadcast);
+    if (ret)
+        return ret;
+    zxdh_data_hi_to_lo(&np_stats->np_tx_broadcast);
+
+    idx = stats_id + ZXDH_BROAD_STATS_INGRESS_BASE;
+    ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+                0, idx, (uint32_t *)&np_stats->np_rx_broadcast);
+    if (ret)
+        return ret;
+    zxdh_data_hi_to_lo(&np_stats->np_rx_broadcast);
+
+    idx = stats_id + ZXDH_MTU_STATS_EGRESS_BASE;
+    memset(&stats_data, 0, sizeof(stats_data));
+    ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+                1, idx, (uint32_t *)&stats_data);
+    if (ret)
+        return ret;
+
+    np_stats->np_tx_mtu_drop_pkts = stats_data.n_pkts_dropped;
+    np_stats->np_tx_mtu_drop_bytes = stats_data.n_bytes_dropped;
+    zxdh_data_hi_to_lo(&np_stats->np_tx_mtu_drop_pkts);
+    zxdh_data_hi_to_lo(&np_stats->np_tx_mtu_drop_bytes);
+
+    idx = stats_id + ZXDH_MTU_STATS_INGRESS_BASE;
+    memset(&stats_data, 0, sizeof(stats_data));
+    ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+                1, idx, (uint32_t *)&stats_data);
+    if (ret)
+        return ret;
+    np_stats->np_rx_mtu_drop_pkts = stats_data.n_pkts_dropped;
+    np_stats->np_rx_mtu_drop_bytes = stats_data.n_bytes_dropped;
+    zxdh_data_hi_to_lo(&np_stats->np_rx_mtu_drop_pkts);
+    zxdh_data_hi_to_lo(&np_stats->np_rx_mtu_drop_bytes);
+
+    return 0;
+}
+
+/*
+ * Fetch the NP counters for this port.  A PF reads them directly from
+ * the DTB; a VF requests them from its PF over the message channel and
+ * copies the reply payload.
+ */
+static int
+zxdh_hw_np_stats_get(struct rte_eth_dev *dev, struct zxdh_hw_np_stats *np_stats)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_msg_info msg_info = {0};
+    struct zxdh_msg_reply_info reply_info = {0};
+    int ret = 0;
+
+    if (hw->is_pf) {
+        ret = zxdh_np_stats_get(dev, np_stats);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "get np stats failed");
+            return -1;
+        }
+    } else {
+        zxdh_msg_head_build(hw, ZXDH_GET_NP_STATS, &msg_info);
+        ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),
+                    &reply_info, sizeof(struct zxdh_msg_reply_info));
+        if (ret) {
+            /* Log the message type actually sent (the old text named
+             * ZXDH_PORT_METER_STAT_GET, which is not what we send).
+             */
+            PMD_DRV_LOG(ERR,
+                "Failed to send msg: port 0x%x msg type ZXDH_GET_NP_STATS",
+                hw->vport.vport);
+            return -1;
+        }
+        memcpy(np_stats, &reply_info.reply_body.np_stats, sizeof(struct zxdh_hw_np_stats));
+    }
+    return ret;
+}
+
+/*
+ * .stats_get callback: aggregate VQM, MAC (PF only) and NP counters
+ * into rte_eth_stats, plus per-queue counters read by offset from the
+ * rx/tx queue structs via the zxdh_*_stat_strings tables
+ * ([0]=packets, [1]=bytes, [2]=errors, [5]=truncated_err).
+ *
+ * Failures from the counter sources are now propagated instead of
+ * being ignored, which previously reported zeroed buffers as valid
+ * statistics.
+ */
+int
+zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_hw_vqm_stats vqm_stats = {0};
+    struct zxdh_hw_np_stats np_stats = {0};
+    struct zxdh_hw_mac_stats mac_stats = {0};
+    struct zxdh_hw_mac_bytes mac_bytes = {0};
+    uint32_t i = 0;
+    int ret;
+
+    ret = zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET, &vqm_stats);
+    if (ret)
+        return ret;
+    if (hw->is_pf)
+        zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
+
+    ret = zxdh_hw_np_stats_get(dev, &np_stats);
+    if (ret)
+        return ret;
+
+    stats->ipackets = vqm_stats.rx_total;
+    stats->opackets = vqm_stats.tx_total;
+    stats->ibytes = vqm_stats.rx_bytes;
+    stats->obytes = vqm_stats.tx_bytes;
+    stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
+    stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.np_rx_mtu_drop_pkts;
+    stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.np_tx_mtu_drop_pkts;
+
+    stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+    for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
+        struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[i];
+
+        if (rxvq == NULL)
+            continue;
+        stats->q_ipackets[i] = *(uint64_t *)(((char *)rxvq) +
+                zxdh_rxq_stat_strings[0].offset);
+        stats->q_ibytes[i] = *(uint64_t *)(((char *)rxvq) +
+                zxdh_rxq_stat_strings[1].offset);
+        stats->q_errors[i] = *(uint64_t *)(((char *)rxvq) +
+                zxdh_rxq_stat_strings[2].offset);
+        stats->q_errors[i] += *(uint64_t *)(((char *)rxvq) +
+                zxdh_rxq_stat_strings[5].offset);
+    }
+
+    /* q_errors accumulates on top of the rx-side value for the same i. */
+    for (i = 0; (i < dev->data->nb_tx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
+        struct zxdh_virtnet_tx *txvq = dev->data->tx_queues[i];
+
+        if (txvq == NULL)
+            continue;
+        stats->q_opackets[i] = *(uint64_t *)(((char *)txvq) +
+                zxdh_txq_stat_strings[0].offset);
+        stats->q_obytes[i] = *(uint64_t *)(((char *)txvq) +
+                zxdh_txq_stat_strings[1].offset);
+        stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
+                zxdh_txq_stat_strings[2].offset);
+        stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
+                zxdh_txq_stat_strings[5].offset);
+    }
+    return 0;
+}
+
+/*
+ * Ask the RISC-V agent to clear a counter set.  @opcode picks the set
+ * and the BAR message module it is routed through.  Returns 0 on
+ * success, -1 for an unknown opcode, -EAGAIN on a messaging failure.
+ */
+static int zxdh_hw_stats_reset(struct rte_eth_dev *dev, enum zxdh_agent_msg_type opcode)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_msg_info msg_info = {0};
+    struct zxdh_msg_reply_info reply_info = {0};
+    enum ZXDH_BAR_MODULE_ID module_id;
+    int ret;
+
+    if (opcode == ZXDH_VQM_DEV_STATS_RESET) {
+        module_id = ZXDH_BAR_MODULE_VQM;
+    } else if (opcode == ZXDH_MAC_STATS_RESET) {
+        module_id = ZXDH_BAR_MODULE_MAC;
+    } else {
+        PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
+        return -1;
+    }
+
+    zxdh_agent_msg_build(hw, opcode, &msg_info);
+
+    ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
+                &reply_info, sizeof(struct zxdh_msg_reply_info), module_id);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "Failed to reset hw stats");
+        return -EAGAIN;
+    }
+    return 0;
+}
+
+/*
+ * .stats_reset callback: clear the VQM counters and, on a PF, the MAC
+ * counters too.  Failures are now propagated to the caller; the old
+ * code ignored them and always reported success.
+ */
+int zxdh_dev_stats_reset(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    int ret;
+
+    ret = zxdh_hw_stats_reset(dev, ZXDH_VQM_DEV_STATS_RESET);
+    if (ret)
+        return ret;
+    if (hw->is_pf)
+        ret = zxdh_hw_stats_reset(dev, ZXDH_MAC_STATS_RESET);
+
+    return ret;
+}
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
index 860716d079..f35378e691 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.h
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -5,6 +5,8 @@
 #ifndef ZXDH_ETHDEV_OPS_H
 #define ZXDH_ETHDEV_OPS_H
 
+#include <stdint.h>
+
 #include <rte_ether.h>
 
 #include "zxdh_ethdev.h"
@@ -24,6 +26,29 @@
 #define ZXDH_HF_MAC_VLAN_ETH  ZXDH_ETH_RSS_L2
 #define ZXDH_RSS_HF  ((ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH))
 
+/* Aggregate virtqueue-manager counters as returned in the RISC-V agent
+ * reply; __rte_packed to match the wire layout.
+ */
+struct zxdh_hw_vqm_stats {
+    uint64_t rx_total;
+    uint64_t tx_total;
+    uint64_t rx_bytes;
+    uint64_t tx_bytes;
+    uint64_t rx_error;
+    uint64_t tx_error;
+    uint64_t rx_drop;
+} __rte_packed;
+
+/* Network-processor counters.  The mtr (meter) fields are declared for
+ * the reply layout but are not filled in by zxdh_np_stats_get() in this
+ * patch.
+ */
+struct zxdh_hw_np_stats {
+    uint64_t np_rx_broadcast;
+    uint64_t np_tx_broadcast;
+    uint64_t np_rx_mtu_drop_pkts;
+    uint64_t np_tx_mtu_drop_pkts;
+    uint64_t np_rx_mtu_drop_bytes;
+    uint64_t np_tx_mtu_drop_bytes;
+    uint64_t np_rx_mtr_drop_pkts;
+    uint64_t np_tx_mtr_drop_pkts;
+    uint64_t np_rx_mtr_drop_bytes;
+    uint64_t np_tx_mtr_drop_bytes;
+};
+
 int zxdh_dev_set_link_up(struct rte_eth_dev *dev);
 int zxdh_dev_set_link_down(struct rte_eth_dev *dev);
 int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused);
@@ -46,5 +71,7 @@ int zxdh_dev_rss_reta_query(struct rte_eth_dev *dev,
 int zxdh_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
 int zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
 int zxdh_rss_configure(struct rte_eth_dev *dev);
+int zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+int zxdh_dev_stats_reset(struct rte_eth_dev *dev);
 
 #endif /* ZXDH_ETHDEV_OPS_H */
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 45a9b10aa4..159c8c9c71 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -9,10 +9,16 @@
 
 #include <ethdev_driver.h>
 
+#include "zxdh_ethdev_ops.h"
+
 #define ZXDH_BAR0_INDEX                 0
 #define ZXDH_CTRLCH_OFFSET              (0x2000)
 #define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET  (ZXDH_CTRLCH_OFFSET + 0x1000)
 
+#define ZXDH_MAC_OFFSET                 (0x24000)
+#define ZXDH_MAC_STATS_OFFSET           (0x1408)
+#define ZXDH_MAC_BYTES_OFFSET           (0xb000)
+
 #define ZXDH_MSIX_INTR_MSG_VEC_BASE   1
 #define ZXDH_MSIX_INTR_MSG_VEC_NUM    3
 #define ZXDH_MSIX_INTR_DTB_VEC        (ZXDH_MSIX_INTR_MSG_VEC_BASE + ZXDH_MSIX_INTR_MSG_VEC_NUM)
@@ -173,7 +179,13 @@ enum pciebar_layout_type {
 
 /* riscv msg opcodes */
 enum zxdh_agent_msg_type {
+    ZXDH_MAC_STATS_GET = 10,
+    ZXDH_MAC_STATS_RESET,
     ZXDH_MAC_LINK_GET = 14,
+    ZXDH_VQM_DEV_STATS_GET = 21,
+    ZXDH_VQM_DEV_STATS_RESET,
+    ZXDH_VQM_QUEUE_STATS_GET = 24,
+    ZXDH_VQM_QUEUE_STATS_RESET,
 };
 
 enum zxdh_msg_type {
@@ -195,6 +207,8 @@ enum zxdh_msg_type {
     ZXDH_PORT_ATTRS_SET = 25,
     ZXDH_PORT_PROMISC_SET = 26,
 
+    ZXDH_GET_NP_STATS = 31,
+
     ZXDH_MSG_TYPE_END,
 };
 
@@ -322,6 +336,8 @@ struct zxdh_msg_reply_body {
         struct zxdh_link_info_msg link_msg;
         struct zxdh_rss_hf rss_hf;
         struct zxdh_rss_reta rss_reta;
+        struct zxdh_hw_vqm_stats vqm_stats;
+        struct zxdh_hw_np_stats np_stats;
     } __rte_packed;
 } __rte_packed;
 
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index f2518b6d7c..7ec53b1aa6 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -26,6 +26,7 @@ ZXDH_TLB_MGR_T *g_p_dpp_tlb_mgr[ZXDH_DEV_CHANNEL_MAX];
 ZXDH_REG_T g_dpp_reg_info[4];
 ZXDH_DTB_TABLE_T g_dpp_dtb_table_info[4];
 ZXDH_SDT_TBL_DATA_T g_sdt_info[ZXDH_DEV_CHANNEL_MAX][ZXDH_DEV_SDT_ID_MAX];
+ZXDH_PPU_STAT_CFG_T g_ppu_stat_cfg = {0};
 
 #define ZXDH_SDT_MGR_PTR_GET()    (&g_sdt_mgr)
 #define ZXDH_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id])
@@ -117,6 +118,18 @@ do {\
 #define ZXDH_COMM_CONVERT16(w_data) \
             (((w_data) & 0xff) << 8)
 
+#define ZXDH_DTB_TAB_UP_WR_INDEX_GET(DEV_ID, QUEUE_ID)       \
+        (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.wr_index)
+
+#define ZXDH_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(DEV_ID, QUEUE_ID, INDEX)     \
+    (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.user_addr[(INDEX)].user_flag)
+
+#define ZXDH_DTB_TAB_UP_USER_PHY_ADDR_GET(DEV_ID, QUEUE_ID, INDEX)     \
+        (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.user_addr[(INDEX)].phy_addr)
+
+#define ZXDH_DTB_TAB_UP_DATA_LEN_GET(DEV_ID, QUEUE_ID, INDEX)       \
+        (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.data_len[(INDEX)])
+
 #define ZXDH_DTB_TAB_UP_VIR_ADDR_GET(DEV_ID, QUEUE_ID, INDEX)     \
         ((INDEX) * p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.item_size)
 
@@ -1717,3 +1730,331 @@ zxdh_np_dtb_table_entry_get(uint32_t dev_id,
 
     return 0;
 }
+
+/*
+ * Return the software copy of the PPU statistics configuration.
+ * The whole structure is copied, which covers the same four fields the
+ * original field-by-field version did.
+ */
+static void
+zxdh_np_stat_cfg_soft_get(uint32_t dev_id,
+                ZXDH_PPU_STAT_CFG_T *p_stat_cfg)
+{
+    ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_stat_cfg);
+
+    *p_stat_cfg = g_ppu_stat_cfg;
+}
+
+/*
+ * Program one "table up" (device-to-host dump) queue item: write the
+ * descriptor into the item buffer, record the expected payload length,
+ * then hand the item to hardware.  desc_len is in bytes and must be a
+ * multiple of 4 (hardware consumes it as 32-bit words).
+ */
+static uint32_t
+zxdh_np_dtb_tab_up_info_set(uint32_t dev_id,
+            uint32_t queue_id,
+            uint32_t item_index,
+            uint32_t int_flag,
+            uint32_t data_len,
+            uint32_t desc_len,
+            uint32_t *p_desc_data)
+{
+    ZXDH_DTB_QUEUE_ITEM_INFO_T item_info = {0};
+    uint32_t queue_en = 0;
+    uint32_t rc;
+
+    zxdh_np_dtb_queue_enable_get(dev_id, queue_id, &queue_en);
+    if (!queue_en) {
+        PMD_DRV_LOG(ERR, "the queue %d is not enable!", queue_id);
+        return ZXDH_RC_DTB_QUEUE_NOT_ENABLE;
+    }
+
+    if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+        PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+        return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+    }
+
+    if (desc_len % 4 != 0)
+        return ZXDH_RC_DTB_PARA_INVALID;
+
+    zxdh_np_dtb_item_buff_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE,
+        item_index, 0, desc_len, p_desc_data);
+
+    ZXDH_DTB_TAB_UP_DATA_LEN_GET(dev_id, queue_id, item_index) = data_len;
+
+    item_info.cmd_vld = 1;
+    item_info.cmd_type = ZXDH_DTB_DIR_UP_TYPE;
+    item_info.int_en = int_flag;
+    item_info.data_len = desc_len / 4;
+
+    /* Simulator targets have no real queue hardware to kick. */
+    if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM)
+        return 0;
+
+    rc = zxdh_np_dtb_queue_item_info_set(dev_id, queue_id, &item_info);
+
+    return rc;
+}
+
+/*
+ * Write a dump descriptor into a claimed queue item.  On failure the
+ * item's ack word is restored to UNUSED so the slot is not leaked.
+ * Interrupts are not requested (dtb_interrupt_status stays 0).
+ */
+static uint32_t
+zxdh_np_dtb_write_dump_desc_info(uint32_t dev_id,
+        uint32_t queue_id,
+        uint32_t queue_element_id,
+        uint32_t *p_dump_info,
+        uint32_t data_len,
+        uint32_t desc_len,
+        uint32_t *p_dump_data)
+{
+    uint32_t dtb_interrupt_status = 0;
+    uint32_t rc;
+
+    ZXDH_COMM_CHECK_POINT(p_dump_data);
+    rc = zxdh_np_dtb_tab_up_info_set(dev_id,
+                queue_id,
+                queue_element_id,
+                dtb_interrupt_status,
+                data_len,
+                desc_len,
+                p_dump_info);
+    if (rc != 0) {
+        PMD_DRV_LOG(ERR, "the queue %d element id %d dump"
+            " info set failed!", queue_id, queue_element_id);
+        /* Release the claimed slot back to the free pool. */
+        zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE,
+            queue_element_id, 0, ZXDH_DTB_TAB_ACK_UNUSED_MASK);
+    }
+
+    return rc;
+}
+
+/*
+ * Claim a free "table up" queue item.  Scans at most
+ * ZXDH_DTB_QUEUE_ITEM_NUM_MAX slots starting at the software write
+ * index; a slot is free when the high byte of its ack word equals the
+ * UNUSED mask.  The claimed slot is marked IS_USING before return.
+ */
+static uint32_t
+zxdh_np_dtb_tab_up_free_item_get(uint32_t dev_id,
+                    uint32_t queue_id,
+                    uint32_t *p_item_index)
+{
+    uint32_t ack_vale = 0;
+    uint32_t item_index = 0;
+    uint32_t unused_item_num = 0;
+    uint32_t i;
+
+    if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+        PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+        return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+    }
+
+    zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &unused_item_num);
+
+    if (unused_item_num == 0)
+        return ZXDH_RC_DTB_QUEUE_ITEM_HW_EMPTY;
+
+    for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
+        item_index = ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id) %
+            ZXDH_DTB_QUEUE_ITEM_NUM_MAX;
+
+        zxdh_np_dtb_item_ack_rd(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, item_index,
+            0, &ack_vale);
+
+        /* Advance the write index even past busy slots so the next
+         * search resumes after this position.
+         */
+        ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id)++;
+
+        if ((ack_vale >> 8) == ZXDH_DTB_TAB_ACK_UNUSED_MASK)
+            break;
+    }
+
+    if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX)
+        return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY;
+
+    zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, item_index,
+        0, ZXDH_DTB_TAB_ACK_IS_USING_MASK);
+
+    *p_item_index = item_index;
+
+
+    return 0;
+}
+
+/*
+ * Resolve the destination physical address for a dump item, split into
+ * high/low 32-bit halves.  User-registered buffers take precedence;
+ * otherwise the payload lands just past the ack area of the item.
+ */
+static uint32_t
+zxdh_np_dtb_tab_up_item_addr_get(uint32_t dev_id,
+                    uint32_t queue_id,
+                    uint32_t item_index,
+                    uint32_t *p_phy_haddr,
+                    uint32_t *p_phy_laddr)
+{
+    uint64_t phy_addr;
+
+    if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+        PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+        return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+    }
+
+    if (ZXDH_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(dev_id, queue_id, item_index) ==
+        ZXDH_DTB_TAB_UP_USER_ADDR_TYPE)
+        phy_addr = ZXDH_DTB_TAB_UP_USER_PHY_ADDR_GET(dev_id, queue_id, item_index);
+    else
+        phy_addr = ZXDH_DTB_ITEM_ACK_SIZE;
+
+    *p_phy_haddr = (phy_addr >> 32) & 0xffffffff;
+    *p_phy_laddr = phy_addr & 0xffffffff;
+
+    return 0;
+}
+
+/*
+ * Kick a DMA dump of @depth 128-bit rows starting at @base_addr in
+ * SMMU0 into @p_data; *element_id receives the queue item used so the
+ * caller can wait on / release it.
+ *
+ * Fixed the failure log, which printed base_addr as "failed = %d", and
+ * propagate the actual error code instead of a hardcoded SW_EMPTY.
+ */
+static uint32_t
+zxdh_np_dtb_se_smmu0_dma_dump(uint32_t dev_id,
+        uint32_t queue_id,
+        uint32_t base_addr,
+        uint32_t depth,
+        uint32_t *p_data,
+        uint32_t *element_id)
+{
+    uint8_t form_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+    uint32_t dump_dst_phy_haddr = 0;
+    uint32_t dump_dst_phy_laddr = 0;
+    uint32_t queue_item_index = 0;
+    uint32_t data_len;
+    uint32_t desc_len;
+    uint32_t rc;
+
+    rc = zxdh_np_dtb_tab_up_free_item_get(dev_id, queue_id, &queue_item_index);
+    if (rc != 0) {
+        PMD_DRV_LOG(ERR, "dpp_dtb_tab_up_free_item_get failed = %u!", rc);
+        return rc;
+    }
+
+    *element_id = queue_item_index;
+
+    /* NOTE(review): the resolved phy address halves are not copied into
+     * form_buff here -- confirm whether the descriptor is meant to
+     * carry them or the hardware infers the destination.
+     */
+    rc = zxdh_np_dtb_tab_up_item_addr_get(dev_id, queue_id, queue_item_index,
+        &dump_dst_phy_haddr, &dump_dst_phy_laddr);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get");
+
+    /* data_len in 32-bit words: each row is 128 bits. */
+    data_len = depth * 128 / 32;
+    desc_len = ZXDH_DTB_LEN_POS_SETP / 4;
+
+    rc = zxdh_np_dtb_write_dump_desc_info(dev_id, queue_id, queue_item_index,
+        (uint32_t *)form_buff, data_len, desc_len, p_data);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_desc_info");
+
+    return rc;
+}
+
+/*
+ * Indirect read of one 128-, 64- or 1-bit element from SMMU0 ERAM:
+ * dump the containing 128-bit row, then extract the requested slice
+ * into p_data.
+ *
+ * Added a default case: the old switch silently accepted an invalid
+ * rd_mode and read row 0.
+ */
+static uint32_t
+zxdh_np_dtb_se_smmu0_ind_read(uint32_t dev_id,
+        uint32_t queue_id,
+        uint32_t base_addr,
+        uint32_t index,
+        uint32_t rd_mode,
+        uint32_t *p_data)
+{
+    uint32_t temp_data[4] = {0};
+    uint32_t element_id = 0;
+    uint32_t row_index = 0;
+    uint32_t col_index = 0;
+    uint32_t eram_dump_base_addr;
+    uint32_t rc;
+
+    switch (rd_mode) {
+    case ZXDH_ERAM128_OPR_128b:
+        row_index = index;
+        break;
+    case ZXDH_ERAM128_OPR_64b:
+        row_index = (index >> 1);
+        col_index = index & 0x1;
+        break;
+    case ZXDH_ERAM128_OPR_1b:
+        row_index = (index >> 7);
+        col_index = index & 0x7F;
+        break;
+    default:
+        PMD_DRV_LOG(ERR, "invalid eram read mode %u!", rd_mode);
+        return ZXDH_RC_DTB_PARA_INVALID;
+    }
+
+    eram_dump_base_addr = base_addr + row_index;
+    rc = zxdh_np_dtb_se_smmu0_dma_dump(dev_id,
+            queue_id,
+            eram_dump_base_addr,
+            1,
+            temp_data,
+            &element_id);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_np_dtb_se_smmu0_dma_dump");
+
+    switch (rd_mode) {
+    case ZXDH_ERAM128_OPR_128b:
+        memcpy(p_data, temp_data, (128 / 8));
+        break;
+    case ZXDH_ERAM128_OPR_64b:
+        /* The two 64-bit halves are stored high-half-first in the row. */
+        memcpy(p_data, temp_data + ((1 - col_index) << 1), (64 / 8));
+        break;
+    case ZXDH_ERAM128_OPR_1b:
+        ZXDH_COMM_UINT32_GET_BITS(p_data[0], *(temp_data +
+            (3 - col_index / 32)), (col_index % 32), 1);
+        break;
+    default:
+        /* unreachable: rd_mode validated above */
+        break;
+    }
+
+    return rc;
+}
+
+/*
+ * Translate the statistics counter width into an ERAM read mode and
+ * fetch the counter through the indirect SMMU0 read path.
+ */
+static uint32_t
+zxdh_np_dtb_stat_smmu0_int_read(uint32_t dev_id,
+        uint32_t queue_id,
+        uint32_t smmu0_base_addr,
+        ZXDH_STAT_CNT_MODE_E rd_mode,
+        uint32_t index,
+        uint32_t *p_data)
+{
+    uint32_t eram_rd_mode;
+    uint32_t rc;
+
+    ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_data);
+
+    eram_rd_mode = (rd_mode == ZXDH_STAT_128_MODE) ?
+        ZXDH_ERAM128_OPR_128b : ZXDH_ERAM128_OPR_64b;
+
+    rc = zxdh_np_dtb_se_smmu0_ind_read(dev_id,
+                                   queue_id,
+                                   smmu0_base_addr,
+                                   index,
+                                   eram_rd_mode,
+                                   p_data);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_np_dtb_se_smmu0_ind_read");
+
+    return rc;
+}
+
+/*
+ * Public entry: read one statistics counter (width selected by
+ * rd_mode) at @index from the PPU statistics ERAM.  An index beyond
+ * the configured ERAM depth currently returns success with *p_data
+ * untouched -- NOTE(review): confirm callers expect that; a DDR
+ * fallback may be intended here.
+ *
+ * Dropped the redundant memset() of stat_cfg, which is already
+ * zero-initialized by its {0} initializer.
+ */
+int
+zxdh_np_dtb_stats_get(uint32_t dev_id,
+        uint32_t queue_id,
+        ZXDH_STAT_CNT_MODE_E rd_mode,
+        uint32_t index,
+        uint32_t *p_data)
+{
+    ZXDH_PPU_STAT_CFG_T stat_cfg = {0};
+    uint32_t ppu_eram_baddr;
+    uint32_t ppu_eram_depth;
+    uint32_t rc = 0;
+
+    ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_data);
+
+    zxdh_np_stat_cfg_soft_get(dev_id, &stat_cfg);
+
+    ppu_eram_depth = stat_cfg.eram_depth;
+    ppu_eram_baddr = stat_cfg.eram_baddr;
+
+    /* 64-bit counters pack two per 128-bit row, hence the shift. */
+    if ((index >> (ZXDH_STAT_128_MODE - rd_mode)) < ppu_eram_depth) {
+        rc = zxdh_np_dtb_stat_smmu0_int_read(dev_id,
+                                    queue_id,
+                                    ppu_eram_baddr,
+                                    rd_mode,
+                                    index,
+                                    p_data);
+        ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_stat_smmu0_int_read");
+    }
+
+    return rc;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index 19d1f03f59..7da29cf7bd 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -432,6 +432,18 @@ typedef enum zxdh_sdt_table_type_e {
     ZXDH_SDT_TBLT_MAX     = 7,
 } ZXDH_SDT_TABLE_TYPE_E;
 
+/* Direction of a DTB transfer: DOWN = host-to-device table writes,
+ * UP = device-to-host dumps.
+ */
+typedef enum zxdh_dtb_dir_type_e {
+    ZXDH_DTB_DIR_DOWN_TYPE    = 0,
+    ZXDH_DTB_DIR_UP_TYPE    = 1,
+    ZXDH_DTB_DIR_TYPE_MAX,
+} ZXDH_DTB_DIR_TYPE_E;
+
+/* Whether a dump item targets a user-registered physical buffer or the
+ * queue's own item buffer.
+ */
+typedef enum zxdh_dtb_tab_up_user_addr_type_e {
+    ZXDH_DTB_TAB_UP_NOUSER_ADDR_TYPE     = 0,
+    ZXDH_DTB_TAB_UP_USER_ADDR_TYPE       = 1,
+    ZXDH_DTB_TAB_UP_USER_ADDR_TYPE_MAX,
+} ZXDH_DTB_TAB_UP_USER_ADDR_TYPE_E;
+
 typedef struct zxdh_dtb_lpm_entry_t {
     uint32_t dtb_len0;
     uint8_t *p_data_buff0;
@@ -537,6 +549,19 @@ typedef struct zxdh_dtb_hash_entry_info_t {
     uint8_t *p_rst;
 } ZXDH_DTB_HASH_ENTRY_INFO_T;
 
+/* Software copy of the PPU statistics memory layout: ERAM base address
+ * and depth, plus the DDR base and PPU address offset.
+ */
+typedef struct zxdh_ppu_stat_cfg_t {
+    uint32_t eram_baddr;
+    uint32_t eram_depth;
+    uint32_t ddr_base_addr;
+    uint32_t ppu_addr_offset;
+} ZXDH_PPU_STAT_CFG_T;
+
+/* Statistics counter width: 64-bit entry or full 128-bit entry. */
+typedef enum zxdh_stat_cnt_mode_e {
+    ZXDH_STAT_64_MODE  = 0,
+    ZXDH_STAT_128_MODE = 1,
+    ZXDH_STAT_MAX_MODE,
+} ZXDH_STAT_CNT_MODE_E;
+
 int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
 int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
 int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id,
@@ -545,5 +570,10 @@ int zxdh_np_dtb_table_entry_delete(uint32_t dev_id, uint32_t queue_id,
              uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *delete_entries);
 int zxdh_np_dtb_table_entry_get(uint32_t dev_id, uint32_t queue_id,
             ZXDH_DTB_USER_ENTRY_T *get_entry, uint32_t srh_mode);
+int zxdh_np_dtb_stats_get(uint32_t dev_id,
+            uint32_t queue_id,
+            ZXDH_STAT_CNT_MODE_E rd_mode,
+            uint32_t index,
+            uint32_t *p_data);
 
 #endif /* ZXDH_NP_H */
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 9343df81ac..deb0dd891a 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -53,6 +53,8 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_PI_HDR_SIZE          sizeof(struct zxdh_pi_hdr)
 #define ZXDH_DL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_dl)
 #define ZXDH_UL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_ul)
+#define ZXDH_PD_HDR_SIZE_MAX              256
+#define ZXDH_PD_HDR_SIZE_MIN              ZXDH_TYPE_HDR_SIZE
 
 /*
  * ring descriptors: 16 bytes.
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index e36ba39423..9f315cecc6 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -406,6 +406,40 @@ static inline void zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,
     zxdh_queue_store_flags_packed(head_dp, head_flags, vq->hw->weak_barriers);
 }
 
+static void
+zxdh_update_packet_stats(struct zxdh_virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+    uint32_t s = mbuf->pkt_len;
+    struct rte_ether_addr *ea = NULL;
+
+    stats->bytes += s;
+
+    if (s == 64) {
+        stats->size_bins[1]++;
+    } else if (s > 64 && s < 1024) {
+        uint32_t bin;
+
+        /* count zeros, and offset into correct bin */
+        bin = (sizeof(s) * 8) - rte_clz32(s) - 5;
+        stats->size_bins[bin]++;
+    } else {
+        if (s < 64)
+            stats->size_bins[0]++;
+        else if (s < 1519)
+            stats->size_bins[6]++;
+        else
+            stats->size_bins[7]++;
+    }
+
+    ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+    if (rte_is_multicast_ether_addr(ea)) {
+        if (rte_is_broadcast_ether_addr(ea))
+            stats->broadcast++;
+        else
+            stats->multicast++;
+    }
+}
+
 uint16_t
 zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -459,12 +493,19 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt
                 break;
             }
         }
+        if (txm->nb_segs > ZXDH_TX_MAX_SEGS) {
+            PMD_TX_LOG(ERR, "%d segs dropped", txm->nb_segs);
+            txvq->stats.truncated_err += nb_pkts - nb_tx;
+            break;
+        }
         /* Enqueue Packet buffers */
         if (can_push)
             zxdh_enqueue_xmit_packed_fast(txvq, txm, in_order);
         else
             zxdh_enqueue_xmit_packed(txvq, txm, slots, use_indirect, in_order);
+        zxdh_update_packet_stats(&txvq->stats, txm);
     }
+    txvq->stats.packets += nb_tx;
     if (likely(nb_tx)) {
         if (unlikely(zxdh_queue_kick_prepare_packed(vq))) {
             zxdh_queue_notify(vq);
@@ -474,9 +515,10 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt
     return nb_tx;
 }
 
-uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+uint16_t zxdh_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
                 uint16_t nb_pkts)
 {
+    struct zxdh_virtnet_tx *txvq = tx_queue;
     uint16_t nb_tx;
 
     for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
@@ -496,6 +538,12 @@ uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **t
             rte_errno = -error;
             break;
         }
+        if (m->nb_segs > ZXDH_TX_MAX_SEGS) {
+            PMD_TX_LOG(ERR, "%d segs dropped", m->nb_segs);
+            txvq->stats.truncated_err += nb_pkts - nb_tx;
+            rte_errno = ENOMEM;
+            break;
+        }
     }
     return nb_tx;
 }
@@ -571,7 +619,7 @@ static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *h
     return 0;
 }
 
-static inline void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)
+static void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)
 {
     int32_t error = 0;
     /*
@@ -613,7 +661,13 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 
     for (i = 0; i < num; i++) {
         rxm = rcv_pkts[i];
-
+        if (unlikely(len[i] < ZXDH_UL_NET_HDR_SIZE)) {
+            nb_enqueued++;
+            PMD_RX_LOG(ERR, "RX, len:%u err", len[i]);
+            zxdh_discard_rxbuf(vq, rxm);
+            rxvq->stats.errors++;
+            continue;
+        }
         struct zxdh_net_hdr_ul *header =
             (struct zxdh_net_hdr_ul *)((char *)rxm->buf_addr +
             RTE_PKTMBUF_HEADROOM);
@@ -623,8 +677,22 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
             PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);
             seg_num = 1;
         }
+        if (seg_num > ZXDH_RX_MAX_SEGS) {
+            PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);
+            nb_enqueued++;
+            zxdh_discard_rxbuf(vq, rxm);
+            rxvq->stats.errors++;
+            continue;
+        }
         /* bit[0:6]-pd_len unit:2B */
         uint16_t pd_len = header->type_hdr.pd_len << 1;
+        if (pd_len > ZXDH_PD_HDR_SIZE_MAX || pd_len < ZXDH_PD_HDR_SIZE_MIN) {
+            PMD_RX_LOG(ERR, "pd_len:%d is invalid", pd_len);
+            nb_enqueued++;
+            zxdh_discard_rxbuf(vq, rxm);
+            rxvq->stats.errors++;
+            continue;
+        }
         /* Private queue only handle type hdr */
         hdr_size = pd_len;
         rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size;
@@ -639,6 +707,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
         /* Update rte_mbuf according to pi/pd header */
         if (zxdh_rx_update_mbuf(rxm, header) < 0) {
             zxdh_discard_rxbuf(vq, rxm);
+            rxvq->stats.errors++;
             continue;
         }
         seg_res = seg_num - 1;
@@ -661,8 +730,11 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
                 PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.",
                     rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);
                 zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);
+                rxvq->stats.errors++;
+                rxvq->stats.truncated_err++;
                 continue;
             }
+            zxdh_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
             nb_rx++;
         }
     }
@@ -675,6 +747,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
         if (unlikely(rcv_cnt == 0)) {
             PMD_RX_LOG(ERR, "No enough segments for packet.");
             rte_pktmbuf_free(rx_pkts[nb_rx]);
+            rxvq->stats.errors++;
             break;
         }
         while (extra_idx < rcv_cnt) {
@@ -694,11 +767,15 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
                 PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.",
                     rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);
                 zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);
+                rxvq->stats.errors++;
+                rxvq->stats.truncated_err++;
                 continue;
             }
+            zxdh_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
             nb_rx++;
         }
     }
+    rxvq->stats.packets += nb_rx;
 
     /* Allocate new mbuf for the used descriptor */
     if (likely(!zxdh_queue_full(vq))) {
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index c8d1de3bbb..a77ec46d84 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -11,6 +11,11 @@
 #define ZXDH_PORT_BASE_QID_FLAG           10
 #define ZXDH_PORT_ATTR_IS_UP_FLAG         35
 
+#define ZXDH_MTU_STATS_EGRESS_BASE        0x8481
+#define ZXDH_MTU_STATS_INGRESS_BASE       0x8981
+#define ZXDH_BROAD_STATS_EGRESS_BASE      0xC902
+#define ZXDH_BROAD_STATS_INGRESS_BASE     0xD102
+
 extern struct zxdh_dtb_shared_data g_dtb_data;
 
 struct zxdh_port_attr_table {
-- 
2.27.0