DPDK patches and discussions
 help / color / mirror / Atom feed
From: Junlong Wang <wang.junlong1@zte.com.cn>
To: stephen@networkplumber.org
Cc: dev@dpdk.org, Junlong Wang <wang.junlong1@zte.com.cn>
Subject: [PATCH v4 14/15] net/zxdh: basic stats ops implementations
Date: Wed, 18 Dec 2024 17:26:01 +0800	[thread overview]
Message-ID: <20241218092603.1218855-15-wang.junlong1@zte.com.cn> (raw)
In-Reply-To: <20241218092603.1218855-1-wang.junlong1@zte.com.cn>


[-- Attachment #1.1.1: Type: text/plain, Size: 37376 bytes --]

Implement the basic statistics operations (stats_get / stats_reset), gathering counters from the VQM, MAC, and NP hardware blocks as well as per-queue software stats.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 doc/guides/nics/features/zxdh.ini  |   2 +
 doc/guides/nics/zxdh.rst           |   1 +
 drivers/net/zxdh/zxdh_ethdev.c     |   2 +
 drivers/net/zxdh/zxdh_ethdev_ops.c | 353 +++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev_ops.h |  27 +++
 drivers/net/zxdh/zxdh_msg.h        |  16 ++
 drivers/net/zxdh/zxdh_np.c         | 341 ++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_np.h         |  30 +++
 drivers/net/zxdh/zxdh_queue.h      |   2 +
 drivers/net/zxdh/zxdh_rxtx.c       |  83 ++++++-
 drivers/net/zxdh/zxdh_tables.h     |   5 +
 11 files changed, 859 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 415ca547d0..98c141cf95 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -22,3 +22,5 @@ QinQ offload         = Y
 RSS hash             = Y
 RSS reta update      = Y
 Inner RSS            = Y
+Basic stats          = Y
+Stats per queue      = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index 3cc6a1d348..c8a52b587c 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -32,6 +32,7 @@ Features of the ZXDH PMD are:
 - VLAN stripping and inserting
 - QINQ stripping and inserting
 - Receive Side Scaling (RSS)
+- Port hardware statistics
 
 
 Driver compilation and testing
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 1349559c9b..a1822e1556 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -1151,6 +1151,8 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
 	.reta_query				 = zxdh_dev_rss_reta_query,
 	.rss_hash_update		 = zxdh_rss_hash_update,
 	.rss_hash_conf_get		 = zxdh_rss_hash_conf_get,
+	.stats_get				 = zxdh_dev_stats_get,
+	.stats_reset			 = zxdh_dev_stats_reset,
 };
 
 static int32_t
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index c12947cb4d..2c10f171aa 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -11,6 +11,8 @@
 #include "zxdh_ethdev_ops.h"
 #include "zxdh_tables.h"
 #include "zxdh_logs.h"
+#include "zxdh_rxtx.h"
+#include "zxdh_np.h"
 
 #define ZXDH_VLAN_FILTER_GROUPS       64
 #define ZXDH_INVALID_LOGIC_QID        0xFFFFU
@@ -22,6 +24,108 @@
 #define ZXDH_HF_MAC_VLAN     4
 #define ZXDH_HF_ALL          0
 
+struct zxdh_hw_mac_stats {
+	uint64_t rx_total;
+	uint64_t rx_pause;
+	uint64_t rx_unicast;
+	uint64_t rx_multicast;
+	uint64_t rx_broadcast;
+	uint64_t rx_vlan;
+	uint64_t rx_size_64;
+	uint64_t rx_size_65_127;
+	uint64_t rx_size_128_255;
+	uint64_t rx_size_256_511;
+	uint64_t rx_size_512_1023;
+	uint64_t rx_size_1024_1518;
+	uint64_t rx_size_1519_mru;
+	uint64_t rx_undersize;
+	uint64_t rx_oversize;
+	uint64_t rx_fragment;
+	uint64_t rx_jabber;
+	uint64_t rx_control;
+	uint64_t rx_eee;
+
+	uint64_t tx_total;
+	uint64_t tx_pause;
+	uint64_t tx_unicast;
+	uint64_t tx_multicast;
+	uint64_t tx_broadcast;
+	uint64_t tx_vlan;
+	uint64_t tx_size_64;
+	uint64_t tx_size_65_127;
+	uint64_t tx_size_128_255;
+	uint64_t tx_size_256_511;
+	uint64_t tx_size_512_1023;
+	uint64_t tx_size_1024_1518;
+	uint64_t tx_size_1519_mtu;
+	uint64_t tx_undersize;
+	uint64_t tx_oversize;
+	uint64_t tx_fragment;
+	uint64_t tx_jabber;
+	uint64_t tx_control;
+	uint64_t tx_eee;
+
+	uint64_t rx_error;
+	uint64_t rx_fcs_error;
+	uint64_t rx_drop;
+
+	uint64_t tx_error;
+	uint64_t tx_fcs_error;
+	uint64_t tx_drop;
+
+} __rte_packed;
+
+struct zxdh_hw_mac_bytes {
+	uint64_t rx_total_bytes;
+	uint64_t rx_good_bytes;
+	uint64_t tx_total_bytes;
+	uint64_t tx_good_bytes;
+} __rte_packed;
+
+struct zxdh_np_stats_data {
+	uint64_t n_pkts_dropped;
+	uint64_t n_bytes_dropped;
+};
+
+struct zxdh_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	unsigned int offset;
+};
+
+static const struct zxdh_xstats_name_off zxdh_rxq_stat_strings[] = {
+	{"good_packets",           offsetof(struct zxdh_virtnet_rx, stats.packets)},
+	{"good_bytes",             offsetof(struct zxdh_virtnet_rx, stats.bytes)},
+	{"errors",                 offsetof(struct zxdh_virtnet_rx, stats.errors)},
+	{"multicast_packets",      offsetof(struct zxdh_virtnet_rx, stats.multicast)},
+	{"broadcast_packets",      offsetof(struct zxdh_virtnet_rx, stats.broadcast)},
+	{"truncated_err",          offsetof(struct zxdh_virtnet_rx, stats.truncated_err)},
+	{"undersize_packets",      offsetof(struct zxdh_virtnet_rx, stats.size_bins[0])},
+	{"size_64_packets",        offsetof(struct zxdh_virtnet_rx, stats.size_bins[1])},
+	{"size_65_127_packets",    offsetof(struct zxdh_virtnet_rx, stats.size_bins[2])},
+	{"size_128_255_packets",   offsetof(struct zxdh_virtnet_rx, stats.size_bins[3])},
+	{"size_256_511_packets",   offsetof(struct zxdh_virtnet_rx, stats.size_bins[4])},
+	{"size_512_1023_packets",  offsetof(struct zxdh_virtnet_rx, stats.size_bins[5])},
+	{"size_1024_1518_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[6])},
+	{"size_1519_max_packets",  offsetof(struct zxdh_virtnet_rx, stats.size_bins[7])},
+};
+
+static const struct zxdh_xstats_name_off zxdh_txq_stat_strings[] = {
+	{"good_packets",           offsetof(struct zxdh_virtnet_tx, stats.packets)},
+	{"good_bytes",             offsetof(struct zxdh_virtnet_tx, stats.bytes)},
+	{"errors",                 offsetof(struct zxdh_virtnet_tx, stats.errors)},
+	{"multicast_packets",      offsetof(struct zxdh_virtnet_tx, stats.multicast)},
+	{"broadcast_packets",      offsetof(struct zxdh_virtnet_tx, stats.broadcast)},
+	{"truncated_err",          offsetof(struct zxdh_virtnet_tx, stats.truncated_err)},
+	{"undersize_packets",      offsetof(struct zxdh_virtnet_tx, stats.size_bins[0])},
+	{"size_64_packets",        offsetof(struct zxdh_virtnet_tx, stats.size_bins[1])},
+	{"size_65_127_packets",    offsetof(struct zxdh_virtnet_tx, stats.size_bins[2])},
+	{"size_128_255_packets",   offsetof(struct zxdh_virtnet_tx, stats.size_bins[3])},
+	{"size_256_511_packets",   offsetof(struct zxdh_virtnet_tx, stats.size_bins[4])},
+	{"size_512_1023_packets",  offsetof(struct zxdh_virtnet_tx, stats.size_bins[5])},
+	{"size_1024_1518_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[6])},
+	{"size_1519_max_packets",  offsetof(struct zxdh_virtnet_tx, stats.size_bins[7])},
+};
+
 static int32_t zxdh_config_port_status(struct rte_eth_dev *dev, uint16_t link_status)
 {
 	struct zxdh_hw *hw = dev->data->dev_private;
@@ -1162,3 +1266,252 @@ zxdh_rss_configure(struct rte_eth_dev *dev)
 	}
 	return 0;
 }
+
+static int32_t
+zxdh_hw_vqm_stats_get(struct rte_eth_dev *dev, enum zxdh_agent_msg_type opcode,
+			struct zxdh_hw_vqm_stats *hw_stats)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg_info = {0};
+	struct zxdh_msg_reply_info reply_info = {0};
+	enum ZXDH_BAR_MODULE_ID module_id;
+	int ret = 0;
+
+	switch (opcode) {
+	case ZXDH_VQM_DEV_STATS_GET:
+	case ZXDH_VQM_QUEUE_STATS_GET:
+	case ZXDH_VQM_QUEUE_STATS_RESET:
+		module_id = ZXDH_BAR_MODULE_VQM;
+		break;
+	case ZXDH_MAC_STATS_GET:
+	case ZXDH_MAC_STATS_RESET:
+		module_id = ZXDH_BAR_MODULE_MAC;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
+		return -1;
+	}
+
+	zxdh_agent_msg_build(hw, opcode, &msg_info);
+
+	ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
+				&reply_info, sizeof(struct zxdh_msg_reply_info), module_id);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get hw stats");
+		return -1;
+	}
+	struct zxdh_msg_reply_body *reply_body = &reply_info.reply_body;
+
+	rte_memcpy(hw_stats, &reply_body->vqm_stats, sizeof(struct zxdh_hw_vqm_stats));
+	return 0;
+}
+
+static int zxdh_hw_mac_stats_get(struct rte_eth_dev *dev,
+				struct zxdh_hw_mac_stats *mac_stats,
+				struct zxdh_hw_mac_bytes *mac_bytes)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MAC_OFFSET);
+	uint64_t stats_addr =  0;
+	uint64_t bytes_addr =  0;
+
+	if (hw->speed <= RTE_ETH_SPEED_NUM_25G) {
+		stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET + 352 * (hw->phyport % 4);
+		bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET + 32 * (hw->phyport % 4);
+	} else {
+		stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET + 352 * 4;
+		bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET + 32 * 4;
+	}
+
+	rte_memcpy(mac_stats, (void *)stats_addr, sizeof(struct zxdh_hw_mac_stats));
+	rte_memcpy(mac_bytes, (void *)bytes_addr, sizeof(struct zxdh_hw_mac_bytes));
+	return 0;
+}
+
+static void zxdh_data_hi_to_lo(uint64_t *data)
+{
+	uint32_t n_data_hi;
+	uint32_t n_data_lo;
+
+	n_data_lo = *data >> 32;
+	n_data_hi = *data;
+	*data =  (uint64_t)(rte_le_to_cpu_32(n_data_hi)) << 32 |
+				rte_le_to_cpu_32(n_data_lo);
+}
+
+static int zxdh_np_stats_get(struct rte_eth_dev *dev, struct zxdh_hw_np_stats *np_stats)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_np_stats_data stats_data;
+	uint32_t stats_id = zxdh_vport_to_vfid(hw->vport);
+	uint32_t idx = 0;
+	int ret = 0;
+
+	idx = stats_id + ZXDH_BROAD_STATS_EGRESS_BASE;
+	ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+				0, idx, (uint32_t *)&np_stats->np_tx_broadcast);
+	if (ret)
+		return ret;
+	zxdh_data_hi_to_lo(&np_stats->np_tx_broadcast);
+
+	idx = stats_id + ZXDH_BROAD_STATS_INGRESS_BASE;
+	memset(&stats_data, 0, sizeof(stats_data));
+	ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+				0, idx, (uint32_t *)&np_stats->np_rx_broadcast);
+	if (ret)
+		return ret;
+	zxdh_data_hi_to_lo(&np_stats->np_rx_broadcast);
+
+	idx = stats_id + ZXDH_MTU_STATS_EGRESS_BASE;
+	memset(&stats_data, 0, sizeof(stats_data));
+	ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+				1, idx, (uint32_t *)&stats_data);
+	if (ret)
+		return ret;
+
+	np_stats->np_tx_mtu_drop_pkts = stats_data.n_pkts_dropped;
+	np_stats->np_tx_mtu_drop_bytes = stats_data.n_bytes_dropped;
+	zxdh_data_hi_to_lo(&np_stats->np_tx_mtu_drop_pkts);
+	zxdh_data_hi_to_lo(&np_stats->np_tx_mtu_drop_bytes);
+
+	idx = stats_id + ZXDH_MTU_STATS_INGRESS_BASE;
+	memset(&stats_data, 0, sizeof(stats_data));
+	ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+				1, idx, (uint32_t *)&stats_data);
+	if (ret)
+		return ret;
+	np_stats->np_rx_mtu_drop_pkts = stats_data.n_pkts_dropped;
+	np_stats->np_rx_mtu_drop_bytes = stats_data.n_bytes_dropped;
+	zxdh_data_hi_to_lo(&np_stats->np_rx_mtu_drop_pkts);
+	zxdh_data_hi_to_lo(&np_stats->np_rx_mtu_drop_bytes);
+
+	return 0;
+}
+
+static int
+zxdh_hw_np_stats_get(struct rte_eth_dev *dev,  struct zxdh_hw_np_stats *np_stats)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg_info = {0};
+	struct zxdh_msg_reply_info reply_info = {0};
+	int ret = 0;
+
+	if (hw->is_pf) {
+		ret = zxdh_np_stats_get(dev, np_stats);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "get np stats failed");
+			return -1;
+		}
+	} else {
+		zxdh_msg_head_build(hw, ZXDH_GET_NP_STATS, &msg_info);
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),
+					&reply_info, sizeof(struct zxdh_msg_reply_info));
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed to send msg: port 0x%x msg type ZXDH_PORT_METER_STAT_GET",
+				hw->vport.vport);
+			return -1;
+		}
+		memcpy(np_stats, &reply_info.reply_body.np_stats, sizeof(struct zxdh_hw_np_stats));
+	}
+	return ret;
+}
+
+int
+zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_hw_vqm_stats vqm_stats = {0};
+	struct zxdh_hw_np_stats np_stats = {0};
+	struct zxdh_hw_mac_stats mac_stats = {0};
+	struct zxdh_hw_mac_bytes mac_bytes = {0};
+	uint32_t i = 0;
+
+	zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
+	if (hw->is_pf)
+		zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
+
+	zxdh_hw_np_stats_get(dev, &np_stats);
+
+	stats->ipackets = vqm_stats.rx_total;
+	stats->opackets = vqm_stats.tx_total;
+	stats->ibytes = vqm_stats.rx_bytes;
+	stats->obytes = vqm_stats.tx_bytes;
+	stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
+	stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.np_rx_mtu_drop_pkts;
+	stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.np_tx_mtu_drop_pkts;
+
+	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+	for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
+		struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[i];
+
+		if (rxvq == NULL)
+			continue;
+		stats->q_ipackets[i] = *(uint64_t *)(((char *)rxvq) +
+				zxdh_rxq_stat_strings[0].offset);
+		stats->q_ibytes[i] = *(uint64_t *)(((char *)rxvq) +
+				zxdh_rxq_stat_strings[1].offset);
+		stats->q_errors[i] = *(uint64_t *)(((char *)rxvq) +
+				zxdh_rxq_stat_strings[2].offset);
+		stats->q_errors[i] += *(uint64_t *)(((char *)rxvq) +
+				zxdh_rxq_stat_strings[5].offset);
+	}
+
+	for (i = 0; (i < dev->data->nb_tx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
+		struct zxdh_virtnet_tx *txvq = dev->data->tx_queues[i];
+
+		if (txvq == NULL)
+			continue;
+		stats->q_opackets[i] = *(uint64_t *)(((char *)txvq) +
+				zxdh_txq_stat_strings[0].offset);
+		stats->q_obytes[i] = *(uint64_t *)(((char *)txvq) +
+				zxdh_txq_stat_strings[1].offset);
+		stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
+				zxdh_txq_stat_strings[2].offset);
+		stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
+				zxdh_txq_stat_strings[5].offset);
+	}
+	return 0;
+}
+
+static int zxdh_hw_stats_reset(struct rte_eth_dev *dev, enum zxdh_agent_msg_type opcode)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg_info = {0};
+	struct zxdh_msg_reply_info reply_info = {0};
+	enum ZXDH_BAR_MODULE_ID module_id;
+	int ret = 0;
+
+	switch (opcode) {
+	case ZXDH_VQM_DEV_STATS_RESET:
+		module_id = ZXDH_BAR_MODULE_VQM;
+		break;
+	case ZXDH_MAC_STATS_RESET:
+		module_id = ZXDH_BAR_MODULE_MAC;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
+		return -1;
+	}
+
+	zxdh_agent_msg_build(hw, opcode, &msg_info);
+
+	ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
+				&reply_info, sizeof(struct zxdh_msg_reply_info), module_id);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to reset hw stats");
+		return -1;
+	}
+	return 0;
+}
+
+int zxdh_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	zxdh_hw_stats_reset(dev, ZXDH_VQM_DEV_STATS_RESET);
+	if (hw->is_pf)
+		zxdh_hw_stats_reset(dev, ZXDH_MAC_STATS_RESET);
+
+	return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
index 860716d079..f35378e691 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.h
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -5,6 +5,8 @@
 #ifndef ZXDH_ETHDEV_OPS_H
 #define ZXDH_ETHDEV_OPS_H
 
+#include <stdint.h>
+
 #include <rte_ether.h>
 
 #include "zxdh_ethdev.h"
@@ -24,6 +26,29 @@
 #define ZXDH_HF_MAC_VLAN_ETH  ZXDH_ETH_RSS_L2
 #define ZXDH_RSS_HF  ((ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH))
 
+struct zxdh_hw_vqm_stats {
+	uint64_t rx_total;
+	uint64_t tx_total;
+	uint64_t rx_bytes;
+	uint64_t tx_bytes;
+	uint64_t rx_error;
+	uint64_t tx_error;
+	uint64_t rx_drop;
+} __rte_packed;
+
+struct zxdh_hw_np_stats {
+	uint64_t np_rx_broadcast;
+	uint64_t np_tx_broadcast;
+	uint64_t np_rx_mtu_drop_pkts;
+	uint64_t np_tx_mtu_drop_pkts;
+	uint64_t np_rx_mtu_drop_bytes;
+	uint64_t np_tx_mtu_drop_bytes;
+	uint64_t np_rx_mtr_drop_pkts;
+	uint64_t np_tx_mtr_drop_pkts;
+	uint64_t np_rx_mtr_drop_bytes;
+	uint64_t np_tx_mtr_drop_bytes;
+};
+
 int zxdh_dev_set_link_up(struct rte_eth_dev *dev);
 int zxdh_dev_set_link_down(struct rte_eth_dev *dev);
 int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused);
@@ -46,5 +71,7 @@ int zxdh_dev_rss_reta_query(struct rte_eth_dev *dev,
 int zxdh_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
 int zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
 int zxdh_rss_configure(struct rte_eth_dev *dev);
+int zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+int zxdh_dev_stats_reset(struct rte_eth_dev *dev);
 
 #endif /* ZXDH_ETHDEV_OPS_H */
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 45a9b10aa4..159c8c9c71 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -9,10 +9,16 @@
 
 #include <ethdev_driver.h>
 
+#include "zxdh_ethdev_ops.h"
+
 #define ZXDH_BAR0_INDEX                 0
 #define ZXDH_CTRLCH_OFFSET              (0x2000)
 #define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET  (ZXDH_CTRLCH_OFFSET + 0x1000)
 
+#define ZXDH_MAC_OFFSET                 (0x24000)
+#define ZXDH_MAC_STATS_OFFSET           (0x1408)
+#define ZXDH_MAC_BYTES_OFFSET           (0xb000)
+
 #define ZXDH_MSIX_INTR_MSG_VEC_BASE   1
 #define ZXDH_MSIX_INTR_MSG_VEC_NUM    3
 #define ZXDH_MSIX_INTR_DTB_VEC        (ZXDH_MSIX_INTR_MSG_VEC_BASE + ZXDH_MSIX_INTR_MSG_VEC_NUM)
@@ -173,7 +179,13 @@ enum pciebar_layout_type {
 
 /* riscv msg opcodes */
 enum zxdh_agent_msg_type {
+	ZXDH_MAC_STATS_GET = 10,
+	ZXDH_MAC_STATS_RESET,
 	ZXDH_MAC_LINK_GET = 14,
+	ZXDH_VQM_DEV_STATS_GET = 21,
+	ZXDH_VQM_DEV_STATS_RESET,
+	ZXDH_VQM_QUEUE_STATS_GET = 24,
+	ZXDH_VQM_QUEUE_STATS_RESET,
 };
 
 enum zxdh_msg_type {
@@ -195,6 +207,8 @@ enum zxdh_msg_type {
 	ZXDH_PORT_ATTRS_SET = 25,
 	ZXDH_PORT_PROMISC_SET = 26,
 
+	ZXDH_GET_NP_STATS = 31,
+
 	ZXDH_MSG_TYPE_END,
 };
 
@@ -322,6 +336,8 @@ struct zxdh_msg_reply_body {
 		struct zxdh_link_info_msg link_msg;
 		struct zxdh_rss_hf rss_hf;
 		struct zxdh_rss_reta rss_reta;
+		struct zxdh_hw_vqm_stats vqm_stats;
+		struct zxdh_hw_np_stats np_stats;
 	} __rte_packed;
 } __rte_packed;
 
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index 1f06539263..42679635f4 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -26,6 +26,7 @@ ZXDH_TLB_MGR_T *g_p_dpp_tlb_mgr[ZXDH_DEV_CHANNEL_MAX];
 ZXDH_REG_T g_dpp_reg_info[4];
 ZXDH_DTB_TABLE_T g_dpp_dtb_table_info[4];
 ZXDH_SDT_TBL_DATA_T g_sdt_info[ZXDH_DEV_CHANNEL_MAX][ZXDH_DEV_SDT_ID_MAX];
+ZXDH_PPU_STAT_CFG_T g_ppu_stat_cfg;
 
 #define ZXDH_SDT_MGR_PTR_GET()    (&g_sdt_mgr)
 #define ZXDH_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id])
@@ -117,6 +118,18 @@ do {\
 #define ZXDH_COMM_CONVERT16(w_data) \
 			(((w_data) & 0xff) << 8)
 
+#define ZXDH_DTB_TAB_UP_WR_INDEX_GET(DEV_ID, QUEUE_ID)       \
+		(p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.wr_index)
+
+#define ZXDH_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(DEV_ID, QUEUE_ID, INDEX)     \
+	(p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.user_addr[(INDEX)].user_flag)
+
+#define ZXDH_DTB_TAB_UP_USER_PHY_ADDR_GET(DEV_ID, QUEUE_ID, INDEX)     \
+		(p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.user_addr[(INDEX)].phy_addr)
+
+#define ZXDH_DTB_TAB_UP_DATA_LEN_GET(DEV_ID, QUEUE_ID, INDEX)       \
+		(p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.data_len[(INDEX)])
+
 #define ZXDH_DTB_TAB_UP_VIR_ADDR_GET(DEV_ID, QUEUE_ID, INDEX)     \
 		((INDEX) * p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.item_size)
 
@@ -1717,3 +1730,331 @@ zxdh_np_dtb_table_entry_get(uint32_t dev_id,
 
 	return 0;
 }
+
+static void
+zxdh_np_stat_cfg_soft_get(uint32_t dev_id,
+				ZXDH_PPU_STAT_CFG_T *p_stat_cfg)
+{
+	ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_stat_cfg);
+
+	p_stat_cfg->ddr_base_addr = g_ppu_stat_cfg.ddr_base_addr;
+	p_stat_cfg->eram_baddr = g_ppu_stat_cfg.eram_baddr;
+	p_stat_cfg->eram_depth = g_ppu_stat_cfg.eram_depth;
+	p_stat_cfg->ppu_addr_offset = g_ppu_stat_cfg.ppu_addr_offset;
+}
+
+static uint32_t
+zxdh_np_dtb_tab_up_info_set(uint32_t dev_id,
+			uint32_t queue_id,
+			uint32_t item_index,
+			uint32_t int_flag,
+			uint32_t data_len,
+			uint32_t desc_len,
+			uint32_t *p_desc_data)
+{
+	ZXDH_DTB_QUEUE_ITEM_INFO_T item_info = {0};
+	uint32_t queue_en = 0;
+	uint32_t rc;
+
+	zxdh_np_dtb_queue_enable_get(dev_id, queue_id, &queue_en);
+	if (!queue_en) {
+		PMD_DRV_LOG(ERR, "the queue %d is not enable!", queue_id);
+		return ZXDH_RC_DTB_QUEUE_NOT_ENABLE;
+	}
+
+	if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+		PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+		return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+	}
+
+	if (desc_len % 4 != 0)
+		return ZXDH_RC_DTB_PARA_INVALID;
+
+	zxdh_np_dtb_item_buff_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE,
+		item_index, 0, desc_len, p_desc_data);
+
+	ZXDH_DTB_TAB_UP_DATA_LEN_GET(dev_id, queue_id, item_index) = data_len;
+
+	item_info.cmd_vld = 1;
+	item_info.cmd_type = ZXDH_DTB_DIR_UP_TYPE;
+	item_info.int_en = int_flag;
+	item_info.data_len = desc_len / 4;
+
+	if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM)
+		return 0;
+
+	rc = zxdh_np_dtb_queue_item_info_set(dev_id, queue_id, &item_info);
+
+	return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_write_dump_desc_info(uint32_t dev_id,
+		uint32_t queue_id,
+		uint32_t queue_element_id,
+		uint32_t *p_dump_info,
+		uint32_t data_len,
+		uint32_t desc_len,
+		uint32_t *p_dump_data)
+{
+	uint32_t dtb_interrupt_status = 0;
+	uint32_t rc;
+
+	ZXDH_COMM_CHECK_POINT(p_dump_data);
+	rc = zxdh_np_dtb_tab_up_info_set(dev_id,
+				queue_id,
+				queue_element_id,
+				dtb_interrupt_status,
+				data_len,
+				desc_len,
+				p_dump_info);
+	if (rc != 0) {
+		PMD_DRV_LOG(ERR, "the queue %d element id %d dump"
+			" info set failed!", queue_id, queue_element_id);
+		zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE,
+			queue_element_id, 0, ZXDH_DTB_TAB_ACK_UNUSED_MASK);
+	}
+
+	return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_tab_up_free_item_get(uint32_t dev_id,
+					uint32_t queue_id,
+					uint32_t *p_item_index)
+{
+	uint32_t ack_vale = 0;
+	uint32_t item_index = 0;
+	uint32_t unused_item_num = 0;
+	uint32_t i;
+
+	if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+		PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+		return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+	}
+
+	zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &unused_item_num);
+
+	if (unused_item_num == 0)
+		return ZXDH_RC_DTB_QUEUE_ITEM_HW_EMPTY;
+
+	for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
+		item_index = ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id) %
+			ZXDH_DTB_QUEUE_ITEM_NUM_MAX;
+
+		zxdh_np_dtb_item_ack_rd(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, item_index,
+			0, &ack_vale);
+
+		ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id)++;
+
+		if ((ack_vale >> 8) == ZXDH_DTB_TAB_ACK_UNUSED_MASK)
+			break;
+	}
+
+	if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX)
+		return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY;
+
+	zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, item_index,
+		0, ZXDH_DTB_TAB_ACK_IS_USING_MASK);
+
+	*p_item_index = item_index;
+
+
+	return 0;
+}
+
+static uint32_t
+zxdh_np_dtb_tab_up_item_addr_get(uint32_t dev_id,
+					uint32_t queue_id,
+					uint32_t item_index,
+					uint32_t *p_phy_haddr,
+					uint32_t *p_phy_laddr)
+{
+	uint32_t rc = 0;
+	uint64_t addr;
+
+	if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+		PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+		return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+	}
+
+	if (ZXDH_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(dev_id, queue_id, item_index) ==
+		ZXDH_DTB_TAB_UP_USER_ADDR_TYPE)
+		addr = ZXDH_DTB_TAB_UP_USER_PHY_ADDR_GET(dev_id, queue_id, item_index);
+	else
+		addr = ZXDH_DTB_ITEM_ACK_SIZE;
+
+	*p_phy_haddr = (addr >> 32) & 0xffffffff;
+	*p_phy_laddr = addr & 0xffffffff;
+
+	return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_se_smmu0_dma_dump(uint32_t dev_id,
+		uint32_t queue_id,
+		uint32_t base_addr,
+		uint32_t depth,
+		uint32_t *p_data,
+		uint32_t *element_id)
+{
+	uint8_t form_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+	uint32_t dump_dst_phy_haddr = 0;
+	uint32_t dump_dst_phy_laddr = 0;
+	uint32_t queue_item_index = 0;
+	uint32_t data_len;
+	uint32_t desc_len;
+	uint32_t rc;
+
+	rc = zxdh_np_dtb_tab_up_free_item_get(dev_id, queue_id, &queue_item_index);
+	if (rc != 0) {
+		PMD_DRV_LOG(ERR, "dpp_dtb_tab_up_free_item_get failed = %d!", base_addr);
+		return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY;
+	}
+
+	*element_id = queue_item_index;
+
+	rc = zxdh_np_dtb_tab_up_item_addr_get(dev_id, queue_id, queue_item_index,
+		&dump_dst_phy_haddr, &dump_dst_phy_laddr);
+	ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get");
+
+	data_len = depth * 128 / 32;
+	desc_len = ZXDH_DTB_LEN_POS_SETP / 4;
+
+	rc = zxdh_np_dtb_write_dump_desc_info(dev_id, queue_id, queue_item_index,
+		(uint32_t *)form_buff, data_len, desc_len, p_data);
+	ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_desc_info");
+
+	return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_se_smmu0_ind_read(uint32_t dev_id,
+		uint32_t queue_id,
+		uint32_t base_addr,
+		uint32_t index,
+		uint32_t rd_mode,
+		uint32_t *p_data)
+{
+	uint32_t temp_data[4] = {0};
+	uint32_t element_id = 0;
+	uint32_t row_index = 0;
+	uint32_t col_index = 0;
+	uint32_t eram_dump_base_addr;
+	uint32_t rc;
+
+	switch (rd_mode) {
+	case ZXDH_ERAM128_OPR_128b:
+	{
+		row_index = index;
+		break;
+	}
+	case ZXDH_ERAM128_OPR_64b:
+	{
+		row_index = (index >> 1);
+		col_index = index & 0x1;
+		break;
+	}
+	case ZXDH_ERAM128_OPR_1b:
+	{
+		row_index = (index >> 7);
+		col_index = index & 0x7F;
+		break;
+	}
+	}
+
+	eram_dump_base_addr = base_addr + row_index;
+	rc = zxdh_np_dtb_se_smmu0_dma_dump(dev_id,
+			queue_id,
+			eram_dump_base_addr,
+			1,
+			temp_data,
+			&element_id);
+	ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_np_dtb_se_smmu0_dma_dump");
+
+	switch (rd_mode) {
+	case ZXDH_ERAM128_OPR_128b:
+	{
+		memcpy(p_data, temp_data, (128 / 8));
+		break;
+	}
+
+	case ZXDH_ERAM128_OPR_64b:
+	{
+		memcpy(p_data, temp_data + ((1 - col_index) << 1), (64 / 8));
+		break;
+	}
+
+	case ZXDH_ERAM128_OPR_1b:
+	{
+		ZXDH_COMM_UINT32_GET_BITS(p_data[0], *(temp_data +
+			(3 - col_index / 32)), (col_index % 32), 1);
+		break;
+	}
+	}
+
+	return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_stat_smmu0_int_read(uint32_t dev_id,
+		uint32_t queue_id,
+		uint32_t smmu0_base_addr,
+		ZXDH_STAT_CNT_MODE_E rd_mode,
+		uint32_t index,
+		uint32_t *p_data)
+{
+	uint32_t eram_rd_mode;
+	uint32_t rc;
+
+	ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_data);
+
+	if (rd_mode == ZXDH_STAT_128_MODE)
+		eram_rd_mode = ZXDH_ERAM128_OPR_128b;
+	else
+		eram_rd_mode = ZXDH_ERAM128_OPR_64b;
+
+	rc = zxdh_np_dtb_se_smmu0_ind_read(dev_id,
+								   queue_id,
+								   smmu0_base_addr,
+								   index,
+								   eram_rd_mode,
+								   p_data);
+	ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_np_dtb_se_smmu0_ind_read");
+
+	return rc;
+}
+
+int
+zxdh_np_dtb_stats_get(uint32_t dev_id,
+		uint32_t queue_id,
+		ZXDH_STAT_CNT_MODE_E rd_mode,
+		uint32_t index,
+		uint32_t *p_data)
+{
+	ZXDH_PPU_STAT_CFG_T stat_cfg = {0};
+	uint32_t ppu_eram_baddr;
+	uint32_t ppu_eram_depth;
+	uint32_t rc = 0;
+
+	ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_data);
+
+	memset(&stat_cfg, 0x0, sizeof(stat_cfg));
+
+	zxdh_np_stat_cfg_soft_get(dev_id, &stat_cfg);
+
+	ppu_eram_depth = stat_cfg.eram_depth;
+	ppu_eram_baddr = stat_cfg.eram_baddr;
+
+	if ((index >> (ZXDH_STAT_128_MODE - rd_mode)) < ppu_eram_depth) {
+		rc = zxdh_np_dtb_stat_smmu0_int_read(dev_id,
+									queue_id,
+									ppu_eram_baddr,
+									rd_mode,
+									index,
+									p_data);
+		ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_stat_smmu0_int_read");
+	}
+
+	return rc;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index 19d1f03f59..7da29cf7bd 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -432,6 +432,18 @@ typedef enum zxdh_sdt_table_type_e {
 	ZXDH_SDT_TBLT_MAX     = 7,
 } ZXDH_SDT_TABLE_TYPE_E;
 
+typedef enum zxdh_dtb_dir_type_e {
+	ZXDH_DTB_DIR_DOWN_TYPE    = 0,
+	ZXDH_DTB_DIR_UP_TYPE    = 1,
+	ZXDH_DTB_DIR_TYPE_MAX,
+} ZXDH_DTB_DIR_TYPE_E;
+
+typedef enum zxdh_dtb_tab_up_user_addr_type_e {
+	ZXDH_DTB_TAB_UP_NOUSER_ADDR_TYPE     = 0,
+	ZXDH_DTB_TAB_UP_USER_ADDR_TYPE       = 1,
+	ZXDH_DTB_TAB_UP_USER_ADDR_TYPE_MAX,
+} ZXDH_DTB_TAB_UP_USER_ADDR_TYPE_E;
+
 typedef struct zxdh_dtb_lpm_entry_t {
 	uint32_t dtb_len0;
 	uint8_t *p_data_buff0;
@@ -537,6 +549,19 @@ typedef struct zxdh_dtb_hash_entry_info_t {
 	uint8_t *p_rst;
 } ZXDH_DTB_HASH_ENTRY_INFO_T;
 
+typedef struct zxdh_ppu_stat_cfg_t {
+	uint32_t eram_baddr;
+	uint32_t eram_depth;
+	uint32_t ddr_base_addr;
+	uint32_t ppu_addr_offset;
+} ZXDH_PPU_STAT_CFG_T;
+
+typedef enum zxdh_stat_cnt_mode_e {
+	ZXDH_STAT_64_MODE  = 0,
+	ZXDH_STAT_128_MODE = 1,
+	ZXDH_STAT_MAX_MODE,
+} ZXDH_STAT_CNT_MODE_E;
+
 int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
 int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
 int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id,
@@ -545,5 +570,10 @@ int zxdh_np_dtb_table_entry_delete(uint32_t dev_id, uint32_t queue_id,
 			 uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *delete_entries);
 int zxdh_np_dtb_table_entry_get(uint32_t dev_id, uint32_t queue_id,
 			ZXDH_DTB_USER_ENTRY_T *get_entry, uint32_t srh_mode);
+int zxdh_np_dtb_stats_get(uint32_t dev_id,
+			uint32_t queue_id,
+			ZXDH_STAT_CNT_MODE_E rd_mode,
+			uint32_t index,
+			uint32_t *p_data);
 
 #endif /* ZXDH_NP_H */
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 9343df81ac..deb0dd891a 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -53,6 +53,8 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_PI_HDR_SIZE          sizeof(struct zxdh_pi_hdr)
 #define ZXDH_DL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_dl)
 #define ZXDH_UL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_ul)
+#define ZXDH_PD_HDR_SIZE_MAX              256
+#define ZXDH_PD_HDR_SIZE_MIN              ZXDH_TYPE_HDR_SIZE
 
 /*
  * ring descriptors: 16 bytes.
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index 0ffce50042..27a61d46dd 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -406,6 +406,40 @@ static inline void zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,
 	zxdh_queue_store_flags_packed(head_dp, head_flags, vq->hw->weak_barriers);
 }
 
+static void
+zxdh_update_packet_stats(struct zxdh_virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+	uint32_t s = mbuf->pkt_len;
+	struct rte_ether_addr *ea = NULL;
+
+	stats->bytes += s;
+
+	if (s == 64) {
+		stats->size_bins[1]++;
+	} else if (s > 64 && s < 1024) {
+		uint32_t bin;
+
+		/* count zeros, and offset into correct bin */
+		bin = (sizeof(s) * 8) - rte_clz32(s) - 5;
+		stats->size_bins[bin]++;
+	} else {
+		if (s < 64)
+			stats->size_bins[0]++;
+		else if (s < 1519)
+			stats->size_bins[6]++;
+		else
+			stats->size_bins[7]++;
+	}
+
+	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+	if (rte_is_multicast_ether_addr(ea)) {
+		if (rte_is_broadcast_ether_addr(ea))
+			stats->broadcast++;
+		else
+			stats->multicast++;
+	}
+}
+
 uint16_t
 zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -459,12 +493,19 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt
 				break;
 			}
 		}
+		if (txm->nb_segs > ZXDH_TX_MAX_SEGS) {
+			PMD_TX_LOG(ERR, "%d segs  dropped", txm->nb_segs);
+			txvq->stats.truncated_err += nb_pkts - nb_tx;
+			break;
+		}
 		/* Enqueue Packet buffers */
 		if (can_push)
 			zxdh_enqueue_xmit_packed_fast(txvq, txm, in_order);
 		else
 			zxdh_enqueue_xmit_packed(txvq, txm, slots, use_indirect, in_order);
+		zxdh_update_packet_stats(&txvq->stats, txm);
 	}
+	txvq->stats.packets += nb_tx;
 	if (likely(nb_tx)) {
 		if (unlikely(zxdh_queue_kick_prepare_packed(vq))) {
 			zxdh_queue_notify(vq);
@@ -474,9 +515,10 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt
 	return nb_tx;
 }
 
-uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+uint16_t zxdh_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
+	struct zxdh_virtnet_tx *txvq = tx_queue;
 	uint16_t nb_tx;
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
@@ -496,6 +538,12 @@ uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **t
 			rte_errno = -error;
 			break;
 		}
+		if (m->nb_segs > ZXDH_TX_MAX_SEGS) {
+			PMD_TX_LOG(ERR, "%d segs dropped", m->nb_segs);
+			txvq->stats.truncated_err += nb_pkts - nb_tx;
+			rte_errno = ENOMEM;
+			break;
+		}
 	}
 	return nb_tx;
 }
@@ -571,7 +619,7 @@ static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *h
 	return 0;
 }
 
-static inline void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)
+static void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)
 {
 	int32_t error = 0;
 	/*
@@ -613,7 +661,13 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	for (i = 0; i < num; i++) {
 		rxm = rcv_pkts[i];
-
+		if (unlikely(len[i] < ZXDH_UL_NET_HDR_SIZE)) {
+			nb_enqueued++;
+			PMD_RX_LOG(ERR, "RX, len:%u err", len[i]);
+			zxdh_discard_rxbuf(vq, rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
 		struct zxdh_net_hdr_ul *header =
 			(struct zxdh_net_hdr_ul *)((char *)rxm->buf_addr +
 			RTE_PKTMBUF_HEADROOM);
@@ -623,8 +677,22 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 			PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);
 			seg_num = 1;
 		}
+		if (seg_num > ZXDH_RX_MAX_SEGS) {
+			PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);
+			nb_enqueued++;
+			zxdh_discard_rxbuf(vq, rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
 		/* bit[0:6]-pd_len unit:2B */
 		uint16_t pd_len = header->type_hdr.pd_len << 1;
+		if (pd_len > ZXDH_PD_HDR_SIZE_MAX || pd_len < ZXDH_PD_HDR_SIZE_MIN) {
+			PMD_RX_LOG(ERR, "pd_len:%d is invalid", pd_len);
+			nb_enqueued++;
+			zxdh_discard_rxbuf(vq, rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
 		/* Private queue only handle type hdr */
 		hdr_size = pd_len;
 		rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size;
@@ -639,6 +707,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 		/* Update rte_mbuf according to pi/pd header */
 		if (zxdh_rx_update_mbuf(rxm, header) < 0) {
 			zxdh_discard_rxbuf(vq, rxm);
+			rxvq->stats.errors++;
 			continue;
 		}
 		seg_res = seg_num - 1;
@@ -661,8 +730,11 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 				PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.",
 					rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);
 				zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);
+				rxvq->stats.errors++;
+				rxvq->stats.truncated_err++;
 				continue;
 			}
+			zxdh_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
 			nb_rx++;
 		}
 	}
@@ -675,6 +747,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 		if (unlikely(rcv_cnt == 0)) {
 			PMD_RX_LOG(ERR, "No enough segments for packet.");
 			rte_pktmbuf_free(rx_pkts[nb_rx]);
+			rxvq->stats.errors++;
 			break;
 		}
 		while (extra_idx < rcv_cnt) {
@@ -694,11 +767,15 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 				PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.",
 					rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);
 				zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);
+				rxvq->stats.errors++;
+				rxvq->stats.truncated_err++;
 				continue;
 			}
+			zxdh_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
 			nb_rx++;
 		}
 	}
+	rxvq->stats.packets += nb_rx;
 
 	/* Allocate new mbuf for the used descriptor */
 	if (likely(!zxdh_queue_full(vq))) {
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index 7bac39375c..c7da40f294 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -11,6 +11,11 @@
 #define ZXDH_PORT_BASE_QID_FLAG           10
 #define ZXDH_PORT_ATTR_IS_UP_FLAG         35
 
+#define ZXDH_MTU_STATS_EGRESS_BASE        0x8481
+#define ZXDH_MTU_STATS_INGRESS_BASE       0x8981
+#define ZXDH_BROAD_STATS_EGRESS_BASE      0xC902
+#define ZXDH_BROAD_STATS_INGRESS_BASE     0xD102
+
 extern struct zxdh_dtb_shared_data g_dtb_data;
 
 struct zxdh_port_attr_table {
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 87119 bytes --]

  parent reply	other threads:[~2024-12-18  9:36 UTC|newest]

Thread overview: 191+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-10 12:00 [PATCH v4] net/zxdh: Provided zxdh basic init Junlong Wang
2024-09-24  1:35 ` [v4] " Junlong Wang
2024-09-25 22:39 ` [PATCH v4] " Ferruh Yigit
2024-09-26  6:49 ` [v4] " Junlong Wang
2024-10-07 21:43 ` [PATCH v4] " Stephen Hemminger
2024-10-15  5:43 ` [PATCH v5 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-15  5:43   ` [PATCH v5 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-15  5:44     ` [PATCH v5 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-15  5:44       ` [PATCH v5 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-15  5:44       ` [PATCH v5 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-15  5:44       ` [PATCH v5 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-15 15:37         ` Stephen Hemminger
2024-10-15 15:57         ` Stephen Hemminger
2024-10-16  8:16     ` [PATCH v6 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-16  8:16       ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-16  8:18         ` [PATCH v6 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-16  8:18           ` [PATCH v6 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-21  8:50             ` Thomas Monjalon
2024-10-21 10:56             ` Junlong Wang
2024-10-16  8:18           ` [PATCH v6 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-21  8:52             ` Thomas Monjalon
2024-10-16  8:18           ` [PATCH v6 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-21  8:54             ` Thomas Monjalon
2024-10-16  8:18           ` [PATCH v6 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-18  5:18             ` [v6,9/9] " Junlong Wang
2024-10-18  6:48               ` David Marchand
2024-10-19 11:17             ` Junlong Wang
2024-10-21  9:03         ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Thomas Monjalon
2024-10-22 12:20         ` [PATCH v7 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-22 12:20           ` [PATCH v7 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-30  9:01             ` [PATCH v8 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-30  9:01               ` [PATCH v8 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-01  6:21                 ` [PATCH v9 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-02  0:57                     ` Ferruh Yigit
2024-11-04 11:58                     ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 01/10] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-07 10:32                         ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-12  0:42                           ` Thomas Monjalon
2024-12-06  5:57                         ` [PATCH v1 00/15] net/zxdh: updated " Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-10  5:53                             ` [PATCH v2 00/15] net/zxdh: updated net zxdh driver Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-11 16:10                                 ` Stephen Hemminger
2024-12-12  2:06                                 ` Junlong Wang
2024-12-12  3:35                                 ` Junlong Wang
2024-12-17 11:41                                 ` [PATCH v3 00/15] net/zxdh: updated net zxdh driver Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 04/15] net/zxdh: port tables uninit implementations Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 07/15] net/zxdh: provided dev simple tx implementations Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 11/15] net/zxdh: promisc/allmulti " Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 12/15] net/zxdh: vlan filter/ offload " Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 14/15] net/zxdh: basic stats ops implementations Junlong Wang
2024-12-17 11:41                                   ` [PATCH v3 15/15] net/zxdh: mtu update " Junlong Wang
2024-12-18  9:25                                 ` [PATCH v4 00/15] net/zxdh: updated net zxdh driver Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 04/15] net/zxdh: port tables uninit implementations Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-21  0:51                                     ` Stephen Hemminger
2024-12-18  9:25                                   ` [PATCH v4 07/15] net/zxdh: provided dev simple tx implementations Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 11/15] net/zxdh: promisc/allmulti " Junlong Wang
2024-12-18  9:25                                   ` [PATCH v4 12/15] net/zxdh: vlan filter/ offload " Junlong Wang
2024-12-18  9:26                                   ` [PATCH v4 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-21  0:44                                     ` Stephen Hemminger
2024-12-18  9:26                                   ` Junlong Wang [this message]
2024-12-18  9:26                                   ` [PATCH v4 15/15] net/zxdh: mtu update ops implementations Junlong Wang
2024-12-21  0:33                                     ` Stephen Hemminger
2024-12-10  5:53                               ` [PATCH v2 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-13 19:38                                 ` Stephen Hemminger
2024-12-13 19:41                                 ` Stephen Hemminger
2024-12-13 19:41                                 ` Stephen Hemminger
2024-12-10  5:53                               ` [PATCH v2 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-13 19:42                                 ` Stephen Hemminger
2024-12-10  5:53                               ` [PATCH v2 04/15] net/zxdh: port tables uninit implementations Junlong Wang
2024-12-13 19:45                                 ` Stephen Hemminger
2024-12-13 19:48                                 ` Stephen Hemminger
2024-12-10  5:53                               ` [PATCH v2 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-13 21:05                                 ` Stephen Hemminger
2024-12-10  5:53                               ` [PATCH v2 07/15] net/zxdh: provided dev simple tx implementations Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-13 19:57                                 ` Stephen Hemminger
2024-12-13 20:08                                 ` Stephen Hemminger
2024-12-10  5:53                               ` [PATCH v2 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 11/15] net/zxdh: promisc/allmulti " Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 12/15] net/zxdh: vlan filter/ offload " Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 14/15] net/zxdh: basic stats ops implementations Junlong Wang
2024-12-10  5:53                               ` [PATCH v2 15/15] net/zxdh: mtu update " Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 04/15] net/zxdh: port tables uninit implementations Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 07/15] net/zxdh: provided dev simple tx implementations Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 11/15] net/zxdh: promiscuous/allmulticast " Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 12/15] net/zxdh: vlan filter, vlan offload " Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 14/15] net/zxdh: basic stats ops implementations Junlong Wang
2024-12-06  5:57                           ` [PATCH v1 15/15] net/zxdh: mtu update " Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 02/10] net/zxdh: add logging implementation Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 03/10] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 04/10] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 05/10] net/zxdh: add msg chan enable implementation Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 06/10] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 07/10] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 08/10] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 09/10] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 10/10] net/zxdh: add zxdh dev close ops Junlong Wang
2024-11-06  0:40                       ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Ferruh Yigit
2024-11-07  9:28                         ` Ferruh Yigit
2024-11-07  9:58                           ` Ferruh Yigit
2024-11-12  2:49                       ` Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 2/9] net/zxdh: add logging implementation Junlong Wang
2024-11-02  1:02                     ` Ferruh Yigit
2024-11-04  2:44                     ` [v9,2/9] " Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-11-02  1:01                     ` Ferruh Yigit
2024-11-01  6:21                   ` [PATCH v9 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-11-02  1:00                     ` Ferruh Yigit
2024-11-04  2:47                     ` Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-11-02  1:06                     ` Ferruh Yigit
2024-11-04  3:30                     ` [v9,6/9] " Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-11-02  1:07                     ` Ferruh Yigit
2024-11-01  6:21                   ` [PATCH v9 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-11-02  0:56                   ` [PATCH v9 0/9] net/zxdh: introduce net zxdh driver Ferruh Yigit
2024-11-04  2:42                   ` Junlong Wang
2024-11-04  8:46                     ` Ferruh Yigit
2024-11-04  9:52                       ` David Marchand
2024-11-04 11:46                   ` Junlong Wang
2024-11-04 22:47                     ` Thomas Monjalon
2024-11-05  9:39                   ` Junlong Wang
2024-11-06  0:38                     ` Ferruh Yigit
2024-10-30  9:01               ` [PATCH v8 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-30 14:55                 ` David Marchand
2024-10-30  9:01               ` [PATCH v8 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-30  9:01               ` [PATCH v8 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-30  9:01               ` [PATCH v8 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-30  9:01               ` [PATCH v8 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-22 12:20           ` [PATCH v7 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-22 12:20           ` [PATCH v7 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-27 16:47             ` Stephen Hemminger
2024-10-27 16:47             ` Stephen Hemminger
2024-10-22 12:20           ` [PATCH v7 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-22 12:20           ` [PATCH v7 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-26 17:05             ` Thomas Monjalon
2024-10-22 12:20           ` [PATCH v7 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-22 12:20           ` [PATCH v7 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-27 17:07             ` Stephen Hemminger
2024-10-22 12:20           ` [PATCH v7 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-22 12:20           ` [PATCH v7 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-24 11:31             ` [v7,9/9] " Junlong Wang
2024-10-25  9:48             ` Junlong Wang
2024-10-26  2:32             ` Junlong Wang
2024-10-27 16:40             ` [PATCH v7 9/9] " Stephen Hemminger
2024-10-27 17:03               ` Stephen Hemminger
2024-10-27 16:58             ` Stephen Hemminger
2024-12-19 22:38 ` [PATCH v4] net/zxdh: Provided zxdh basic init Stephen Hemminger
2024-12-20  1:47 ` Junlong Wang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241218092603.1218855-15-wang.junlong1@zte.com.cn \
    --to=wang.junlong1@zte.com.cn \
    --cc=dev@dpdk.org \
    --cc=stephen@networkplumber.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).