DPDK patches and discussions
From: Junlong Wang <wang.junlong1@zte.com.cn>
To: stephen@networkplumber.org
Cc: dev@dpdk.org, Junlong Wang <wang.junlong1@zte.com.cn>
Subject: [PATCH v1 4/5] net/zxdh: add support primary/secondary process
Date: Sat, 20 Dec 2025 14:15:20 +0800	[thread overview]
Message-ID: <20251220061521.289722-5-wang.junlong1@zte.com.cn> (raw)
In-Reply-To: <20251220061521.289722-1-wang.junlong1@zte.com.cn>



Add support for primary/secondary processes.
In the secondary process, only the ops that get stats/info are supported.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c     | 29 ++++++++++++++++--
 drivers/net/zxdh/zxdh_ethdev.h     |  1 +
 drivers/net/zxdh/zxdh_ethdev_ops.c | 49 +++++++++++++++++++-----------
 drivers/net/zxdh/zxdh_queue.h      |  1 -
 drivers/net/zxdh/zxdh_rxtx.c       |  2 +-
 5 files changed, 60 insertions(+), 22 deletions(-)
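
For context only (this note and sketch are not part of the patch): a minimal
example of how a secondary process could consume the read-only ops added here,
assuming EAL is launched with --proc-type=secondary and the primary process has
already probed and configured the zxdh port. It uses only the generic ethdev
API; no zxdh-specific calls are assumed.

#include <inttypes.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_ethdev.h>

int
main(int argc, char **argv)
{
	uint16_t port_id;

	/* EAL args must include --proc-type=secondary (or auto). */
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	RTE_ETH_FOREACH_DEV(port_id) {
		struct rte_eth_dev_info info;
		struct rte_eth_stats stats;

		/* These requests are served by zxdh_eth_dev_secondary_ops. */
		if (rte_eth_dev_info_get(port_id, &info) == 0)
			printf("port %u: driver %s\n", port_id, info.driver_name);
		if (rte_eth_stats_get(port_id, &stats) == 0)
			printf("port %u: ipackets %" PRIu64 " opackets %" PRIu64 "\n",
				port_id, stats.ipackets, stats.opackets);
	}

	rte_eal_cleanup();
	return 0;
}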

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 823b1ffb5c..cd29162070 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -23,7 +23,6 @@ struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
 struct zxdh_dev_nic_shared_data dev_nic_sd[ZXDH_SLOT_MAX];
 static rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 struct zxdh_shared_data *zxdh_shared_data;
-struct zxdh_net_hdr_dl g_net_hdr_dl[RTE_MAX_ETHPORTS];
 struct zxdh_mtr_res g_mtr_res;
 
 #define ZXDH_INVALID_DTBQUE      0xFFFF
@@ -409,7 +408,7 @@ zxdh_configure_intr(struct rte_eth_dev *dev)
 static void
 zxdh_update_net_hdr_dl(struct zxdh_hw *hw)
 {
-	struct zxdh_net_hdr_dl *net_hdr_dl = &g_net_hdr_dl[hw->port_id];
+	struct zxdh_net_hdr_dl *net_hdr_dl = hw->net_hdr_dl;
 	memset(net_hdr_dl, 0, ZXDH_DL_NET_HDR_SIZE);
 
 	if (zxdh_tx_offload_enabled(hw)) {
@@ -1229,6 +1228,9 @@ zxdh_priv_res_free(struct zxdh_hw *priv)
 
 	rte_free(priv->queue_conf);
 	priv->queue_conf = NULL;
+
+	rte_free(priv->net_hdr_dl);
+	priv->net_hdr_dl = NULL;
 }
 
 static int
@@ -1555,6 +1557,16 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
 	.flow_ops_get			 = zxdh_flow_ops_get,
 };
 
+const struct eth_dev_ops zxdh_eth_dev_secondary_ops = {
+	.dev_infos_get			 = zxdh_dev_infos_get,
+	.stats_get				 = zxdh_dev_stats_get,
+	.xstats_get				 = zxdh_dev_xstats_get,
+	.xstats_get_names		 = zxdh_dev_xstats_get_names,
+	.rxq_info_get			 = zxdh_rxq_info_get,
+	.txq_info_get			 = zxdh_txq_info_get,
+	.dev_supported_ptypes_get = zxdh_dev_supported_ptypes_get,
+};
+
 static int32_t
 zxdh_init_device(struct rte_eth_dev *eth_dev)
 {
@@ -2136,6 +2148,13 @@ zxdh_priv_res_init(struct zxdh_hw *hw)
 		return -ENOMEM;
 	}
 
+	hw->net_hdr_dl = rte_zmalloc("zxdh_net_hdr_dl", sizeof(struct zxdh_net_hdr_dl), 0);
+	if (hw->net_hdr_dl == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %zu bytes to store net_hdr_dl",
+					sizeof(struct zxdh_net_hdr_dl));
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -2173,6 +2192,12 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 	eth_dev->dev_ops = &zxdh_eth_dev_ops;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		eth_dev->dev_ops = &zxdh_eth_dev_secondary_ops;
+		ZXDH_VTPCI_OPS(hw) = &zxdh_dev_pci_ops;
+		return 0;
+	}
+
 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("zxdh_mac",
 			ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 81b385ecb8..41fa89d20c 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -120,6 +120,7 @@ struct zxdh_hw {
 	struct zxdh_dev_nic_shared_data *dev_nic_sd;
 	struct vfinfo *vfinfo;
 	struct zxdh_queue_conf *queue_conf;
+	struct zxdh_net_hdr_dl *net_hdr_dl;
 
 	uint64_t bar_addr[ZXDH_NUM_BARS];
 	uint64_t host_features;
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index 8fb315eeac..068dd21876 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -1802,22 +1802,26 @@ zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats,
 	struct zxdh_hw_mac_bytes mac_bytes = {0};
 	uint32_t i = 0;
 
-	zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
-	if (hw->is_pf)
-		zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
+		if (hw->is_pf)
+			zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
 
-	zxdh_hw_np_stats_get(dev, &np_stats);
+		zxdh_hw_np_stats_get(dev, &np_stats);
 
-	stats->ipackets = vqm_stats.rx_total;
-	stats->opackets = vqm_stats.tx_total;
-	stats->ibytes = vqm_stats.rx_bytes;
-	stats->obytes = vqm_stats.tx_bytes;
-	stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
-	stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.rx_mtu_drop_pkts;
-	stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.tx_mtu_drop_pkts;
+		stats->ipackets = vqm_stats.rx_total;
+		stats->opackets = vqm_stats.tx_total;
+		stats->ibytes = vqm_stats.rx_bytes;
+		stats->obytes = vqm_stats.tx_bytes;
+		stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
+		stats->ierrors = vqm_stats.rx_error +
+			mac_stats.rx_error + np_stats.rx_mtu_drop_pkts;
+		stats->oerrors = vqm_stats.tx_error +
+			mac_stats.tx_error + np_stats.tx_mtu_drop_pkts;
 
-	if (hw->i_mtr_en || hw->e_mtr_en)
-		stats->imissed  += np_stats.rx_mtr_drop_pkts;
+		if (hw->i_mtr_en || hw->e_mtr_en)
+			stats->imissed  += np_stats.rx_mtr_drop_pkts;
+	}
 
 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 	for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
@@ -2093,14 +2097,20 @@ zxdh_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, uint3
 	uint32_t count = 0;
 	uint32_t t = 0;
 
-	if (hw->is_pf) {
+	if (hw->is_pf)
 		nstats += ZXDH_MAC_XSTATS + ZXDH_MAC_BYTES;
-		zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
-	}
+
 	if (n < nstats)
 		return nstats;
-	zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
-	zxdh_hw_np_stats_get(dev, &np_stats);
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (hw->is_pf)
+			zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
+
+		zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
+		zxdh_hw_np_stats_get(dev, &np_stats);
+	}
+
 	for (i = 0; i < ZXDH_NP_XSTATS; i++) {
 		xstats[count].value = *(uint64_t *)(((char *)&np_stats)
 						 + zxdh_np_stat_strings[i].offset);
@@ -2235,6 +2245,9 @@ zxdh_dev_fw_version_get(struct rte_eth_dev *dev,
 	char fw_ver[ZXDH_FWVERS_LEN] = {0};
 	uint32_t ret = 0;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
+
 	zxdh_agent_msg_build(hw, ZXDH_FLASH_FIR_VERSION_GET, &msg_info);
 
 	ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 3c89687d45..1a0c8a0d90 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -410,7 +410,6 @@ zxdh_queue_kick_prepare_packed(struct zxdh_virtqueue *vq)
 	return (flags != ZXDH_RING_EVENT_FLAGS_DISABLE);
 }
 
-extern struct zxdh_net_hdr_dl g_net_hdr_dl[RTE_MAX_ETHPORTS];
 
 struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
 int32_t zxdh_free_queues(struct rte_eth_dev *dev);
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index 6071f5ef93..3ccef8fd0b 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -256,7 +256,7 @@ zxdh_xmit_fill_net_hdr(struct zxdh_virtqueue *vq, struct rte_mbuf *cookie,
 	struct zxdh_pi_hdr *pi_hdr = NULL;
 	struct zxdh_pd_hdr_dl *pd_hdr = NULL;
 	struct zxdh_hw *hw = vq->hw;
-	struct zxdh_net_hdr_dl *net_hdr_dl = &g_net_hdr_dl[hw->port_id];
+	struct zxdh_net_hdr_dl *net_hdr_dl = hw->net_hdr_dl;
 	uint8_t hdr_len = hw->dl_net_hdr_len;
 	uint32_t ol_flag = 0;
 
-- 
2.27.0



Thread overview: 8+ messages
2025-12-20  6:15 [PATCH v1 0/5] net/zxdh: add support some new features Junlong Wang
2025-12-20  6:15 ` [PATCH v1 1/5] net/zxdh: add support for modifying queue depth Junlong Wang
2025-12-20  6:15 ` [PATCH v1 2/5] net/zxdh: optimize alloc queue resources Junlong Wang
2025-12-20  6:15 ` [PATCH v1 3/5] net/zxdh: add support set link speed get autoneg stats Junlong Wang
2025-12-21 16:57   ` Stephen Hemminger
2025-12-20  6:15 ` Junlong Wang [this message]
2025-12-21 17:00   ` [PATCH v1 4/5] net/zxdh: add support primary/secondary process Stephen Hemminger
2025-12-20  6:15 ` [PATCH v1 5/5] net/zxdh: add support GENEVE TSO and Rx outer UDP chksum Junlong Wang
