DPDK patches and discussions
* [PATCH v1 0/5] net/zxdh: add support for some new features
@ 2025-12-20  6:15 Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 1/5] net/zxdh: add support for modifying queue depth Junlong Wang
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Junlong Wang @ 2025-12-20  6:15 UTC (permalink / raw)
  To: stephen; +Cc: dev, Junlong Wang



V1:
  - support modifying queue depth.
  - optimize queue resource allocation.
  - support setting link speed.
  - support primary/secondary process.
  - support GENEVE TSO/checksum, and outer UDP checksum for tunnel packets.

Junlong Wang (5):
  net/zxdh: add support for modifying queue depth
  net/zxdh: optimize alloc queue resources
  net/zxdh: add support set link speed get autoneg stats
  net/zxdh: add support primary/secondary process
  net/zxdh: add support GENEVE TSO and Rx outer UDP chksum

 drivers/net/zxdh/zxdh_common.c     |  75 ++-------
 drivers/net/zxdh/zxdh_common.h     |   2 +-
 drivers/net/zxdh/zxdh_ethdev.c     | 239 ++++++++++++++++++++---------
 drivers/net/zxdh/zxdh_ethdev.h     |  23 ++-
 drivers/net/zxdh/zxdh_ethdev_ops.c | 165 ++++++++++++++++----
 drivers/net/zxdh/zxdh_ethdev_ops.h |  15 ++
 drivers/net/zxdh/zxdh_msg.c        |  52 +++++++
 drivers/net/zxdh/zxdh_msg.h        |  24 ++-
 drivers/net/zxdh/zxdh_pci.h        |   2 +
 drivers/net/zxdh/zxdh_queue.c      | 137 ++++++++++++++---
 drivers/net/zxdh/zxdh_queue.h      |  12 +-
 drivers/net/zxdh/zxdh_rxtx.c       | 117 ++++++++++----
 drivers/net/zxdh/zxdh_tables.h     |  12 +-
 13 files changed, 649 insertions(+), 226 deletions(-)

-- 
2.27.0



* [PATCH v1 1/5] net/zxdh: add support for modifying queue depth
  2025-12-20  6:15 [PATCH v1 0/5] net/zxdh: add support for some new features Junlong Wang
@ 2025-12-20  6:15 ` Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 2/5] net/zxdh: optimize alloc queue resources Junlong Wang
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Junlong Wang @ 2025-12-20  6:15 UTC (permalink / raw)
  To: stephen; +Cc: dev, Junlong Wang



add support for modifying the queue depth; the minimum depth is 512,
the maximum depth is 32768, and non-power-of-two values are rounded
up to the next power of two.
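
As a usage sketch (port_id and mbuf_pool are placeholders), an
application can now request a non-default ring size through the
standard ethdev setup call:

	/* ask for 1024 Rx descriptors on queue 0 */
	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);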

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c | 161 +++++++++++++++++++++------------
 drivers/net/zxdh/zxdh_ethdev.h |  15 +++
 drivers/net/zxdh/zxdh_queue.c  | 137 +++++++++++++++++++++++-----
 drivers/net/zxdh/zxdh_queue.h  |  11 ++-
 4 files changed, 240 insertions(+), 84 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index bc929bacc5..80b992d4ad 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -641,7 +641,8 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
 	uint32_t vq_size = 0;
 	int32_t ret = 0;
 
-	if (hw->channel_context[vtpci_logic_qidx].valid == 0) {
+	if (vtpci_logic_qidx >= ZXDH_QUEUES_NUM_MAX ||
+		hw->channel_context[vtpci_logic_qidx].valid == 0) {
 		PMD_DRV_LOG(ERR, "lch %d is invalid", vtpci_logic_qidx);
 		return -EINVAL;
 	}
@@ -650,7 +651,10 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
 	PMD_DRV_LOG(DEBUG, "vtpci_logic_qidx :%d setting up physical queue: %u on NUMA node %d",
 			vtpci_logic_qidx, vtpci_phy_qidx, numa_node);
 
-	vq_size = ZXDH_QUEUE_DEPTH;
+	if (queue_type == ZXDH_VTNET_RQ)
+		vq_size = hw->queue_conf->conf[vtpci_logic_qidx >> 1].rx_nb_desc;
+	else
+		vq_size = hw->queue_conf->conf[vtpci_logic_qidx >> 1].tx_nb_desc;
 
 	if (ZXDH_VTPCI_OPS(hw)->set_queue_num != NULL)
 		ZXDH_VTPCI_OPS(hw)->set_queue_num(hw, vtpci_phy_qidx, vq_size);
@@ -980,12 +984,6 @@ zxdh_dev_conf_offload(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	ret = zxdh_rss_configure(dev);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "rss configure failed");
-		return ret;
-	}
-
 	ret = zxdh_rx_csum_lro_offload_configure(dev);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "rx csum lro configure failed");
@@ -1081,52 +1079,6 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
 	hw->has_tx_offload = zxdh_tx_offload_enabled(hw);
 	hw->has_rx_offload = zxdh_rx_offload_enabled(hw);
 
-	if (dev->data->nb_rx_queues == hw->rx_qnum &&
-			dev->data->nb_tx_queues == hw->tx_qnum) {
-		PMD_DRV_LOG(DEBUG, "The queue not need to change. queue_rx %d queue_tx %d",
-				hw->rx_qnum, hw->tx_qnum);
-		/*no queue changed */
-		goto end;
-	}
-
-	PMD_DRV_LOG(DEBUG, "queue changed need reset");
-	/* Reset the device although not necessary at startup */
-	zxdh_pci_reset(hw);
-
-	/* Tell the host we've noticed this device. */
-	zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
-
-	/* Tell the host we've known how to drive the device. */
-	zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER);
-	/* The queue needs to be released when reconfiguring*/
-	if (hw->vqs != NULL) {
-		zxdh_dev_free_mbufs(dev);
-		zxdh_free_queues(dev);
-	}
-
-	hw->rx_qnum = dev->data->nb_rx_queues;
-	hw->tx_qnum = dev->data->nb_tx_queues;
-	ret = zxdh_alloc_queues(dev);
-	if (ret < 0)
-		return ret;
-
-	zxdh_datach_set(dev);
-
-	if (zxdh_configure_intr(dev) < 0) {
-		PMD_DRV_LOG(ERR, "Failed to configure interrupt");
-		zxdh_free_queues(dev);
-		return -1;
-	}
-
-	ret = zxdh_rss_qid_config(dev);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "Failed to configure base qid!");
-		return -1;
-	}
-
-	zxdh_pci_reinit_complete(hw);
-
-end:
 	zxdh_dev_conf_offload(dev);
 	zxdh_update_net_hdr_dl(hw);
 	return ret;
@@ -1264,6 +1216,9 @@ zxdh_priv_res_free(struct zxdh_hw *priv)
 
 	rte_free(priv->channel_context);
 	priv->channel_context = NULL;
+
+	rte_free(priv->queue_conf);
+	priv->queue_conf = NULL;
 }
 
 static int
@@ -1354,6 +1309,88 @@ zxdh_mac_config(struct rte_eth_dev *eth_dev)
 	return ret;
 }
 
+static int32_t zxdh_reconfig_queues(struct rte_eth_dev *dev)
+{
+	int32_t ret;
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	zxdh_pci_reset(hw);
+
+	/* Tell the host we've noticed this device. */
+	zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
+
+	/* Tell the host we've known how to drive the device. */
+	zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER);
+	/* The queue needs to be released when reconfiguring */
+	if (hw->vqs != NULL) {
+		zxdh_dev_free_mbufs(dev);
+		zxdh_free_queues(dev);
+	}
+
+	hw->rx_qnum = dev->data->nb_rx_queues;
+	hw->tx_qnum = dev->data->nb_tx_queues;
+	ret = zxdh_alloc_queues(dev);
+	if (ret < 0)
+		return ret;
+
+	zxdh_datach_set(dev);
+
+	if (zxdh_configure_intr(dev) < 0) {
+		PMD_DRV_LOG(ERR, "Failed to configure interrupt");
+		zxdh_free_queues(dev);
+		return -1;
+	}
+
+	zxdh_pci_reinit_complete(hw);
+	return 0;
+}
+
+static int32_t zxdh_config_queue(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t ret = 0, i = 0;
+
+	if (hw->queue_conf->queue_changed) {
+		ret = zxdh_reconfig_queues(dev);
+		if (ret)
+			return ret;
+
+		ret = zxdh_rss_qid_config(dev);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to configure base qid!");
+			return -1;
+		}
+
+		ret = zxdh_rss_configure(dev);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to config rss");
+			return -1;
+		}
+		hw->queue_conf->queue_changed = 0;
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		ret = zxdh_tx_queue_config(dev, i);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to config tx queue");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		ret = zxdh_rx_queue_config(dev, i);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to config rx queue");
+			return ret;
+		}
+		ret = zxdh_dev_rx_queue_setup_finish(dev, i);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int
 zxdh_dev_start(struct rte_eth_dev *dev)
 {
@@ -1363,12 +1400,9 @@ zxdh_dev_start(struct rte_eth_dev *dev)
 	uint16_t logic_qidx;
 	uint16_t i;
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		logic_qidx = 2 * i + ZXDH_RQ_QUEUE_IDX;
-		ret = zxdh_dev_rx_queue_setup_finish(dev, logic_qidx);
-		if (ret < 0)
-			return ret;
-	}
+	ret = zxdh_config_queue(dev);
+	if (ret)
+		return ret;
 
 	zxdh_set_rxtx_funcs(dev);
 	ret = zxdh_intr_enable(dev);
@@ -2081,6 +2115,13 @@ zxdh_priv_res_init(struct zxdh_hw *hw)
 		PMD_DRV_LOG(ERR, "Failed to allocate channel_context");
 		return -ENOMEM;
 	}
+
+	hw->queue_conf = rte_zmalloc("zxdh_queue_conf", sizeof(struct zxdh_queue_conf), 0);
+	if (hw->queue_conf == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate queue conf");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index a269199540..411d287f32 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -94,6 +94,20 @@ struct vfinfo {
 	struct rte_ether_addr vf_mac[ZXDH_MAX_MAC_ADDRS];
 };
 
+struct queue_conf {
+	struct rte_mempool *queue_mp;
+	struct rte_eth_rxconf zxdh_rx_conf;
+	struct rte_eth_txconf zxdh_tx_conf;
+	uint16_t rx_nb_desc;
+	uint16_t tx_nb_desc;
+};
+
+struct zxdh_queue_conf {
+	struct queue_conf conf[ZXDH_QUEUES_NUM_MAX / 2];
+	uint16_t queue_changed;
+	uint16_t rsv;
+};
+
 struct zxdh_hw {
 	struct rte_eth_dev *eth_dev;
 	struct zxdh_pci_common_cfg *common_cfg;
@@ -105,6 +119,7 @@ struct zxdh_hw {
 	struct zxdh_dev_shared_data *dev_sd;
 	struct zxdh_dev_nic_shared_data *dev_nic_sd;
 	struct vfinfo *vfinfo;
+	struct zxdh_queue_conf *queue_conf;
 
 	uint64_t bar_addr[ZXDH_NUM_BARS];
 	uint64_t host_features;
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index 9266756d79..4eb8ef782c 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -184,6 +184,45 @@ zxdh_check_mempool(struct rte_mempool *mp, uint16_t offset, uint16_t min_length)
 	return 0;
 }
 
+static unsigned int
+log2above(unsigned int v)
+{
+	unsigned int l;
+	unsigned int r;
+
+	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
+		r |= (v & 1);
+	return l + r;
+}
+
+static uint16_t zxdh_queue_desc_pre_setup(uint16_t desc)
+{
+	uint32_t nb_desc = desc;
+
+	if (desc < ZXDH_MIN_QUEUE_DEPTH) {
+		PMD_DRV_LOG(WARNING, "nb_desc(%u) < min queue depth (%u), turn to min queue depth",
+				desc, ZXDH_MIN_QUEUE_DEPTH);
+		return ZXDH_MIN_QUEUE_DEPTH;
+	}
+
+	if (desc > ZXDH_MAX_QUEUE_DEPTH) {
+		PMD_DRV_LOG(WARNING, "nb_desc(%u) > max queue depth (%d), turn to max queue depth",
+				desc, ZXDH_MAX_QUEUE_DEPTH);
+		return ZXDH_MAX_QUEUE_DEPTH;
+	}
+
+	if (!rte_is_power_of_2(desc)) {
+		nb_desc = 1 << log2above(desc);
+		if (nb_desc > ZXDH_MAX_QUEUE_DEPTH)
+			nb_desc = ZXDH_MAX_QUEUE_DEPTH;
+		PMD_DRV_LOG(WARNING, "nb_desc(%u) turn to the next power of two (%u)",
+			desc, nb_desc);
+	}
+
+	return nb_desc;
+}
+
+
 int32_t
 zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -191,37 +230,66 @@ zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint32_t socket_id __rte_unused,
 			const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t valid_nb_desc = 0;
+
+	if (rx_conf->rx_deferred_start) {
+		PMD_RX_LOG(ERR, "Rx deferred start is not supported");
+		return -EINVAL;
+	}
+
+	valid_nb_desc = zxdh_queue_desc_pre_setup(nb_desc);
+	if (dev->data->nb_rx_queues != hw->rx_qnum ||
+		valid_nb_desc != hw->queue_conf->conf[queue_idx].rx_nb_desc) {
+		PMD_RX_LOG(DEBUG, "rx queue changed. rxq:[%d], hw->rxq:[%d], nb_desc:%d, hw->nb_desc:%d",
+			dev->data->nb_rx_queues, hw->rx_qnum, valid_nb_desc,
+			hw->queue_conf->conf[queue_idx].rx_nb_desc);
+		hw->queue_conf->queue_changed = 1;
+	}
+
+	rte_memcpy(&hw->queue_conf->conf[queue_idx].zxdh_rx_conf,
+		rx_conf, sizeof(struct rte_eth_rxconf));
+	hw->queue_conf->conf[queue_idx].rx_nb_desc = valid_nb_desc;
+	hw->queue_conf->conf[queue_idx].queue_mp = mp;
+
+	return 0;
+}
+
+int32_t
+zxdh_rx_queue_config(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
 	struct zxdh_hw *hw = dev->data->dev_private;
 	uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_RQ_QUEUE_IDX;
 	struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
 	int32_t ret = 0;
 
-	if (rx_conf->rx_deferred_start) {
-		PMD_RX_LOG(ERR, "Rx deferred start is not supported");
+	if (!hw->queue_conf) {
+		PMD_RX_LOG(ERR, "rx queue config failed queue_conf is NULL, queue-idx:%d",
+				queue_idx);
 		return -EINVAL;
 	}
+
+	struct rte_eth_rxconf *rx_conf = &hw->queue_conf->conf[queue_idx].zxdh_rx_conf;
 	uint16_t rx_free_thresh = rx_conf->rx_free_thresh;
 
 	if (rx_free_thresh == 0)
 		rx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_RX_FREE_THRESH);
 
-	/* rx_free_thresh must be multiples of four. */
 	if (rx_free_thresh & 0x3) {
 		PMD_RX_LOG(ERR, "(rx_free_thresh=%u port=%u queue=%u)",
 			rx_free_thresh, dev->data->port_id, queue_idx);
 		return -EINVAL;
 	}
-	/* rx_free_thresh must be less than the number of RX entries */
 	if (rx_free_thresh >= vq->vq_nentries) {
 		PMD_RX_LOG(ERR, "RX entries (%u). (rx_free_thresh=%u port=%u queue=%u)",
 			vq->vq_nentries, rx_free_thresh, dev->data->port_id, queue_idx);
 		return -EINVAL;
 	}
+
 	vq->vq_free_thresh = rx_free_thresh;
-	nb_desc = ZXDH_QUEUE_DEPTH;
 
-	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, hw->queue_conf->conf[queue_idx].rx_nb_desc);
 	struct zxdh_virtnet_rx *rxvq = &vq->rxq;
 
 	rxvq->queue_id = vtpci_logic_qidx;
@@ -231,6 +299,7 @@ zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		mbuf_min_size = ZXDH_MBUF_SIZE_4K;
 
+	struct rte_mempool *mp = hw->queue_conf->conf[queue_idx].queue_mp;
 	ret = zxdh_check_mempool(mp, RTE_PKTMBUF_HEADROOM, mbuf_min_size);
 	if (ret != 0) {
 		PMD_RX_LOG(ERR,
@@ -238,6 +307,7 @@ zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 	rxvq->mpool = mp;
+
 	if (queue_idx < dev->data->nb_rx_queues)
 		dev->data->rx_queues[queue_idx] = rxvq;
 
@@ -251,21 +321,48 @@ zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			uint32_t socket_id __rte_unused,
 			const struct rte_eth_txconf *tx_conf)
 {
-	uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_TQ_QUEUE_IDX;
 	struct zxdh_hw *hw = dev->data->dev_private;
-	struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
-	struct zxdh_virtnet_tx *txvq = NULL;
-	uint16_t tx_free_thresh = 0;
+	uint16_t valid_nb_desc = 0;
 
 	if (tx_conf->tx_deferred_start) {
 		PMD_TX_LOG(ERR, "Tx deferred start is not supported");
 		return -EINVAL;
 	}
 
-	nb_desc = ZXDH_QUEUE_DEPTH;
+	valid_nb_desc = zxdh_queue_desc_pre_setup(nb_desc);
+	if (dev->data->nb_tx_queues != hw->tx_qnum ||
+		valid_nb_desc != hw->queue_conf->conf[queue_idx].tx_nb_desc) {
+		PMD_TX_LOG(DEBUG, "tx queue changed. txq:[%d], hw->txq:[%d], nb_desc:%d, hw->nb_desc:%d",
+			dev->data->nb_tx_queues, hw->tx_qnum, valid_nb_desc,
+			hw->queue_conf->conf[queue_idx].tx_nb_desc);
+		hw->queue_conf->queue_changed = 1;
+	}
+
+	rte_memcpy(&hw->queue_conf->conf[queue_idx].zxdh_tx_conf,
+		tx_conf, sizeof(struct rte_eth_txconf));
+	hw->queue_conf->conf[queue_idx].tx_nb_desc = valid_nb_desc;
+
+	return 0;
+}
+
+int32_t
+zxdh_tx_queue_config(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_virtnet_tx *txvq = NULL;
+	uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_TQ_QUEUE_IDX;
+	struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
+	uint16_t tx_free_thresh = 0;
+
+	if (!hw->queue_conf) {
+		PMD_TX_LOG(ERR, "tx queue config failed queue_conf is NULL, queue_idx:%d",
+				queue_idx);
+		return -EINVAL;
+	}
 
-	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+	struct rte_eth_txconf *tx_conf = &hw->queue_conf->conf[queue_idx].zxdh_tx_conf;
 
+	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, hw->queue_conf->conf[queue_idx].tx_nb_desc);
 	txvq = &vq->txq;
 	txvq->queue_id = vtpci_logic_qidx;
 
@@ -273,15 +370,14 @@ zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	if (tx_free_thresh == 0)
 		tx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_TX_FREE_THRESH);
 
-	/* tx_free_thresh must be less than the number of TX entries minus 3 */
 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
-		PMD_TX_LOG(ERR, "TX entries - 3 (%u). (tx_free_thresh=%u port=%u queue=%u)",
-				vq->vq_nentries - 3, tx_free_thresh, dev->data->port_id, queue_idx);
+		PMD_TX_LOG(ERR, "tx_free_thresh must be less than the number of TX entries minus 3 (%u). (tx_free_thresh=%u port=%u queue=%u)",
+					vq->vq_nentries - 3,
+					tx_free_thresh, dev->data->port_id, queue_idx);
 		return -EINVAL;
 	}
 
 	vq->vq_free_thresh = tx_free_thresh;
-
 	if (queue_idx < dev->data->nb_tx_queues)
 		dev->data->tx_queues[queue_idx] = txvq;
 
@@ -337,19 +433,14 @@ int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,
 	return 0;
 }
 
-int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t logic_qidx)
+int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
 	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t logic_qidx = ((queue_idx << 1) + ZXDH_RQ_QUEUE_IDX) % ZXDH_QUEUES_NUM_MAX;
 	struct zxdh_virtqueue *vq = hw->vqs[logic_qidx];
 	struct zxdh_virtnet_rx *rxvq = &vq->rxq;
-	uint16_t desc_idx;
 	int32_t error = 0;
 
-	/* Allocate blank mbufs for the each rx descriptor */
-	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
-	for (desc_idx = 0; desc_idx < ZXDH_MBUF_BURST_SZ; desc_idx++)
-		vq->sw_ring[vq->vq_nentries + desc_idx] = &rxvq->fake_mbuf;
-
 	while (!zxdh_queue_full(vq)) {
 		struct rte_mbuf *new_pkts[ZXDH_MBUF_BURST_SZ];
 		uint16_t free_cnt = RTE_MIN(ZXDH_MBUF_BURST_SZ, vq->vq_free_cnt);
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 1a54e7cfc9..3c89687d45 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -21,6 +21,14 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_TQ_QUEUE_IDX                 1
 #define ZXDH_MAX_TX_INDIRECT              8
 
+#define ZXDH_MIN_QUEUE_DEPTH              512
+#define ZXDH_MAX_QUEUE_DEPTH              32768
+
+#define ZXDH_TX_FREE_THRESH               32
+#define ZXDH_RX_FREE_THRESH               32
+
+#define ZXDH_MBUF_SIZE_4K                 4096
+
 /* This marks a buffer as continuing via the next field. */
 #define ZXDH_VRING_DESC_F_NEXT                 1
 
@@ -424,5 +432,6 @@ int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t logic_q
 void zxdh_queue_rxvq_flush(struct zxdh_virtqueue *vq);
 int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,
 			struct rte_mbuf **cookie, uint16_t num);
-
+int32_t zxdh_tx_queue_config(struct rte_eth_dev *dev, uint16_t queue_idx);
+int32_t zxdh_rx_queue_config(struct rte_eth_dev *dev, uint16_t queue_idx);
 #endif /* ZXDH_QUEUE_H */
-- 
2.27.0



* [PATCH v1 2/5] net/zxdh: optimize alloc queue resources
  2025-12-20  6:15 [PATCH v1 0/5] net/zxdh: add support for some new features Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 1/5] net/zxdh: add support for modifying queue depth Junlong Wang
@ 2025-12-20  6:15 ` Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 3/5] net/zxdh: add support set link speed get autoneg stats Junlong Wang
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Junlong Wang @ 2025-12-20  6:15 UTC (permalink / raw)
  To: stephen; +Cc: dev, Junlong Wang



optimize queue resource allocation: allocate the queue resources
within a single hardware lock acquire/release period, so the channel
bitmap read, the PCIe id binding, and the bitmap write-back happen
atomically.
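
The resulting control flow, in sketch form (simplified from the diff
below, error handling omitted):

	zxdh_timedlock(hw, 1000);      /* take the hardware lock */
	/* ... scan the share registers for a free channel bit ... */
	zxdh_datach_set(dev, ph_chno); /* bind the channel to our PCIe id */
	zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var); /* commit the bit */
	zxdh_release_lock(hw);         /* release the hardware lock */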

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_common.c | 75 ++++++----------------------------
 drivers/net/zxdh/zxdh_common.h |  2 +-
 drivers/net/zxdh/zxdh_ethdev.c | 24 +++++++----
 drivers/net/zxdh/zxdh_pci.h    |  2 +
 4 files changed, 31 insertions(+), 72 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
index ce53ee8a05..ffc00ab3e6 100644
--- a/drivers/net/zxdh/zxdh_common.c
+++ b/drivers/net/zxdh/zxdh_common.c
@@ -354,76 +354,25 @@ zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val)
 	*((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)) = val;
 }
 
-static int32_t
-zxdh_common_table_write(struct zxdh_hw *hw, uint8_t field,
-			void *buff, uint16_t buff_size)
-{
-	struct zxdh_pci_bar_msg desc;
-	struct zxdh_msg_recviver_mem msg_rsp;
-	int32_t ret = 0;
-
-	if (!hw->msg_chan_init) {
-		PMD_DRV_LOG(ERR, "Bar messages channel not initialized");
-		return -1;
-	}
-	if (buff_size != 0 && buff == NULL) {
-		PMD_DRV_LOG(ERR, "Buff is invalid");
-		return -1;
-	}
-
-	ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_WRITE,
-					field, buff, buff_size);
-
-	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed to fill common msg");
-		return ret;
-	}
-
-	ret = zxdh_send_command(hw, &desc, ZXDH_BAR_MODULE_TBL, &msg_rsp);
-	if (ret != 0)
-		goto free_msg_data;
-
-	ret = zxdh_common_rsp_check(&msg_rsp, NULL, 0);
-	if (ret != 0)
-		goto free_rsp_data;
-
-free_rsp_data:
-	rte_free(msg_rsp.recv_buffer);
-free_msg_data:
-	rte_free(desc.payload_addr);
-	return ret;
-}
-
 int32_t
-zxdh_datach_set(struct rte_eth_dev *dev)
+zxdh_datach_set(struct rte_eth_dev *dev, uint16_t ph_chno)
 {
 	struct zxdh_hw *hw = dev->data->dev_private;
-	uint16_t nr_vq = hw->rx_qnum + hw->tx_qnum;
-	uint16_t buff_size = (nr_vq % ZXDH_QUEUES_NUM_MAX + 1) * sizeof(uint16_t);
-	int ret = 0;
-	uint16_t *pdata, i;
-
-	void *buff = rte_zmalloc(NULL, buff_size, 0);
+	uint64_t addr = 0;
+	uint64_t pcieid_addr = 0;
 
-	if (unlikely(buff == NULL)) {
-		PMD_DRV_LOG(ERR, "Failed to allocate buff");
+	if (ph_chno >= ZXDH_QUEUES_PCIEID_SIZE) {
+		PMD_DRV_LOG(ERR, "ph_chno %u exceeds limit %08x", ph_chno, ZXDH_QUEUES_PCIEID_SIZE);
+		return -EINVAL;
 	}
 
-	pdata = (uint16_t *)buff;
-	*pdata++ = nr_vq;
-	for (i = 0; i < hw->rx_qnum; i++)
-		*(pdata + i) = hw->channel_context[i * 2].ph_chno;
-	for (i = 0; i < hw->tx_qnum; i++)
-		*(pdata + hw->rx_qnum + i) = hw->channel_context[i * 2 + 1].ph_chno;
-	ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH, (void *)buff, buff_size);
-
-	if (ret != 0)
-		PMD_DRV_LOG(ERR, "Failed to setup data channel of common table. code:%d", ret);
-	hw->queue_set_flag = 1;
-	rte_free(buff);
-
-	return ret;
+	pcieid_addr =
+		*((volatile uint64_t *)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_QUEUES_PCIEID_ADDR));
+	addr = hw->bar_addr[ZXDH_BAR0_INDEX] + pcieid_addr + (ph_chno << 1);
+	*((volatile uint16_t *)(addr)) = hw->pcie_id;
+	PMD_DRV_LOG(DEBUG, "addr %lx pcie_id %04x, pcieid_addr %lx lch %d",
+		addr, hw->pcie_id, pcieid_addr, ph_chno);
+	return 0;
 }
 
 bool
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
index 6d78ae0273..bbc6bed1df 100644
--- a/drivers/net/zxdh/zxdh_common.h
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -125,7 +125,7 @@ void zxdh_release_lock(struct zxdh_hw *hw);
 int32_t zxdh_timedlock(struct zxdh_hw *hw, uint32_t us);
 uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg);
 void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val);
-int32_t zxdh_datach_set(struct rte_eth_dev *dev);
+int32_t zxdh_datach_set(struct rte_eth_dev *dev, uint16_t ph_chno);
 
 bool zxdh_rx_offload_enabled(struct zxdh_hw *hw);
 bool zxdh_tx_offload_enabled(struct zxdh_hw *hw);
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 80b992d4ad..2fc2d78aff 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -520,6 +520,9 @@ zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
 	uint32_t res_bit = (total_queue_num + inval_bit) % 32;
 	uint32_t vq_reg_num = (total_queue_num + inval_bit) / 32 + (res_bit ? 1 : 0);
 	int32_t ret = 0;
+	uint32_t addr = 0;
+	uint32_t var = 0;
+	int32_t ph_chno = 0;
 
 	ret = zxdh_timedlock(hw, 1000);
 	if (ret) {
@@ -528,15 +531,14 @@ zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
 	}
 
 	for (phy_vq_reg = 0; phy_vq_reg < vq_reg_num; phy_vq_reg++) {
-		uint32_t addr = ZXDH_QUERES_SHARE_BASE +
-		(phy_vq_reg + phy_vq_reg_oft) * sizeof(uint32_t);
-		uint32_t var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
+		addr = ZXDH_QUERES_SHARE_BASE +
+			(phy_vq_reg + phy_vq_reg_oft) * sizeof(uint32_t);
+		var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
 		if (phy_vq_reg == 0) {
 			for (j = (inval_bit + base); j < 32; j += 2) {
 				/* Got the available channel & update COI table */
 				if ((var & (1 << j)) == 0) {
 					var |= (1 << j);
-					zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
 					done = 1;
 					break;
 				}
@@ -548,7 +550,6 @@ zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
 				/* Got the available channel & update COI table */
 				if ((var & (1 << j)) == 0) {
 					var |= (1 << j);
-					zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
 					done = 1;
 					break;
 				}
@@ -560,7 +561,6 @@ zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
 				/* Got the available channel & update COI table */
 				if ((var & (1 << j)) == 0) {
 					var |= (1 << j);
-					zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
 					done = 1;
 					break;
 				}
@@ -570,6 +570,16 @@ zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
 		}
 	}
 
+	if (done) {
+		ph_chno = (phy_vq_reg + phy_vq_reg_oft) * 32 + j;
+		if (zxdh_datach_set(dev, ph_chno) != 0) {
+			zxdh_release_lock(hw);
+			PMD_DRV_LOG(ERR, "zxdh_datach_set queue pcie addr failed");
+			return -1;
+		}
+		zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+	}
+
 	zxdh_release_lock(hw);
 	/* check for no channel condition */
 	if (done != 1) {
@@ -1333,8 +1343,6 @@ static int32_t zxdh_reconfig_queues(struct rte_eth_dev *dev)
 	if (ret < 0)
 		return ret;
 
-	zxdh_datach_set(dev);
-
 	if (zxdh_configure_intr(dev) < 0) {
 		PMD_DRV_LOG(ERR, "Failed to configure interrupt");
 		zxdh_free_queues(dev);
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
index a1834f6615..d0f4ff6398 100644
--- a/drivers/net/zxdh/zxdh_pci.h
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -65,6 +65,8 @@ enum zxdh_msix_status {
 #define ZXDH_CONFIG_STATUS_DEV_NEED_RESET  0x40
 #define ZXDH_CONFIG_STATUS_FAILED          0x80
 #define ZXDH_PCI_QUEUE_ADDR_SHIFT          12
+#define ZXDH_QUEUES_PCIEID_ADDR            0x1BF8
+#define ZXDH_QUEUES_PCIEID_SIZE            4096
 
 struct __rte_packed_begin zxdh_net_config {
 	/* The config defining mac address (if ZXDH_NET_F_MAC) */
-- 
2.27.0



* [PATCH v1 3/5] net/zxdh: add support set link speed get autoneg stats
  2025-12-20  6:15 [PATCH v1 0/5] net/zxdh: add support for some new features Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 1/5] net/zxdh: add support for modifying queue depth Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 2/5] net/zxdh: optimize alloc queue resources Junlong Wang
@ 2025-12-20  6:15 ` Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 4/5] net/zxdh: add support primary/secondary process Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 5/5] net/zxdh: add support GENEVE TSO and Rx outer UDP chksum Junlong Wang
  4 siblings, 0 replies; 6+ messages in thread
From: Junlong Wang @ 2025-12-20  6:15 UTC (permalink / raw)
  To: stephen; +Cc: dev, Junlong Wang



add ops to set the link speed from user mode, and to get the
autonegotiation (autoneg) status.
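
From the application side this maps onto the standard ethdev speed
configuration (a minimal sketch; port_id and the queue counts are
placeholders):

	struct rte_eth_conf conf = {0};

	/* request a 25G link; the PMD translates this into the
	 * ZXDH_SPM_SPEED_1X_25G mode defined below */
	conf.link_speeds = RTE_ETH_LINK_SPEED_25G;
	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);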

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c     |   6 ++
 drivers/net/zxdh/zxdh_ethdev.h     |   7 +-
 drivers/net/zxdh/zxdh_ethdev_ops.c | 116 ++++++++++++++++++++++++++---
 drivers/net/zxdh/zxdh_ethdev_ops.h |  15 ++++
 drivers/net/zxdh/zxdh_msg.c        |  49 ++++++++++++
 drivers/net/zxdh/zxdh_msg.h        |  24 +++++-
 6 files changed, 202 insertions(+), 15 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 2fc2d78aff..823b1ffb5c 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -1432,6 +1432,10 @@ zxdh_dev_start(struct rte_eth_dev *dev)
 		zxdh_queue_notify(vq);
 	}
 
+	if (hw->is_pf)
+		zxdh_link_speed_set(dev);
+	zxdh_autoneg_stats_get(dev);
+
 	hw->admin_status = RTE_ETH_LINK_UP;
 	zxdh_dev_link_update(dev, 0);
 
@@ -1604,6 +1608,8 @@ zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw)
 		return -1;
 	}
 
+	zxdh_speed_modes_get(eth_dev);
+
 	if (hw->switchoffload)
 		hw->phyport = 9;
 
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 411d287f32..81b385ecb8 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -125,7 +125,7 @@ struct zxdh_hw {
 	uint64_t host_features;
 	uint64_t guest_features;
 	uint32_t speed;
-	uint32_t speed_mode;
+	int32_t speed_mode;
 	uint32_t notify_off_multiplier;
 	union zxdh_virport_num vport;
 	uint16_t max_queue_pairs;
@@ -173,9 +173,10 @@ struct zxdh_hw {
 	struct zxdh_vlan_offload_cfg vlan_offload_cfg;
 	uint16_t queue_pool_count;
 	uint16_t queue_pool_start;
-	uint8_t dl_net_hdr_len;
 	uint16_t vxlan_fd_num;
-	uint8_t rsv1[1];
+	uint32_t support_speed_modes;
+	uint8_t dl_net_hdr_len;
+	uint8_t autoneg;
 
 	struct dh_flow_list dh_flow_list;
 };
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index cabf81107e..8fb315eeac 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -307,7 +307,7 @@ zxdh_link_info_get(struct rte_eth_dev *dev, struct rte_eth_link *link)
 		}
 
 		link->link_speed = ZXDH_GET(link_info_msg, link_msg_addr, speed);
-		link->link_autoneg = ZXDH_GET(link_info_msg, link_msg_addr, autoneg);
+		/* link_autoneg is now filled from hw->autoneg below */
 		hw->speed_mode = ZXDH_GET(link_info_msg, link_msg_addr, speed_modes);
 		if ((ZXDH_GET(link_info_msg, link_msg_addr, duplex) & RTE_ETH_LINK_FULL_DUPLEX) ==
 				RTE_ETH_LINK_FULL_DUPLEX)
@@ -322,6 +322,7 @@ zxdh_link_info_get(struct rte_eth_dev *dev, struct rte_eth_link *link)
 		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 		link->link_status = RTE_ETH_LINK_UP;
 	}
+	link->link_autoneg = hw->autoneg;
 	hw->speed = link->link_speed;
 
 	return 0;
@@ -368,6 +369,109 @@ int zxdh_dev_set_link_up(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static int32_t zxdh_speed_mode_to_spm(uint32_t link_speed_modes)
+{
+	switch (link_speed_modes) {
+	case RTE_ETH_LINK_SPEED_1G:   return ZXDH_SPM_SPEED_1X_1G;
+	case RTE_ETH_LINK_SPEED_10G:  return ZXDH_SPM_SPEED_1X_10G;
+	case RTE_ETH_LINK_SPEED_25G:  return ZXDH_SPM_SPEED_1X_25G;
+	case RTE_ETH_LINK_SPEED_50G:  return ZXDH_SPM_SPEED_1X_50G;
+	case RTE_ETH_LINK_SPEED_100G: return ZXDH_SPM_SPEED_4X_100G;
+	case RTE_ETH_LINK_SPEED_200G: return ZXDH_SPM_SPEED_4X_200G;
+	default: return -1;
+	}
+}
+
+int32_t zxdh_link_speed_set(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg = {0};
+	uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};
+	void *reply_body_addr = ZXDH_ADDR_OF(msg_reply_info, zxdh_msg_reply_info, reply_body);
+	uint32_t link_speed = dev->data->dev_conf.link_speeds;
+	int32_t spm_speed_modes = 0;
+	int32_t ret = 0;
+
+	spm_speed_modes =
+		zxdh_speed_mode_to_spm(link_speed & ~RTE_ETH_LINK_SPEED_FIXED);
+	if (spm_speed_modes == -1 || spm_speed_modes == hw->speed_mode) {
+		PMD_DRV_LOG(DEBUG, "no speed update needed");
+		return 0;
+	}
+	if ((spm_speed_modes & hw->support_speed_modes) == 0) {
+		PMD_DRV_LOG(ERR, "unsupported configured speed: 0x%x", link_speed);
+		return 0;
+	}
+
+	zxdh_agent_msg_build(hw, ZXDH_MAC_SPEED_SET, &msg);
+	msg.data.link_msg.autoneg = 0;
+	msg.data.link_msg.speed_modes = spm_speed_modes & hw->support_speed_modes;
+
+	ret = zxdh_send_msg_to_riscv(dev, &msg, sizeof(struct zxdh_msg_info),
+				zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info),
+				ZXDH_BAR_MODULE_MAC);
+	uint8_t flag = ZXDH_GET(msg_reply_body, reply_body_addr, flag);
+	if (flag != ZXDH_REPS_SUCC || ret) {
+		PMD_DRV_LOG(ERR, "failed to set link speed!");
+		return -1;
+	}
+
+	return 0;
+}
+
+void
+zxdh_speed_modes_get(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg = {0};
+	uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};
+	int32_t ret = 0;
+
+	if (!hw->is_pf)
+		return;
+
+	msg.agent_msg_head.msg_type = ZXDH_MAC_PHYPORT_INIT;
+	msg.agent_msg_head.init = 1;
+
+	ret = zxdh_send_msg_to_riscv(dev, &msg, sizeof(struct zxdh_msg_info),
+				zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info),
+				ZXDH_BAR_MODULE_MAC);
+	if (ret)
+		PMD_DRV_LOG(ERR, "Failed to get speed mode info");
+
+	void *reply_body_addr = ZXDH_ADDR_OF(msg_reply_info, zxdh_msg_reply_info, reply_body);
+	void *link_msg_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body_addr, link_msg);
+	uint32_t speed_modes = ZXDH_GET(link_info_msg, link_msg_addr, speed_modes);
+
+	hw->support_speed_modes = speed_modes;
+}
+
+int32_t zxdh_autoneg_stats_get(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_msg_info msg;
+	uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};
+	int32_t ret = 0;
+
+	zxdh_agent_msg_build(hw, ZXDH_MAC_AUTONEG_GET, &msg);
+
+	ret = zxdh_send_msg_to_riscv(dev, &msg, sizeof(struct zxdh_msg_info),
+				zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info),
+				ZXDH_BAR_MODULE_MAC);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get link autoneg stats!");
+		return -1;
+	}
+	void *reply_body_addr = ZXDH_ADDR_OF(msg_reply_info, zxdh_msg_reply_info, reply_body);
+	void *link_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body_addr, link_msg);
+	void *link_autoneg_addr = ZXDH_ADDR_OF(link_info_msg, link_addr, autoneg);
+
+	hw->autoneg = *(uint8_t *)link_autoneg_addr;
+	PMD_DRV_LOG(DEBUG, "autoneg status is: %d", hw->autoneg);
+
+	return 0;
+}
+
 int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused)
 {
 	struct rte_eth_link link;
@@ -384,17 +488,7 @@ int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete _
 		PMD_DRV_LOG(ERR, "Failed to get link status from hw");
 		return ret;
 	}
-	link.link_status &= hw->admin_status;
-	if (link.link_status == RTE_ETH_LINK_DOWN) {
-		PMD_DRV_LOG(DEBUG, "dev link status is down.");
-		goto link_down;
-	}
-	goto out;
 
-link_down:
-	link.link_status = RTE_ETH_LINK_DOWN;
-	link.link_speed  = RTE_ETH_SPEED_NUM_UNKNOWN;
-out:
 	if (link.link_status != dev->data->dev_link.link_status) {
 		ret = zxdh_config_port_status(dev, link.link_status);
 		if (ret != 0) {
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
index 85e926887b..6dfe4be473 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.h
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -27,6 +27,18 @@
 #define ZXDH_RSS_HF  ((ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH))
 
 #define ZXDH_ETHER_MIN_MTU      68
+#define ZXDH_SPM_SPEED_1X_10M          RTE_BIT32(0)
+#define ZXDH_SPM_SPEED_1X_100M         RTE_BIT32(1)
+#define ZXDH_SPM_SPEED_1X_1G           RTE_BIT32(2)
+#define ZXDH_SPM_SPEED_1X_2DOT5G       RTE_BIT32(3)
+#define ZXDH_SPM_SPEED_1X_5G           RTE_BIT32(4)
+#define ZXDH_SPM_SPEED_1X_10G          RTE_BIT32(5)
+#define ZXDH_SPM_SPEED_1X_25G          RTE_BIT32(6)
+#define ZXDH_SPM_SPEED_1X_50G          RTE_BIT32(7)
+#define ZXDH_SPM_SPEED_2X_100G         RTE_BIT32(8)
+#define ZXDH_SPM_SPEED_4X_40G          RTE_BIT32(9)
+#define ZXDH_SPM_SPEED_4X_100G         RTE_BIT32(10)
+#define ZXDH_SPM_SPEED_4X_200G         RTE_BIT32(11)
 
 struct zxdh_np_stats_data {
 	uint64_t n_pkts_dropped;
@@ -144,5 +156,8 @@ int zxdh_dev_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_
 int zxdh_dev_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
 int zxdh_meter_ops_get(struct rte_eth_dev *dev, void *arg);
 uint16_t zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid);
+int32_t zxdh_link_speed_set(struct rte_eth_dev *dev);
+void zxdh_speed_modes_get(struct rte_eth_dev *dev);
+int32_t zxdh_autoneg_stats_get(struct rte_eth_dev *dev);
 
 #endif /* ZXDH_ETHDEV_OPS_H */
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 196e27f91c..ff2d11706c 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -2450,9 +2450,58 @@ pf_recv_bar_msg(void *pay_load, uint16_t len, void *reps_buffer,
 	return ret;
 }
 
+static int vf_recv_link_state_msg(struct rte_eth_dev *dev, void *payload,
+		void *reply_body __rte_unused, uint16_t *reps_len)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	void *link_autoneg_addr = ZXDH_ADDR_OF(zxdh_link_state_msg, payload, autoneg_enable);
+
+	hw->autoneg = *(uint8_t *)link_autoneg_addr;
+	dev->data->dev_link.link_autoneg = hw->autoneg;
+	*reps_len = ZXDH_ST_SZ_BYTES(zxdh_link_state_msg);
+
+	return 0;
+}
+
+static int
+vf_recv_bar_msg(void *payload, uint16_t len __rte_unused, void *reps_buffer,
+	uint16_t *reps_len, void *eth_dev __rte_unused)
+{
+	struct zxdh_msg_info *msg_payload = (struct zxdh_msg_info *)payload;
+	uint16_t pcieid = msg_payload->msg_to_vf.pcieid;
+	uint16_t opcode = msg_payload->msg_to_vf.opcode;
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)eth_dev;
+	struct zxdh_ifc_msg_reply_body_bits *reply_body;
+	reply_body = (struct zxdh_ifc_msg_reply_body_bits *)
+		ZXDH_ADDR_OF(msg_reply_body, reps_buffer, flag);
+	int32_t ret = 0;
+
+	if (dev == NULL) {
+		PMD_DRV_LOG(ERR, "param invalid, dev is NULL");
+		ret = -2;
+		return ret;
+	}
+
+	switch (opcode) {
+	case ZXDH_SET_VF_LINK_STATE:
+		PMD_DRV_LOG(DEBUG, "PF(pcieid:%d ) set VF's link state", pcieid);
+		vf_recv_link_state_msg(dev, &msg_payload->data, reps_buffer, reps_len);
+		reply_body->flag[0] = ZXDH_REPS_SUCC;
+		break;
+	default:
+		ZXDH_SET(msg_reply_body, reps_buffer, flag, ZXDH_REPS_INVALID);
+		PMD_DRV_LOG(ERR, "[VF GET MSG FROM PF]--unknown msg opcode:%d", opcode);
+		ret = -1;
+		break;
+	}
+	return ret;
+}
+
 void
 zxdh_msg_cb_reg(struct zxdh_hw *hw)
 {
 	if (hw->is_pf)
 		zxdh_bar_chan_msg_recv_register(ZXDH_MODULE_BAR_MSG_TO_PF, pf_recv_bar_msg);
+	else
+		zxdh_bar_chan_msg_recv_register(ZXDH_MODULE_BAR_MSG_TO_VF, vf_recv_bar_msg);
 }
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 61a3da878e..0fc9bff75d 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -205,6 +205,8 @@ enum zxdh_module_id {
 enum zxdh_agent_msg_type {
 	ZXDH_MAC_STATS_GET = 10,
 	ZXDH_MAC_STATS_RESET,
+	ZXDH_MAC_PHYPORT_INIT,
+	ZXDH_MAC_SPEED_SET,
 	ZXDH_MAC_LINK_GET = 14,
 	ZXDH_MAC_MODULE_EEPROM_READ = 20,
 	ZXDH_VQM_DEV_STATS_GET = 21,
@@ -212,6 +214,7 @@ enum zxdh_agent_msg_type {
 	ZXDH_FLASH_FIR_VERSION_GET = 23,
 	ZXDH_VQM_QUEUE_STATS_GET = 24,
 	ZXDH_VQM_QUEUE_STATS_RESET,
+	ZXDH_MAC_AUTONEG_GET = 44,
 };
 
 enum zxdh_msg_type {
@@ -233,6 +236,7 @@ enum zxdh_msg_type {
 
 	ZXDH_PORT_ATTRS_SET = 25,
 	ZXDH_PORT_PROMISC_SET = 26,
+	ZXDH_SET_VF_LINK_STATE = 28,
 
 	ZXDH_GET_NP_STATS = 31,
 	ZXDH_PLCR_CAR_PROFILE_ID_ADD = 36,
@@ -408,6 +412,17 @@ struct zxdh_ifc_agent_mac_module_eeprom_msg_bits {
 	uint8_t data[ZXDH_MODULE_EEPROM_DATA_LEN * 8];
 };
 
+struct zxdh_ifc_zxdh_link_state_msg_bits {
+	uint8_t is_link_force_set[0x8];
+	uint8_t link_forced[0x8];
+	uint8_t link_up[0x8];
+	uint8_t speed[0x20];
+	uint8_t autoneg_enable[0x20];
+	uint8_t supported_speed_modes[0x20];
+	uint8_t advertising_speed_modes[0x20];
+	uint8_t duplex[0x8];
+};
+
 struct zxdh_flash_msg {
 	uint8_t firmware_version[ZXDH_FWVERS_LEN];
 };
@@ -454,6 +469,7 @@ struct zxdh_ifc_msg_reply_body_bits {
 		struct zxdh_ifc_mtr_profile_info_bits  mtr_profile_info;
 		struct zxdh_ifc_mtr_stats_bits hw_mtr_stats;
 		struct zxdh_flow_op_rsp  flow_rsp;
+		struct zxdh_ifc_zxdh_link_state_msg_bits link_state_msg;
 	};
 };
 
@@ -482,6 +498,11 @@ struct __rte_packed_begin zxdh_msg_head {
 	uint16_t pcieid;
 } __rte_packed_end;
 
+struct __rte_packed_begin zxdh_msg_head_to_vf {
+	uint8_t opcode;
+	uint16_t pcieid;
+} __rte_packed_end;
+
 struct zxdh_port_attr_set_msg {
 	uint32_t mode;
 	uint32_t value;
@@ -521,7 +542,7 @@ struct zxdh_agent_msg_head {
 	uint8_t msg_type;
 	uint8_t panel_id;
 	uint8_t phyport;
-	uint8_t rsv;
+	uint8_t init;
 	uint16_t vf_id;
 	uint16_t pcie_id;
 };
@@ -570,6 +591,7 @@ struct zxdh_msg_info {
 		uint8_t head_len[ZXDH_MSG_HEAD_LEN];
 		struct zxdh_msg_head msg_head;
 		struct zxdh_agent_msg_head agent_msg_head;
+		struct zxdh_msg_head_to_vf msg_to_vf;
 	};
 	union {
 		uint8_t datainfo[ZXDH_MSG_REQ_BODY_MAX_LEN];
-- 
2.27.0



* [PATCH v1 4/5] net/zxdh: add support primary/secondary process
  2025-12-20  6:15 [PATCH v1 0/5] net/zxdh: add support for some new features Junlong Wang
                   ` (2 preceding siblings ...)
  2025-12-20  6:15 ` [PATCH v1 3/5] net/zxdh: add support set link speed get autoneg stats Junlong Wang
@ 2025-12-20  6:15 ` Junlong Wang
  2025-12-20  6:15 ` [PATCH v1 5/5] net/zxdh: add support GENEVE TSO and Rx outer UDP chksum Junlong Wang
  4 siblings, 0 replies; 6+ messages in thread
From: Junlong Wang @ 2025-12-20  6:15 UTC (permalink / raw)
  To: stephen; +Cc: dev, Junlong Wang



add support for primary/secondary processes; the secondary process
ops only support getting stats and device info.
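
A secondary process can then, for example (sketch; port_id is a
placeholder):

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct rte_eth_stats stats;

		/* served by zxdh_eth_dev_secondary_ops */
		rte_eth_stats_get(port_id, &stats);
		/* control-path ops such as rte_eth_dev_start() are
		 * intentionally not exposed in the secondary ops table */
	}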

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c     | 29 ++++++++++++++++--
 drivers/net/zxdh/zxdh_ethdev.h     |  1 +
 drivers/net/zxdh/zxdh_ethdev_ops.c | 49 +++++++++++++++++++-----------
 drivers/net/zxdh/zxdh_queue.h      |  1 -
 drivers/net/zxdh/zxdh_rxtx.c       |  2 +-
 5 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 823b1ffb5c..cd29162070 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -23,7 +23,6 @@ struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
 struct zxdh_dev_nic_shared_data dev_nic_sd[ZXDH_SLOT_MAX];
 static rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 struct zxdh_shared_data *zxdh_shared_data;
-struct zxdh_net_hdr_dl g_net_hdr_dl[RTE_MAX_ETHPORTS];
 struct zxdh_mtr_res g_mtr_res;
 
 #define ZXDH_INVALID_DTBQUE      0xFFFF
@@ -409,7 +408,7 @@ zxdh_configure_intr(struct rte_eth_dev *dev)
 static void
 zxdh_update_net_hdr_dl(struct zxdh_hw *hw)
 {
-	struct zxdh_net_hdr_dl *net_hdr_dl = &g_net_hdr_dl[hw->port_id];
+	struct zxdh_net_hdr_dl *net_hdr_dl = hw->net_hdr_dl;
 	memset(net_hdr_dl, 0, ZXDH_DL_NET_HDR_SIZE);
 
 	if (zxdh_tx_offload_enabled(hw)) {
@@ -1229,6 +1228,9 @@ zxdh_priv_res_free(struct zxdh_hw *priv)
 
 	rte_free(priv->queue_conf);
 	priv->queue_conf = NULL;
+
+	rte_free(priv->net_hdr_dl);
+	priv->net_hdr_dl = NULL;
 }
 
 static int
@@ -1555,6 +1557,16 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
 	.flow_ops_get			 = zxdh_flow_ops_get,
 };
 
+const struct eth_dev_ops zxdh_eth_dev_secondary_ops = {
+	.dev_infos_get			 = zxdh_dev_infos_get,
+	.stats_get				 = zxdh_dev_stats_get,
+	.xstats_get				 = zxdh_dev_xstats_get,
+	.xstats_get_names		 = zxdh_dev_xstats_get_names,
+	.rxq_info_get			 = zxdh_rxq_info_get,
+	.txq_info_get			 = zxdh_txq_info_get,
+	.dev_supported_ptypes_get = zxdh_dev_supported_ptypes_get,
+};
+
 static int32_t
 zxdh_init_device(struct rte_eth_dev *eth_dev)
 {
@@ -2136,6 +2148,13 @@ zxdh_priv_res_init(struct zxdh_hw *hw)
 		return -ENOMEM;
 	}
 
+	hw->net_hdr_dl = rte_zmalloc("zxdh_net_hdr_dl", sizeof(struct zxdh_net_hdr_dl), 0);
+	if (hw->net_hdr_dl == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %ld bytes store queue conf",
+					sizeof(struct zxdh_net_hdr_dl));
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -2173,6 +2192,12 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 	eth_dev->dev_ops = &zxdh_eth_dev_ops;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		eth_dev->dev_ops = &zxdh_eth_dev_secondary_ops;
+		ZXDH_VTPCI_OPS(hw) = &zxdh_dev_pci_ops;
+		return 0;
+	}
+
 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("zxdh_mac",
 			ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 81b385ecb8..41fa89d20c 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -120,6 +120,7 @@ struct zxdh_hw {
 	struct zxdh_dev_nic_shared_data *dev_nic_sd;
 	struct vfinfo *vfinfo;
 	struct zxdh_queue_conf *queue_conf;
+	struct zxdh_net_hdr_dl *net_hdr_dl;
 
 	uint64_t bar_addr[ZXDH_NUM_BARS];
 	uint64_t host_features;
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index 8fb315eeac..068dd21876 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -1802,22 +1802,26 @@ zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats,
 	struct zxdh_hw_mac_bytes mac_bytes = {0};
 	uint32_t i = 0;
 
-	zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
-	if (hw->is_pf)
-		zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
+		if (hw->is_pf)
+			zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
 
-	zxdh_hw_np_stats_get(dev, &np_stats);
+		zxdh_hw_np_stats_get(dev, &np_stats);
 
-	stats->ipackets = vqm_stats.rx_total;
-	stats->opackets = vqm_stats.tx_total;
-	stats->ibytes = vqm_stats.rx_bytes;
-	stats->obytes = vqm_stats.tx_bytes;
-	stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
-	stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.rx_mtu_drop_pkts;
-	stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.tx_mtu_drop_pkts;
+		stats->ipackets = vqm_stats.rx_total;
+		stats->opackets = vqm_stats.tx_total;
+		stats->ibytes = vqm_stats.rx_bytes;
+		stats->obytes = vqm_stats.tx_bytes;
+		stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
+		stats->ierrors = vqm_stats.rx_error +
+			mac_stats.rx_error + np_stats.rx_mtu_drop_pkts;
+		stats->oerrors = vqm_stats.tx_error +
+			mac_stats.tx_error + np_stats.tx_mtu_drop_pkts;
 
-	if (hw->i_mtr_en || hw->e_mtr_en)
-		stats->imissed  += np_stats.rx_mtr_drop_pkts;
+		if (hw->i_mtr_en || hw->e_mtr_en)
+			stats->imissed  += np_stats.rx_mtr_drop_pkts;
+	}
 
 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 	for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
@@ -2093,14 +2097,20 @@ zxdh_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, uint3
 	uint32_t count = 0;
 	uint32_t t = 0;
 
-	if (hw->is_pf) {
+	if (hw->is_pf)
 		nstats += ZXDH_MAC_XSTATS + ZXDH_MAC_BYTES;
-		zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
-	}
+
 	if (n < nstats)
 		return nstats;
-	zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
-	zxdh_hw_np_stats_get(dev, &np_stats);
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (hw->is_pf)
+			zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
+
+		zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
+		zxdh_hw_np_stats_get(dev, &np_stats);
+	}
+
 	for (i = 0; i < ZXDH_NP_XSTATS; i++) {
 		xstats[count].value = *(uint64_t *)(((char *)&np_stats)
 						 + zxdh_np_stat_strings[i].offset);
@@ -2235,6 +2245,9 @@ zxdh_dev_fw_version_get(struct rte_eth_dev *dev,
 	char fw_ver[ZXDH_FWVERS_LEN] = {0};
 	uint32_t ret = 0;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
+
 	zxdh_agent_msg_build(hw, ZXDH_FLASH_FIR_VERSION_GET, &msg_info);
 
 	ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 3c89687d45..1a0c8a0d90 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -410,7 +410,6 @@ zxdh_queue_kick_prepare_packed(struct zxdh_virtqueue *vq)
 	return (flags != ZXDH_RING_EVENT_FLAGS_DISABLE);
 }
 
-extern struct zxdh_net_hdr_dl g_net_hdr_dl[RTE_MAX_ETHPORTS];
 
 struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
 int32_t zxdh_free_queues(struct rte_eth_dev *dev);
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index 6071f5ef93..3ccef8fd0b 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -256,7 +256,7 @@ zxdh_xmit_fill_net_hdr(struct zxdh_virtqueue *vq, struct rte_mbuf *cookie,
 	struct zxdh_pi_hdr *pi_hdr = NULL;
 	struct zxdh_pd_hdr_dl *pd_hdr = NULL;
 	struct zxdh_hw *hw = vq->hw;
-	struct zxdh_net_hdr_dl *net_hdr_dl = &g_net_hdr_dl[hw->port_id];
+	struct zxdh_net_hdr_dl *net_hdr_dl = hw->net_hdr_dl;
 	uint8_t hdr_len = hw->dl_net_hdr_len;
 	uint32_t ol_flag = 0;
 
-- 
2.27.0



* [PATCH v1 5/5] net/zxdh: add support GENEVE TSO and Rx outer UDP chksum
  2025-12-20  6:15 [PATCH v1 0/5] net/zxdh: add support for some new features Junlong Wang
                   ` (3 preceding siblings ...)
  2025-12-20  6:15 ` [PATCH v1 4/5] net/zxdh: add support primary/secondary process Junlong Wang
@ 2025-12-20  6:15 ` Junlong Wang
  4 siblings, 0 replies; 6+ messages in thread
From: Junlong Wang @ 2025-12-20  6:15 UTC (permalink / raw)
  To: stephen; +Cc: dev, Junlong Wang



add support for GENEVE inner/outer checksum and TSO offload, and
support the outer UDP checksum Rx offload for tunnel packets.
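
A minimal configuration sketch for the new offloads (port_id and the
queue counts are placeholders):

	struct rte_eth_conf conf = {0};

	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM;
	conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);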

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c |  23 +++++--
 drivers/net/zxdh/zxdh_msg.c    |   3 +
 drivers/net/zxdh/zxdh_rxtx.c   | 115 +++++++++++++++++++++++++--------
 drivers/net/zxdh/zxdh_tables.h |  12 ++--
 4 files changed, 118 insertions(+), 35 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index cd29162070..7e67983ec2 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -61,7 +61,8 @@ zxdh_dev_infos_get(struct rte_eth_dev *dev,
 	dev_info->rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
 					RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
 					RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
-					RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM);
+					RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+					RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM);
 	dev_info->rx_offload_capa |= (RTE_ETH_RX_OFFLOAD_SCATTER);
 	dev_info->rx_offload_capa |=  RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	dev_info->rx_offload_capa |=  RTE_ETH_RX_OFFLOAD_RSS_HASH;
@@ -75,15 +76,15 @@ zxdh_dev_infos_get(struct rte_eth_dev *dev,
 
 	dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS);
 	dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
-					RTE_ETH_TX_OFFLOAD_UDP_TSO);
+					RTE_ETH_TX_OFFLOAD_UDP_TSO |
+					RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
 	dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
 					RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
 					RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO);
 	dev_info->tx_offload_capa |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
 					RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
 					RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
-					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-					RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM);
+					RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);
 
 	return 0;
 }
@@ -903,6 +904,7 @@ zxdh_rx_csum_lro_offload_configure(struct rte_eth_dev *dev)
 	uint32_t need_accelerator = rxmode->offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
 		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
 		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
 		RTE_ETH_RX_OFFLOAD_TCP_LRO);
 	int ret;
@@ -917,6 +919,8 @@ zxdh_rx_csum_lro_offload_configure(struct rte_eth_dev *dev)
 		port_attr.tcp_udp_checksum_offload  =
 		(rxmode->offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
 					? true : false;
+		port_attr.outer_udp_checksum_offload =
+			(rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM) ? true : false;
 		port_attr.lro_offload =
 				(rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? true : false;
 		port_attr.accelerator_offload_flag  = need_accelerator ? true : false;
@@ -959,6 +963,17 @@ zxdh_rx_csum_lro_offload_configure(struct rte_eth_dev *dev)
 			return -1;
 		}
 
+		zxdh_msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info);
+		attr_msg->mode = ZXDH_PORT_OUTER_UDP_CHECKSUM_OFFLOAD_FLAG;
+		attr_msg->value =
+			(rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM) ? true : false;
+		ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"%s outer_udp_checksum offload failed to send msg", __func__);
+			return ret;
+		}
+
 		zxdh_msg_head_build(hw, ZXDH_PORT_ATTRS_SET, &msg_info);
 		attr_msg->mode = ZXDH_PORT_LRO_OFFLOAD_FLAG;
 		attr_msg->value = (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ? true : false;
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index ff2d11706c..a61e5f865c 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -1829,6 +1829,9 @@ zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport,
 	case ZXDH_PORT_TCP_UDP_CHKSUM_FLAG:
 		port_attr.tcp_udp_checksum_offload = attr_msg->value;
 		break;
+	case ZXDH_PORT_OUTER_UDP_CHECKSUM_OFFLOAD_FLAG:
+		port_attr.outer_udp_checksum_offload = attr_msg->value;
+		break;
 	case ZXDH_PORT_ACCELERATOR_OFFLOAD_FLAG_FLAG:
 		port_attr.accelerator_offload_flag = attr_msg->value;
 		break;
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index 3ccef8fd0b..3af0db473a 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -41,7 +41,7 @@
 #define ZXDH_PCODE_NO_REASSMBLE_TCP_PKT_TYPE  0x0C
 
 /* Uplink pd header byte0~1 */
-#define ZXDH_MBUF_F_RX_OUTER_L4_CKSUM_GOOD               0x00080000
+#define ZXDH_MBUF_F_RX_OUTER_L4_CKSUM_BAD                0x00080000
 #define ZXDH_MBUF_F_RX_QINQ                              0x00100000
 #define ZXDH_MBUF_F_RX_SEC_OFFLOAD                       0x00200000
 #define ZXDH_MBUF_F_RX_QINQ_STRIPPED                     0x00400000
@@ -55,6 +55,8 @@
 #define ZXDH_MBUF_F_RX_OUTER_IP_CKSUM_BAD                0x20000000
 #define ZXDH_MBUF_F_RX_FDIR                              0x40000000
 #define ZXDH_MBUF_F_RX_RSS_HASH                          0x80000000
+#define ZXDH_MBUF_F_RX_INNER_IP_CKSUM_BAD                0x00020000
+#define ZXDH_MBUF_RX_CHECKSUM_BASED_OUTER                0x00010000
 
 /* Outer/Inner L2 type */
 #define ZXDH_PD_L2TYPE_MASK                              0xf000
@@ -642,10 +644,69 @@ zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,
 }
 
 static inline void
-zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *hdr)
+update_outer_rx_l4_csum(struct zxdh_hw *hw, struct zxdh_pi_hdr *pi_hdr,
+		struct zxdh_pd_hdr_ul *pd_hdr, struct rte_mbuf *m)
+{
+	uint32_t pd_pkt_flag = ntohl(pd_hdr->pkt_flag);
+	uint32_t packet_type = 0;
+	uint32_t idx = 0;
+	bool has_ip_verify = hw->eth_dev->data->dev_conf.rxmode.offloads &
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
+	uint16_t pkt_type_in = rte_be_to_cpu_16(pd_hdr->pkt_type_in);
+
+	/* pi_hdr carries the outer checksum, pd_hdr the inner checksum */
+	if ((pd_pkt_flag & ZXDH_MBUF_RX_CHECKSUM_BASED_OUTER)) {
+		m->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_MASK;
+		m->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_MASK;
+
+		if (pi_hdr) {
+			uint16_t err_code = rte_be_to_cpu_16(pi_hdr->ul.err_code);
+			if (pi_hdr->pkt_flag_hi8 & ZXDH_RX_TCPUDP_CKSUM_VERIFY) {
+				if (err_code & ZXDH_UDP_CSUM_ERR)
+					m->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+				else
+					m->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+			}
+
+			if (pi_hdr->pkt_flag_hi8 & ZXDH_RX_IP_CKSUM_VERIFY) {
+				if (err_code & ZXDH_IPV4_CSUM_ERR)
+					m->ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+			}
+		}
+		if (pd_pkt_flag & ZXDH_MBUF_F_RX_INNER_IP_CKSUM_BAD) {
+			m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+		} else {
+			if (has_ip_verify) {
+				idx = (pkt_type_in >> 8) & 0xF;
+				packet_type = zxdh_inner_l3_type[idx];
+				if (((packet_type & RTE_PTYPE_INNER_L3_MASK) ==
+						RTE_PTYPE_INNER_L3_IPV4) ||
+				((packet_type & RTE_PTYPE_INNER_L3_MASK) ==
+						RTE_PTYPE_INNER_L3_IPV4_EXT))
+					m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+			}
+		}
+
+	} else {
+		if ((m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_BAD) ==
+				RTE_MBUF_F_RX_L4_CKSUM_BAD)
+			m->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;
+		else if ((m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD) ==
+					RTE_MBUF_F_RX_L4_CKSUM_GOOD) {
+			if (pd_pkt_flag & ZXDH_MBUF_F_RX_OUTER_L4_CKSUM_BAD)
+				m->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+			else
+				m->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+		}
+	}
+}
+
+static inline void
+zxdh_rx_update_mbuf(struct zxdh_hw *hw, struct rte_mbuf *m, struct zxdh_net_hdr_ul *hdr)
 {
 	uint8_t has_pi = (uint64_t)(hdr->type_hdr.pd_len << 1) > ZXDH_UL_NOPI_HDR_SIZE_MAX;
 	struct zxdh_pd_hdr_ul *pd_hdr = has_pi ? &hdr->pipd_hdr_ul.pd_hdr : &hdr->pd_hdr;
+	struct zxdh_pi_hdr *pi_hdr = NULL;
 	uint32_t pkt_flag = ntohl(pd_hdr->pkt_flag);
 	uint32_t idx = 0;
 	uint32_t striped_vlan_tci = rte_be_to_cpu_32(pd_hdr->striped_vlan_tci);
@@ -704,35 +765,31 @@ zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *hdr)
 	/* checksum handle */
 	if (pkt_flag & ZXDH_MBUF_F_RX_OUTER_IP_CKSUM_BAD)
 		m->ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
-	if (pkt_flag & ZXDH_MBUF_F_RX_OUTER_L4_CKSUM_GOOD)
-		m->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
 
 	if (has_pi) {
-		struct zxdh_pi_hdr *pi_hdr = &hdr->pipd_hdr_ul.pi_hdr;
-		uint16_t pkt_type_masked = pi_hdr->pkt_type & ZXDH_PCODE_MASK;
+		pi_hdr = &hdr->pipd_hdr_ul.pi_hdr;
 		uint16_t err_code = rte_be_to_cpu_16(pi_hdr->ul.err_code);
-
-		bool is_ip_pkt =
-				(pi_hdr->pkt_type == ZXDH_PCODE_IP_PKT_TYPE) ||
-				((pi_hdr->pkt_type & ZXDH_PI_L3TYPE_MASK) == ZXDH_PI_L3TYPE_IP);
-
-		bool is_l4_pkt =
-				(pkt_type_masked == ZXDH_PCODE_UDP_PKT_TYPE) ||
-				(pkt_type_masked == ZXDH_PCODE_NO_REASSMBLE_TCP_PKT_TYPE) ||
-				(pkt_type_masked == ZXDH_PCODE_TCP_PKT_TYPE);
-
-		if (is_ip_pkt && (pi_hdr->pkt_flag_hi8 & ZXDH_RX_IP_CKSUM_VERIFY)) {
-			if (err_code & ZXDH_IPV4_CSUM_ERR)
-				m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
-			else
-				m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+		if (pi_hdr->pkt_type == ZXDH_PCODE_IP_PKT_TYPE ||
+			((pi_hdr->pkt_type & ZXDH_PI_L3TYPE_MASK) == ZXDH_PI_L3TYPE_IP)) {
+			if (pi_hdr->pkt_flag_hi8 & ZXDH_RX_IP_CKSUM_VERIFY) {
+				if (err_code & ZXDH_IPV4_CSUM_ERR)
+					m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+				else
+					m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+			}
 		}
 
-		if (is_l4_pkt && (pi_hdr->pkt_flag_hi8 & ZXDH_RX_TCPUDP_CKSUM_VERIFY)) {
-			if (err_code & (ZXDH_TCP_CSUM_ERR | ZXDH_UDP_CSUM_ERR))
-				m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
-			else
-				m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+		if (((pi_hdr->pkt_type & ZXDH_PCODE_MASK) == ZXDH_PCODE_UDP_PKT_TYPE) ||
+			((pi_hdr->pkt_type & ZXDH_PCODE_MASK) ==
+				ZXDH_PCODE_NO_REASSMBLE_TCP_PKT_TYPE) ||
+			((pi_hdr->pkt_type & ZXDH_PCODE_MASK) == ZXDH_PCODE_TCP_PKT_TYPE)) {
+			if (pi_hdr->pkt_flag_hi8 & ZXDH_RX_TCPUDP_CKSUM_VERIFY) {
+				if ((err_code & ZXDH_TCP_CSUM_ERR) ||
+					(err_code & ZXDH_UDP_CSUM_ERR))
+					m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+				else
+					m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+			}
 		}
 
 		if (ntohl(pi_hdr->ul.lro_flag) & ZXDH_PI_LRO_FLAG)
@@ -759,6 +816,10 @@ zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *hdr)
 		m->packet_type |= zxdh_inner_l3_type[idx];
 		idx = (pkt_type_inner >> 4)  & 0xF;
 		m->packet_type |= zxdh_inner_l4_type[idx];
+
+		if (hw->eth_dev->data->dev_conf.rxmode.offloads &
+				RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM)
+			update_outer_rx_l4_csum(hw, pi_hdr, pd_hdr, m);
 	}
 }
 
@@ -833,7 +894,7 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->port = rxvq->port_id;
 
 		/* Update rte_mbuf according to pi/pd header */
-		zxdh_rx_update_mbuf(rxm, header);
+		zxdh_rx_update_mbuf(hw, rxm, header);
 		seg_res = seg_num - 1;
 		/* Merge remaining segments */
 		while (seg_res != 0 && i < (num - 1)) {
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index 8cfc833333..3580f1dfba 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -76,6 +76,7 @@
 #define ZXDH_PORT_BUSINESS_VLAN_OFFLOAD_EN_FLAG   41
 #define ZXDH_PORT_RSS_EN_OFF_FLAG                 42
 #define ZXDH_PORT_MTU_OFFLOAD_EN_OFF_FLAG         43
+#define ZXDH_PORT_OUTER_UDP_CHECKSUM_OFFLOAD_FLAG 51
 
 #define ZXDH_MTU_STATS_EGRESS_BASE           0x8481
 #define ZXDH_MTU_STATS_INGRESS_BASE          0x8981
@@ -182,7 +183,9 @@ struct zxdh_port_attr_table {
 	uint16_t tpid;
 
 	uint16_t vhca : 10;
-	uint16_t rsv16_1 : 6;
+	uint16_t rsv16_1 : 2;
+	uint16_t outer_udp_checksum_offload : 1;
+	uint16_t rsv16_2 : 3;
 #else
 	uint8_t hw_bond_enable : 1;
 	uint8_t rdma_offload_enable: 1;
@@ -235,10 +238,11 @@ struct zxdh_port_attr_table {
 
 	uint8_t rss_hash_factor;
 
-	uint16_t tpid;
-
 	uint16_t vhca : 10;
-	uint16_t rsv16_1 : 6;
+	uint16_t rsv16_1 : 2;
+	uint16_t outer_udp_checksum_offload : 1;
+	uint16_t rsv16_2 : 3;
+	uint16_t tpid;
 #endif
 };
 
-- 
2.27.0


