From: Junlong Wang <wang.junlong1@zte.com.cn>
To: stephen@networkplumber.org
Cc: dev@dpdk.org, Junlong Wang <wang.junlong1@zte.com.cn>
Subject: [PATCH v1 6/6] net/zxdh: add support for I510/I511 PF device IDs
Date: Mon,  7 Jul 2025 13:56:21 +0800	[thread overview]
Message-ID: <20250707055621.357606-6-wang.junlong1@zte.com.cn> (raw)
In-Reply-To: <20250707055621.357606-1-wang.junlong1@zte.com.cn>


Add support for the I510/I511 PF device IDs.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_common.c     |  2 +-
 drivers/net/zxdh/zxdh_ethdev.c     | 80 +++++++++++++++++++++++++++---
 drivers/net/zxdh/zxdh_ethdev.h     |  5 +-
 drivers/net/zxdh/zxdh_ethdev_ops.c |  5 ++
 drivers/net/zxdh/zxdh_msg.c        | 44 ++++++++++++++++
 drivers/net/zxdh/zxdh_msg.h        | 25 ++++++++++
 drivers/net/zxdh/zxdh_pci.c        |  2 +
 7 files changed, 154 insertions(+), 9 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
index e95292449a..ce53ee8a05 100644
--- a/drivers/net/zxdh/zxdh_common.c
+++ b/drivers/net/zxdh/zxdh_common.c
@@ -179,7 +179,7 @@ zxdh_fill_res_para(struct rte_eth_dev *dev, struct zxdh_res_para *param)
 
 	param->pcie_id   = hw->pcie_id;
 	param->virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET;
-	param->src_type  = ZXDH_BAR_MODULE_TBL;
+	param->src_type  = hw->is_pf ? ZXDH_MSG_CHAN_END_PF : ZXDH_MSG_CHAN_END_VF;
 }
 
 static int
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 193cc41dbb..7742588706 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -6,6 +6,7 @@
 #include <bus_pci_driver.h>
 #include <rte_ethdev.h>
 #include <rte_malloc.h>
+#include <rte_io.h>
 
 #include "zxdh_ethdev.h"
 #include "zxdh_logs.h"
@@ -780,6 +781,27 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
 	return ret;
 }
 
+static int
+zxdh_inic_pf_init_qid(struct zxdh_hw *hw)
+{
+	uint16_t start_qid, enabled_qp;
+	int ret = zxdh_inic_pf_get_qp_from_vcb(hw, hw->vfid, &start_qid, &enabled_qp);
+
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "vqm_vfid %u, get_qp_from_vcb fail", hw->vfid);
+		return ret;
+	}
+
+	uint16_t i, num_queues = rte_read16(&hw->common_cfg->num_queues);
+	PMD_DRV_LOG(INFO, "vqm_vfid:%u, get num_queues:%u (%s CQ)",
+		hw->vfid, num_queues, (num_queues & 0x1) ? "with" : "without");
+	for (i = 0; i < (num_queues & 0xfffe); ++i) {
+		hw->channel_context[i].ph_chno = start_qid + i;
+		hw->channel_context[i].valid = 1;
+	}
+	return 0;
+}
+
 static int32_t
 zxdh_alloc_queues(struct rte_eth_dev *dev)
 {
@@ -794,6 +816,28 @@ zxdh_alloc_queues(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(ERR, "Failed to allocate %d vqs", nr_vq);
 		return -ENOMEM;
 	}
+
+	if (hw->switchoffload && !(hw->host_features & (1ULL << ZXDH_F_RING_PACKED))) {
+		if (zxdh_inic_pf_init_qid(hw) != 0)
+			goto free;
+
+		for (i = 0 ; i < rxq_num; i++) {
+			lch = i * 2;
+			if (zxdh_init_queue(dev, lch) < 0) {
+				PMD_DRV_LOG(ERR, "Failed to alloc virtio queue");
+				goto free;
+			}
+		}
+		for (i = 0 ; i < txq_num; i++) {
+			lch = i * 2 + 1;
+			if (zxdh_init_queue(dev, lch) < 0) {
+				PMD_DRV_LOG(ERR, "Failed to alloc virtio queue");
+				goto free;
+			}
+		}
+		return 0;
+	}
+
 	for (i = 0 ; i < rxq_num; i++) {
 		lch = i * 2;
 		if (zxdh_acquire_channel(dev, lch) < 0) {
@@ -1329,7 +1373,8 @@ zxdh_dev_start(struct rte_eth_dev *dev)
 		zxdh_queue_notify(vq);
 	}
 
-	zxdh_dev_set_link_up(dev);
+	hw->admin_status = RTE_ETH_LINK_UP;
+	zxdh_dev_link_update(dev, 0);
 
 	ret = zxdh_mac_config(hw->eth_dev);
 	if (ret)
@@ -1498,6 +1543,10 @@ zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw)
 		PMD_DRV_LOG(ERR, "Failed to get panel_id");
 		return -1;
 	}
+
+	if (hw->switchoffload)
+		hw->phyport = 9;
+
 	PMD_DRV_LOG(DEBUG, "Get panel id success: 0x%x", hw->panel_id);
 
 	return 0;
@@ -1890,11 +1939,13 @@ zxdh_np_init(struct rte_eth_dev *eth_dev)
 			PMD_DRV_LOG(ERR, "dpp apt init failed, code:%d ", ret);
 			return -ret;
 		}
-		if (hw->hash_search_index >= ZXDH_HASHIDX_MAX) {
-			PMD_DRV_LOG(ERR, "invalid hash idx %d", hw->hash_search_index);
-			return -1;
+		if (!hw->switchoffload) {
+			if (hw->hash_search_index >= ZXDH_HASHIDX_MAX) {
+				PMD_DRV_LOG(ERR, "invalid hash idx %d", hw->hash_search_index);
+				return -1;
+			}
+			zxdh_tbl_entry_offline_destroy(hw);
 		}
-		zxdh_tbl_entry_offline_destroy(hw);
 	}
 
 	if (zxdh_shared_data != NULL)
@@ -1950,6 +2001,7 @@ zxdh_queue_res_get(struct rte_eth_dev *eth_dev)
 	uint32_t value = 0;
 	uint16_t offset = 0;
 
+	offset = hw->vport.epid * 8 + hw->vport.pfid;
 	if (hw->is_pf) {
 		hw->max_queue_pairs = *(volatile uint8_t *)(hw->bar_addr[0] +
 		ZXDH_PF_QUEUE_PAIRS_ADDR);
@@ -2013,7 +2065,16 @@ is_pf(uint16_t device_id)
 			device_id == ZXDH_E312S_PF_DEVICEID ||
 			device_id == ZXDH_E316_PF_DEVICEID ||
 			device_id == ZXDH_E310_RDMA_PF_DEVICEID ||
-			device_id == ZXDH_E312_RDMA_PF_DEVICEID);
+			device_id == ZXDH_E312_RDMA_PF_DEVICEID ||
+			device_id == ZXDH_I510_PF_DEVICEID ||
+			device_id == ZXDH_I511_PF_DEVICEID);
+}
+
+static uint8_t
+is_inic_pf(uint16_t device_id)
+{
+	return (device_id == ZXDH_I510_PF_DEVICEID ||
+			device_id == ZXDH_I511_PF_DEVICEID);
 }
 
 static int
@@ -2049,8 +2110,11 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
 	hw->slot_id = ZXDH_INVALID_SLOT_IDX;
 	hw->is_pf = 0;
 
-	if (is_pf(pci_dev->id.device_id))
+	if (is_pf(pci_dev->id.device_id)) {
 		hw->is_pf = 1;
+		if (is_inic_pf(pci_dev->id.device_id))
+			hw->switchoffload = 1;
+	}
 
 	ret = zxdh_init_once(eth_dev);
 	if (ret != 0)
@@ -2154,6 +2218,8 @@ static const struct rte_pci_id pci_id_zxdh_map[] = {
 	{RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_E310_RDMA_VF_DEVICEID)},
 	{RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_E312_RDMA_PF_DEVICEID)},
 	{RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_E312_RDMA_VF_DEVICEID)},
+	{RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_I510_PF_DEVICEID)},
+	{RTE_PCI_DEVICE(ZXDH_PCI_VENDOR_ID, ZXDH_I511_PF_DEVICEID)},
 	{.vendor_id = 0, /* sentinel */ },
 };
 static struct rte_pci_driver zxdh_pmd = {
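
For context on the zxdh_inic_pf_init_qid() hunk above: the VCB query returns a
starting queue ID plus an enabled queue-pair count, and the driver then maps
logical channels 1:1 onto consecutive physical queue numbers, masking off a
trailing completion queue when the count is odd. A minimal standalone sketch of
that mapping, using simplified stand-in types rather than the driver's real
ones:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the driver's per-channel context. */
    struct channel_context {
        uint16_t ph_chno; /* physical channel/queue number */
        uint16_t valid;
    };

    int main(void)
    {
        struct channel_context ctx[64] = {0};
        uint16_t start_qid = 128;  /* assumed value from the VCB query */
        uint16_t num_queues = 17;  /* odd => the extra queue is a CQ */
        uint16_t i;

        /* (num_queues & 0xfffe) drops the trailing CQ, as in the patch. */
        for (i = 0; i < (num_queues & 0xfffe); ++i) {
            ctx[i].ph_chno = start_qid + i;
            ctx[i].valid = 1;
        }
        printf("mapped %u queues starting at qid %u\n",
               (unsigned)(num_queues & 0xfffe), (unsigned)start_qid);
        return 0;
    }
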
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 1013446876..4c53cfa544 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -29,6 +29,9 @@
 #define ZXDH_E312_RDMA_PF_DEVICEID     0x8049
 #define ZXDH_E312_RDMA_VF_DEVICEID     0x8060
 
+#define ZXDH_I510_PF_DEVICEID     0x8064
+#define ZXDH_I511_PF_DEVICEID     0x8067
+
 #define ZXDH_MAX_UC_MAC_ADDRS     32
 #define ZXDH_MAX_MC_MAC_ADDRS     32
 #define ZXDH_MAX_MAC_ADDRS        (ZXDH_MAX_UC_MAC_ADDRS + ZXDH_MAX_MC_MAC_ADDRS)
@@ -128,7 +131,7 @@ struct zxdh_hw {
 	uint8_t use_msix;
 	uint8_t duplex;
 	uint8_t is_pf         : 1,
-			rsv : 1,
+			switchoffload : 1,
 			i_mtr_en      : 1,
 			e_mtr_en      : 1;
 	uint8_t msg_chan_init;
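
A note on the header change above: switchoffload takes over the previously
reserved bit, so the four 1-bit flags still pack into a single byte on common
ABIs. A small self-contained check, with the surrounding struct trimmed to just
the flag byte:

    #include <assert.h>
    #include <stdint.h>

    struct flags {
        uint8_t is_pf         : 1,
                switchoffload : 1,
                i_mtr_en      : 1,
                e_mtr_en      : 1;
    };

    int main(void)
    {
        struct flags f = { .is_pf = 1, .switchoffload = 1 };

        assert(sizeof(struct flags) == 1); /* four 1-bit fields share a byte */
        assert(f.is_pf && f.switchoffload && !f.i_mtr_en && !f.e_mtr_en);
        return 0;
    }
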
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index 0a7feb2d5f..1d53087163 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -316,6 +316,11 @@ zxdh_link_info_get(struct rte_eth_dev *dev, struct rte_eth_link *link)
 			link->link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
 	}
 	hw->speed = link->link_speed;
+	if (hw->switchoffload) {
+		link->link_speed = RTE_ETH_SPEED_NUM_25G;
+		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
+	}
 
 	return 0;
 }
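
For illustration, the zxdh_link_info_get() branch above overrides whatever was
read from the device with a fixed 25G full-duplex autoneg link once switch
offload is active. A trimmed sketch of that fixup, with stand-in constants in
place of the real rte_ethdev.h values:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for RTE_ETH_SPEED_NUM_25G and friends from rte_ethdev.h. */
    #define SPEED_NUM_25G    25000
    #define LINK_FULL_DUPLEX 1
    #define LINK_AUTONEG     1

    struct link_info {
        uint32_t link_speed;
        uint16_t link_duplex;
        uint16_t link_autoneg;
    };

    static void link_info_fixup(struct link_info *link, int switchoffload)
    {
        if (switchoffload) {
            /* Switch-offload PFs always report a fixed 25G link. */
            link->link_speed   = SPEED_NUM_25G;
            link->link_duplex  = LINK_FULL_DUPLEX;
            link->link_autoneg = LINK_AUTONEG;
        }
    }

    int main(void)
    {
        struct link_info l = { .link_speed = 10000 };

        link_info_fixup(&l, 1);
        printf("speed=%u duplex=%u\n", l.link_speed, (unsigned)l.link_duplex);
        return 0;
    }
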
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 81228a842b..2ffe07f788 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -1215,6 +1215,50 @@ zxdh_bar_chan_msg_recv_register(uint8_t module_id, zxdh_bar_chan_msg_recv_callba
 	return ZXDH_BAR_MSG_OK;
 }
 
+static int
+zxdh_msg_inic_with_vcb(struct zxdh_hw *hw, void *in_payload,
+		uint16_t in_len, struct zxdh_inic_recv_msg *out)
+{
+	struct zxdh_pci_bar_msg in = {
+		.virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET,
+		.payload_addr = in_payload,
+		.payload_len = in_len,
+		.emec = 0,
+		.src = ZXDH_MSG_CHAN_END_PF,
+		.dst = ZXDH_MSG_CHAN_END_RISC,
+		.module_id = ZXDH_BAR_EVENT_OVS_WITH_VCB,
+		.src_pcieid = hw->pcie_id,
+		.dst_pcieid = 0,
+		.usr = 0,
+	};
+	struct zxdh_msg_recviver_mem result = {
+		.recv_buffer = (void *)out,
+		.buffer_len = sizeof(struct zxdh_inic_recv_msg),
+	};
+	int ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+
+	if (ret != ZXDH_BAR_MSG_OK)
+		return -ret;
+	return ZXDH_BAR_MSG_OK;
+}
+
+int
+zxdh_inic_pf_get_qp_from_vcb(struct zxdh_hw *hw, uint16_t vqm_vfid, uint16_t *qid, uint16_t *qp)
+{
+	struct inic_to_vcb in = {.vqm_vfid = vqm_vfid, .opcode = 0, .cmd = 4,};
+	struct zxdh_inic_recv_msg out;
+	int ret = zxdh_msg_inic_with_vcb(hw, &in, (uint16_t)sizeof(in), &out);
+
+	if (ret == 0) {
+		*qid = out.vqm_queue.start_qid;
+		*qp  = out.vqm_queue.qp_num;
+	} else {
+		PMD_MSG_LOG(ERR, "vqm_vfid:%u get qp fail", vqm_vfid);
+	}
+
+	return ret;
+}
+
 static int
 zxdh_vf_promisc_init(struct zxdh_hw *hw, union zxdh_virport_num vport)
 {
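
To make the new PF-to-VCB exchange concrete: zxdh_inic_pf_get_qp_from_vcb()
sends a 16-byte "get" request (opcode 0, cmd 4) over the BAR channel and pulls
start_qid/qp_num out of the 44-byte reply. A hedged standalone sketch follows;
the transport is mocked and only the message layout mirrors the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct inic_to_vcb {     /* 16B request, as in zxdh_msg.h */
        uint16_t vqm_vfid;
        uint16_t opcode;     /* 0:get 1:set */
        uint16_t cmd;
        uint16_t version;
        uint64_t features;
    };

    struct vqm_queue {       /* 4B */
        uint16_t start_qid;
        uint16_t qp_num;
    };

    struct inic_recv_msg {   /* 44B reply */
        uint32_t reps;
        uint32_t check_result;
        union {
            uint8_t data[36];
            struct vqm_queue vqm_queue;
        };
    };

    /* Mock of the synchronous BAR-channel send; the device answers here. */
    static int mock_sync_send(const struct inic_to_vcb *in,
                              struct inic_recv_msg *out)
    {
        (void)in;
        memset(out, 0, sizeof(*out));
        out->vqm_queue.start_qid = 128; /* made-up reply values */
        out->vqm_queue.qp_num = 8;
        return 0;
    }

    int main(void)
    {
        struct inic_to_vcb in = { .vqm_vfid = 3, .opcode = 0, .cmd = 4 };
        struct inic_recv_msg out;

        if (mock_sync_send(&in, &out) == 0)
            printf("vfid %u: start_qid=%u qp_num=%u\n", in.vqm_vfid,
                   out.vqm_queue.start_qid, out.vqm_queue.qp_num);
        return 0;
    }
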
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 86c406c333..71402d58fb 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -570,6 +570,29 @@ struct zxdh_msg_info {
 	} data;
 };
 
+struct inic_to_vcb {
+	uint16_t vqm_vfid;
+	uint16_t opcode;  /* 0:get 1:set */
+	uint16_t cmd;
+	uint16_t version; /* 0:v0.95, 1:v1.0, 2:v1.1 */
+	uint64_t features;
+}; /* 16B */
+
+struct vqm_queue {
+	uint16_t start_qid;
+	uint16_t qp_num;
+}; /* 4B */
+
+struct zxdh_inic_recv_msg {
+	/* fix 4B */
+	uint32_t reps;
+	uint32_t check_result;
+	union {
+		uint8_t data[36];
+		struct vqm_queue vqm_queue;
+	};
+}; /* 44B */
+
 typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,
 		void *reps_buffer, uint16_t *reps_len, void *dev);
 typedef int (*zxdh_msg_process_callback)(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
@@ -599,5 +622,7 @@ int32_t zxdh_send_msg_to_riscv(struct rte_eth_dev *dev, void *msg_req,
 			uint16_t msg_req_len, void *reply, uint16_t reply_len,
 			enum ZXDH_BAR_MODULE_ID module_id);
 void zxdh_msg_cb_reg(struct zxdh_hw *hw);
+int zxdh_inic_pf_get_qp_from_vcb(struct zxdh_hw *hw, uint16_t vqm_vfid,
+			uint16_t *qid, uint16_t *qp);
 
 #endif /* ZXDH_MSG_H */
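
The size comments on the new message structures (16B, 4B, 44B) hold under
natural alignment, which a few lines of C11 outside the driver can confirm:

    #include <assert.h>
    #include <stdint.h>

    struct inic_to_vcb {
        uint16_t vqm_vfid, opcode, cmd, version; /* 8B */
        uint64_t features;                       /* +8B, 8-byte aligned */
    };
    struct vqm_queue { uint16_t start_qid, qp_num; };
    struct inic_recv_msg {
        uint32_t reps, check_result;             /* 8B fixed part */
        union { uint8_t data[36]; struct vqm_queue vqm_queue; };
    };

    static_assert(sizeof(struct inic_to_vcb) == 16, "16B as commented");
    static_assert(sizeof(struct vqm_queue) == 4, "4B as commented");
    static_assert(sizeof(struct inic_recv_msg) == 44, "44B as commented");

    int main(void) { return 0; }
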
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 4ff0f065df..4ba31905fc 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -420,6 +420,8 @@ zxdh_get_pci_dev_config(struct zxdh_hw *hw)
 	uint64_t nego_features = 0;
 
 	hw->host_features = ZXDH_PMD_DEFAULT_HOST_FEATURES;
+	if (hw->switchoffload)
+		hw->host_features = zxdh_pci_get_features(hw);
 
 	guest_features = (uint64_t)ZXDH_PMD_DEFAULT_GUEST_FEATURES;
 	nego_features = guest_features & hw->host_features;
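
Finally, the pci.c hunk only changes where host_features comes from: a
switch-offload PF reads what the device actually advertises instead of taking
the PMD defaults. The negotiation itself stays a plain bitwise AND, as this
minimal sketch shows (bit positions are illustrative, not the real ZXDH_F_*
values):

    #include <stdint.h>
    #include <stdio.h>

    #define F_VERSION_1   (1ULL << 32) /* illustrative bit positions */
    #define F_RING_PACKED (1ULL << 34)

    int main(void)
    {
        /* Defaults for ordinary PFs; a switch-offload PF would read
         * host features from the device instead. */
        uint64_t host_features  = F_VERSION_1;
        uint64_t guest_features = F_VERSION_1 | F_RING_PACKED;
        uint64_t nego = guest_features & host_features;

        printf("packed ring negotiated: %s\n",
               (nego & F_RING_PACKED) ? "yes" : "no");
        return 0;
    }
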
-- 
2.27.0

Thread overview: 6 messages
2025-07-07  5:56 [PATCH v1 1/6] net/zxdh: fix meson.build issues in compilation Junlong Wang
2025-07-07  5:56 ` [PATCH v1 2/6] net/zxdh: fix issues related to MAC configuration Junlong Wang
2025-07-07  5:56 ` [PATCH v1 3/6] net/zxdh: fix some VLAN related issues Junlong Wang
2025-07-07  5:56 ` [PATCH v1 4/6] net/zxdh: fix RSS " Junlong Wang
2025-07-07  5:56 ` [PATCH v1 5/6] net/zxdh: add support VLAN TPID set ops Junlong Wang
2025-07-07  5:56 ` Junlong Wang [this message]
