DPDK patches and discussions
 help / color / mirror / Atom feed
From: "WanRenyong" <wanry@yunsilicon.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@amd.com>, <thomas@monjalon.net>,
	 <andrew.rybchenko@oktetlabs.ru>, <qianr@yunsilicon.com>,
	 <nana@yunsilicon.com>, <zhangxx@yunsilicon.com>,
	 <zhangxx@yunsilicon.com>, <xudw@yunsilicon.com>,
	<jacky@yunsilicon.com>,  <weihg@yunsilicon.com>
Subject: [PATCH v4 03/15] net/xsc: add xsc mailbox
Date: Fri, 03 Jan 2025 23:04:10 +0800	[thread overview]
Message-ID: <20250103150409.1529663-4-wanry@yunsilicon.com> (raw)
In-Reply-To: <20250103150404.1529663-1-wanry@yunsilicon.com>

XSC mailbox is a mechanism used for interaction between PMD and firmware.

Signed-off-by: WanRenyong <wanry@yunsilicon.com>
Signed-off-by: Rong Qian <qianr@yunsilicon.com>
---
 drivers/net/xsc/meson.build     |   1 +
 drivers/net/xsc/xsc_cmd.h       | 387 ++++++++++++++++++
 drivers/net/xsc/xsc_defs.h      |   2 +
 drivers/net/xsc/xsc_vfio_mbox.c | 691 ++++++++++++++++++++++++++++++++
 drivers/net/xsc/xsc_vfio_mbox.h | 142 +++++++
 5 files changed, 1223 insertions(+)
 create mode 100644 drivers/net/xsc/xsc_cmd.h
 create mode 100644 drivers/net/xsc/xsc_vfio_mbox.c
 create mode 100644 drivers/net/xsc/xsc_vfio_mbox.h

diff --git a/drivers/net/xsc/meson.build b/drivers/net/xsc/meson.build
index 683a1f6632..df4c8ea499 100644
--- a/drivers/net/xsc/meson.build
+++ b/drivers/net/xsc/meson.build
@@ -9,4 +9,5 @@ endif
 sources = files(
         'xsc_ethdev.c',
         'xsc_dev.c',
+        'xsc_vfio_mbox.c',
 )
diff --git a/drivers/net/xsc/xsc_cmd.h b/drivers/net/xsc/xsc_cmd.h
new file mode 100644
index 0000000000..433dcd0afa
--- /dev/null
+++ b/drivers/net/xsc/xsc_cmd.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_CMD_H_
+#define _XSC_CMD_H_
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <string.h>
+#include <dirent.h>
+#include <net/if.h>
+
+#define XSC_BOARD_SN_LEN		32
+#define XSC_CMD_QUERY_HCA_CAP_V1	1
+
/* Firmware command opcodes carried in xsc_cmd_inbox_hdr.opcode (big-endian on the wire) */
enum xsc_cmd_opcode {
	XSC_CMD_OP_QUERY_HCA_CAP	= 0x100,
	XSC_CMD_OP_CREATE_CQ		= 0x400,
	XSC_CMD_OP_DESTROY_CQ		= 0x401,
	XSC_CMD_OP_CREATE_QP		= 0x500,
	XSC_CMD_OP_DESTROY_QP		= 0x501,
	XSC_CMD_OP_RTR2RTS_QP		= 0x504,
	XSC_CMD_OP_QP_2RST		= 0x50A,
	XSC_CMD_OP_CREATE_MULTI_QP	= 0x515,
	XSC_CMD_OP_MODIFY_NIC_HCA	= 0x812,
	XSC_CMD_OP_MODIFY_RAW_QP	= 0x81f,
	XSC_CMD_OP_EXEC_NP		= 0x900,
	XSC_CMD_OP_SET_MTU		= 0x1100,
	XSC_CMD_OP_QUERY_ETH_MAC	= 0x1101, /* fixed: was "0X1101"; use lowercase 0x like every other entry */
	XSC_CMD_OP_MAX
};
+
/* Result of a mailbox command exchange with the firmware. */
enum xsc_cmd_status {
	XSC_CMD_SUCC	= 0, /* command completed, firmware status == 0 */
	XSC_CMD_FAIL	= 1, /* firmware reported a non-zero status */
	XSC_CMD_TIMEOUT	= 2, /* no response within the timeout window */
};
+
+struct xsc_cmd_inbox_hdr {
+	rte_be16_t opcode;
+	uint8_t rsvd[4];
+	rte_be16_t ver;
+};
+
+struct xsc_cmd_outbox_hdr {
+	uint8_t status;
+	uint8_t rsvd[5];
+	rte_be16_t ver;
+};
+
+struct xsc_cmd_fw_version {
+	uint8_t major;
+	uint8_t minor;
+	rte_be16_t patch;
+	rte_be32_t tweak;
+	uint8_t extra_flag;
+	uint8_t rsv[7];
+};
+
+struct xsc_cmd_hca_cap {
+	uint8_t rsvd1[12];
+	uint8_t send_seg_num;
+	uint8_t send_wqe_shift;
+	uint8_t recv_seg_num;
+	uint8_t recv_wqe_shift;
+	uint8_t log_max_srq_sz;
+	uint8_t log_max_qp_sz;
+	uint8_t log_max_mtt;
+	uint8_t log_max_qp;
+	uint8_t log_max_strq_sz;
+	uint8_t log_max_srqs;
+	uint8_t rsvd2[2];
+	uint8_t log_max_tso;
+	uint8_t log_max_cq_sz;
+	uint8_t rsvd3;
+	uint8_t log_max_cq;
+	uint8_t log_max_eq_sz;
+	uint8_t log_max_mkey;
+	uint8_t log_max_msix;
+	uint8_t log_max_eq;
+	uint8_t max_indirection;
+	uint8_t log_max_mrw_sz;
+	uint8_t log_max_bsf_list_sz;
+	uint8_t log_max_klm_list_sz;
+	uint8_t rsvd4;
+	uint8_t log_max_ra_req_dc;
+	uint8_t rsvd5;
+	uint8_t log_max_ra_res_dc;
+	uint8_t rsvd6;
+	uint8_t log_max_ra_req_qp;
+	uint8_t log_max_qp_depth;
+	uint8_t log_max_ra_res_qp;
+	rte_be16_t max_vfs;
+	rte_be16_t raweth_qp_id_end;
+	rte_be16_t raw_tpe_qp_num;
+	rte_be16_t max_qp_count;
+	rte_be16_t raweth_qp_id_base;
+	uint8_t rsvd7;
+	uint8_t local_ca_ack_delay;
+	uint8_t max_num_eqs;
+	uint8_t num_ports;
+	uint8_t log_max_msg;
+	uint8_t mac_port;
+	rte_be16_t raweth_rss_qp_id_base;
+	rte_be16_t stat_rate_support;
+	uint8_t rsvd8[2];
+	rte_be64_t flags;
+	uint8_t rsvd9;
+	uint8_t uar_sz;
+	uint8_t rsvd10;
+	uint8_t log_pg_sz;
+	rte_be16_t bf_log_bf_reg_size;
+	rte_be16_t msix_base;
+	rte_be16_t msix_num;
+	rte_be16_t max_desc_sz_sq;
+	uint8_t rsvd11[2];
+	rte_be16_t max_desc_sz_rq;
+	uint8_t rsvd12[2];
+	rte_be16_t max_desc_sz_sq_dc;
+	uint8_t rsvd13[4];
+	rte_be16_t max_qp_mcg;
+	uint8_t rsvd14;
+	uint8_t log_max_mcg;
+	uint8_t rsvd15;
+	uint8_t log_max_pd;
+	uint8_t rsvd16;
+	uint8_t log_max_xrcd;
+	uint8_t rsvd17[40];
+	rte_be32_t uar_page_sz;
+	uint8_t rsvd18[8];
+	rte_be32_t hw_feature_flag;
+	rte_be16_t pf0_vf_funcid_base;
+	rte_be16_t pf0_vf_funcid_top;
+	rte_be16_t pf1_vf_funcid_base;
+	rte_be16_t pf1_vf_funcid_top;
+	rte_be16_t pcie0_pf_funcid_base;
+	rte_be16_t pcie0_pf_funcid_top;
+	rte_be16_t pcie1_pf_funcid_base;
+	rte_be16_t pcie1_pf_funcid_top;
+	uint8_t log_msx_atomic_size_qp;
+	uint8_t pcie_host;
+	uint8_t rsvd19;
+	uint8_t log_msx_atomic_size_dc;
+	uint8_t board_sn[XSC_BOARD_SN_LEN];
+	uint8_t max_tc;
+	uint8_t mac_bit;
+	rte_be16_t funcid_to_logic_port;
+	uint8_t rsvd20[6];
+	uint8_t nif_port_num;
+	uint8_t reg_mr_via_cmdq;
+	rte_be32_t hca_core_clock;
+	rte_be32_t max_rwq_indirection_tables;
+	rte_be32_t max_rwq_indirection_table_size;
+	rte_be32_t chip_ver_h;
+	rte_be32_t chip_ver_m;
+	rte_be32_t chip_ver_l;
+	rte_be32_t hotfix_num;
+	rte_be32_t feature_flag;
+	rte_be32_t rx_pkt_len_max;
+	rte_be32_t glb_func_id;
+	rte_be64_t tx_db;
+	rte_be64_t rx_db;
+	rte_be64_t complete_db;
+	rte_be64_t complete_reg;
+	rte_be64_t event_db;
+	rte_be32_t qp_rate_limit_min;
+	rte_be32_t qp_rate_limit_max;
+	struct xsc_cmd_fw_version fw_ver;
+	uint8_t lag_logic_port_ofst;
+	rte_be64_t max_mr_size;
+	rte_be16_t max_cmd_in_len;
+	rte_be16_t max_cmd_out_len;
+};
+
+struct xsc_cmd_query_hca_cap_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	rte_be16_t cpu_num;
+	uint8_t rsvd[6];
+};
+
+struct xsc_cmd_query_hca_cap_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint8_t rsvd[8];
+	struct xsc_cmd_hca_cap hca_cap;
+};
+
+struct xsc_cmd_cq_context {
+	uint16_t eqn;
+	uint16_t pa_num;
+	uint16_t glb_func_id;
+	uint8_t log_cq_sz;
+	uint8_t cq_type;
+};
+
+struct xsc_cmd_create_cq_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	struct xsc_cmd_cq_context ctx;
+	uint64_t pas[];
+};
+
+struct xsc_cmd_create_cq_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint32_t cqn;
+	uint8_t rsvd[4];
+};
+
+struct xsc_cmd_destroy_cq_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	uint32_t cqn;
+	uint8_t rsvd[4];
+};
+
+struct xsc_cmd_destroy_cq_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint8_t rsvd[8];
+};
+
+struct xsc_cmd_create_qp_request {
+	rte_be16_t input_qpn;
+	rte_be16_t pa_num;
+	uint8_t qp_type;
+	uint8_t log_sq_sz;
+	uint8_t log_rq_sz;
+	uint8_t dma_direct;
+	rte_be32_t pdn;
+	rte_be16_t cqn_send;
+	rte_be16_t cqn_recv;
+	rte_be16_t glb_funcid;
+	uint8_t page_shift;
+	uint8_t rsvd;
+	rte_be64_t pas[];
+};
+
+struct xsc_cmd_create_qp_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	struct xsc_cmd_create_qp_request req;
+};
+
+struct xsc_cmd_create_qp_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint32_t qpn;
+	uint8_t rsvd[4];
+};
+
+struct xsc_cmd_create_multiqp_mbox_in {
+	struct xsc_cmd_inbox_hdr  hdr;
+	rte_be16_t qp_num;
+	uint8_t qp_type;
+	uint8_t rsvd;
+	rte_be32_t req_len;
+	uint8_t data[];
+};
+
+struct xsc_cmd_create_multiqp_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	rte_be32_t qpn_base;
+};
+
+struct xsc_cmd_destroy_qp_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	rte_be32_t qpn;
+	uint8_t rsvd[4];
+};
+
+struct xsc_cmd_destroy_qp_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint8_t rsvd[8];
+};
+
+struct xsc_cmd_qp_context {
+	rte_be32_t remote_qpn;
+	rte_be32_t cqn_send;
+	rte_be32_t cqn_recv;
+	rte_be32_t next_send_psn;
+	rte_be32_t next_recv_psn;
+	rte_be32_t pdn;
+	rte_be16_t src_udp_port;
+	rte_be16_t path_id;
+	uint8_t mtu_mode;
+	uint8_t lag_sel;
+	uint8_t lag_sel_en;
+	uint8_t retry_cnt;
+	uint8_t rnr_retry;
+	uint8_t dscp;
+	uint8_t state;
+	uint8_t hop_limit;
+	uint8_t dmac[6];
+	uint8_t smac[6];
+	rte_be32_t dip[4];
+	rte_be32_t sip[4];
+	rte_be16_t ip_type;
+	rte_be16_t grp_id;
+	uint8_t vlan_valid;
+	uint8_t dci_cfi_prio_sl;
+	rte_be16_t vlan_id;
+	uint8_t qp_out_port;
+	uint8_t pcie_no;
+	rte_be16_t lag_id;
+	rte_be16_t func_id;
+	rte_be16_t rsvd;
+};
+
+struct xsc_cmd_modify_qp_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	rte_be32_t qpn;
+	struct xsc_cmd_qp_context ctx;
+	uint8_t no_need_wait;
+};
+
+struct xsc_cmd_modify_qp_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint8_t rsvd[8];
+};
+
+struct xsc_cmd_modify_raw_qp_request {
+	uint16_t qpn;
+	uint16_t lag_id;
+	uint16_t func_id;
+	uint8_t dma_direct;
+	uint8_t prio;
+	uint8_t qp_out_port;
+	uint8_t rsvd[7];
+};
+
+struct xsc_cmd_modify_raw_qp_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	uint8_t pcie_no;
+	uint8_t rsv[7];
+	struct xsc_cmd_modify_raw_qp_request req;
+};
+
+struct xsc_cmd_modify_raw_qp_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint8_t rsvd[8];
+};
+
+struct xsc_cmd_set_mtu_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	rte_be16_t mtu;
+	rte_be16_t rx_buf_sz_min;
+	uint8_t mac_port;
+	uint8_t rsvd;
+};
+
+struct xsc_cmd_set_mtu_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+};
+
+struct xsc_cmd_query_eth_mac_mbox_in {
+	struct xsc_cmd_inbox_hdr  hdr;
+	uint8_t index;
+};
+
+struct xsc_cmd_query_eth_mac_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint8_t mac[6];
+};
+
+struct xsc_cmd_nic_attr {
+	rte_be16_t caps;
+	rte_be16_t caps_mask;
+	uint8_t mac_addr[6];
+};
+
+struct xsc_cmd_rss_modify_attr {
+	uint8_t caps_mask;
+	uint8_t rss_en;
+	rte_be16_t rqn_base;
+	rte_be16_t rqn_num;
+	uint8_t hfunc;
+	rte_be32_t hash_tmpl;
+	uint8_t hash_key[52];
+};
+
+struct xsc_cmd_modify_nic_hca_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	struct xsc_cmd_nic_attr nic;
+	struct xsc_cmd_rss_modify_attr rss;
+};
+
+struct xsc_cmd_modify_nic_hca_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint8_t rsvd[4];
+};
+
+#endif /* _XSC_CMD_H_ */
diff --git a/drivers/net/xsc/xsc_defs.h b/drivers/net/xsc/xsc_defs.h
index 60244425cd..a4b36685a6 100644
--- a/drivers/net/xsc/xsc_defs.h
+++ b/drivers/net/xsc/xsc_defs.h
@@ -5,6 +5,8 @@
 #ifndef XSC_DEFS_H_
 #define XSC_DEFS_H_
 
+#define XSC_PAGE_SIZE			4096
+
 #define XSC_PCI_VENDOR_ID		0x1f67
 #define XSC_PCI_DEV_ID_MS		0x1111
 #define XSC_PCI_DEV_ID_MSVF		0x1112
diff --git a/drivers/net/xsc/xsc_vfio_mbox.c b/drivers/net/xsc/xsc_vfio_mbox.c
new file mode 100644
index 0000000000..b1bb06feb8
--- /dev/null
+++ b/drivers/net/xsc/xsc_vfio_mbox.c
@@ -0,0 +1,691 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+#include <rte_malloc.h>
+#include <bus_pci_driver.h>
+
+#include "xsc_vfio_mbox.h"
+#include "xsc_log.h"
+
+#define XSC_MBOX_BUF_NUM		2048
+#define XSC_MBOX_BUF_CACHE_SIZE		256
+#define XSC_CMDQ_DEPTH_LOG		5
+#define XSC_CMDQ_ELEMENT_SIZE_LOG	6
+#define XSC_CMDQ_REQ_TYPE		7
+#define XSC_CMDQ_WAIT_TIMEOUT		10
+#define XSC_CMDQ_WAIT_DELAY_MS		100
+#define XSC_CMD_OP_DUMMY		0x10d
+
+#define XSC_PF_CMDQ_ELEMENT_SZ		0x1020020
+#define XSC_PF_CMDQ_REQ_BASE_H_ADDR	0x1022000
+#define XSC_PF_CMDQ_REQ_BASE_L_ADDR	0x1024000
+#define XSC_PF_CMDQ_RSP_BASE_H_ADDR	0x102a000
+#define XSC_PF_CMDQ_RSP_BASE_L_ADDR	0x102c000
+#define XSC_PF_CMDQ_REQ_PID		0x1026000
+#define XSC_PF_CMDQ_REQ_CID		0x1028000
+#define XSC_PF_CMDQ_RSP_PID		0x102e000
+#define XSC_PF_CMDQ_RSP_CID		0x1030000
+#define XSC_PF_CMDQ_DEPTH		0x1020028
+
+#define XSC_VF_CMDQ_REQ_BASE_H_ADDR	0x0
+#define XSC_VF_CMDQ_REQ_BASE_L_ADDR	0x4
+#define XSC_VF_CMDQ_RSP_BASE_H_ADDR	0x10
+#define XSC_VF_CMDQ_RSP_BASE_L_ADDR	0x14
+#define XSC_VF_CMDQ_REQ_PID		0x8
+#define XSC_VF_CMDQ_REQ_CID		0xc
+#define XSC_VF_CMDQ_RSP_PID		0x18
+#define XSC_VF_CMDQ_RSP_CID		0x1c
+#define XSC_VF_CMDQ_ELEMENT_SZ		0x28
+#define XSC_VF_CMDQ_DEPTH		0x2c
+
+static const char * const xsc_cmd_error[] = {
+	"xsc cmd success",
+	"xsc cmd fail",
+	"xsc cmd timeout"
+};
+
+static struct xsc_cmdq_config xsc_pf_config = {
+	.req_pid_addr = XSC_PF_CMDQ_REQ_PID,
+	.req_cid_addr = XSC_PF_CMDQ_REQ_CID,
+	.rsp_pid_addr = XSC_PF_CMDQ_RSP_PID,
+	.rsp_cid_addr = XSC_PF_CMDQ_RSP_CID,
+	.req_h_addr = XSC_PF_CMDQ_REQ_BASE_H_ADDR,
+	.req_l_addr = XSC_PF_CMDQ_REQ_BASE_L_ADDR,
+	.rsp_h_addr = XSC_PF_CMDQ_RSP_BASE_H_ADDR,
+	.rsp_l_addr = XSC_PF_CMDQ_RSP_BASE_L_ADDR,
+	.elt_sz_addr = XSC_PF_CMDQ_ELEMENT_SZ,
+	.depth_addr = XSC_PF_CMDQ_DEPTH,
+};
+
+static struct xsc_cmdq_config xsc_vf_config = {
+	.req_pid_addr = XSC_VF_CMDQ_REQ_PID,
+	.req_cid_addr = XSC_VF_CMDQ_REQ_CID,
+	.rsp_pid_addr = XSC_VF_CMDQ_RSP_PID,
+	.rsp_cid_addr = XSC_VF_CMDQ_RSP_CID,
+	.req_h_addr = XSC_VF_CMDQ_REQ_BASE_H_ADDR,
+	.req_l_addr = XSC_VF_CMDQ_REQ_BASE_L_ADDR,
+	.rsp_h_addr = XSC_VF_CMDQ_RSP_BASE_H_ADDR,
+	.rsp_l_addr = XSC_VF_CMDQ_RSP_BASE_L_ADDR,
+	.elt_sz_addr = XSC_VF_CMDQ_ELEMENT_SZ,
+	.depth_addr = XSC_VF_CMDQ_DEPTH,
+};
+
+static void
+xsc_cmdq_config_init(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+	if (!xsc_dev_is_vf(xdev))
+		cmdq->config = &xsc_pf_config;
+	else
+		cmdq->config = &xsc_vf_config;
+}
+
/*
 * Synchronize the driver's response consumer index (cid) with hardware.
 * If the hardware producer index (pid) has already moved past our cid
 * (e.g. responses left over from a previous driver instance), skip ahead
 * and acknowledge them by writing the cid register.
 */
static void
xsc_cmdq_rsp_cid_update(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
{
	uint32_t rsp_pid;

	cmdq->rsp_cid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->rsp_cid_addr);
	rsp_pid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->rsp_pid_addr);
	if (rsp_pid != cmdq->rsp_cid) {
		PMD_DRV_LOG(INFO, "Update cid(%u) to latest pid(%u)",
			    cmdq->rsp_cid, rsp_pid);
		cmdq->rsp_cid = rsp_pid;
		rte_write32(cmdq->rsp_cid, (uint8_t *)xdev->bar_addr + cmdq->config->rsp_cid_addr);
	}
}
+
+static void
+xsc_cmdq_depth_set(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+	cmdq->depth_n = XSC_CMDQ_DEPTH_LOG;
+	cmdq->depth_m = (1 << XSC_CMDQ_DEPTH_LOG) - 1;
+	rte_write32(1 << cmdq->depth_n, (uint8_t *)xdev->bar_addr + cmdq->config->depth_addr);
+}
+
+static int
+xsc_cmdq_elt_size_check(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+	uint32_t elts_n;
+
+	elts_n = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->elt_sz_addr);
+	if (elts_n != XSC_CMDQ_ELEMENT_SIZE_LOG) {
+		PMD_DRV_LOG(ERR, "The cmdq elt size log(%u) is error, should be %u",
+			    elts_n, XSC_CMDQ_ELEMENT_SIZE_LOG);
+		rte_errno = ENODEV;
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+xsc_cmdq_req_base_addr_set(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+	uint32_t h_addr, l_addr;
+
+	h_addr = (uint32_t)(cmdq->req_mz->iova >> 32);
+	l_addr = (uint32_t)(cmdq->req_mz->iova);
+	rte_write32(h_addr, (uint8_t *)xdev->bar_addr + cmdq->config->req_h_addr);
+	rte_write32(l_addr, (uint8_t *)xdev->bar_addr + cmdq->config->req_l_addr);
+}
+
+static void
+xsc_cmdq_rsp_base_addr_set(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+	uint32_t h_addr, l_addr;
+
+	h_addr = (uint32_t)(cmdq->rsp_mz->iova >> 32);
+	l_addr = (uint32_t)(cmdq->rsp_mz->iova);
+	rte_write32(h_addr, (uint8_t *)xdev->bar_addr + cmdq->config->rsp_h_addr);
+	rte_write32(l_addr, (uint8_t *)xdev->bar_addr + cmdq->config->rsp_l_addr);
+}
+
+static void
+xsc_cmdq_mbox_free(struct xsc_dev *xdev, struct xsc_cmdq_mbox *mbox)
+{
+	struct xsc_cmdq_mbox *next, *head;
+	struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
+
+	head = mbox;
+	while (head != NULL) {
+		next = head->next;
+		if (head->buf != NULL)
+			rte_mempool_put(priv->cmdq->mbox_buf_pool, head->buf);
+		free(head);
+		head = next;
+	}
+}
+
+static struct xsc_cmdq_mbox *
+xsc_cmdq_mbox_alloc(struct xsc_dev *xdev)
+{
+	struct xsc_cmdq_mbox *mbox;
+	int ret;
+	struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
+
+	mbox = (struct xsc_cmdq_mbox *)malloc(sizeof(*mbox));
+	if (mbox == NULL) {
+		rte_errno = -ENOMEM;
+		goto error;
+	}
+	memset(mbox, 0, sizeof(struct xsc_cmdq_mbox));
+
+	ret = rte_mempool_get(priv->cmdq->mbox_buf_pool, (void **)&mbox->buf);
+	if (ret != 0)
+		goto error;
+	mbox->buf_dma = rte_mempool_virt2iova(mbox->buf);
+	memset(mbox->buf, 0, sizeof(struct xsc_cmdq_mbox_buf));
+	mbox->next = NULL;
+
+	return mbox;
+
+error:
+	xsc_cmdq_mbox_free(xdev, mbox);
+	return NULL;
+}
+
/*
 * Allocate a singly linked chain of n mailboxes.  The chain is built by
 * prepending, so each new head's DMA "next" pointer can reference the
 * previously built tail and block_num counts the remaining blocks after
 * this one.  On any failure the partial chain is freed and NULL returned.
 */
static struct xsc_cmdq_mbox *
xsc_cmdq_mbox_alloc_bulk(struct xsc_dev *xdev, int n)
{
	int i;
	struct xsc_cmdq_mbox *head = NULL;
	struct xsc_cmdq_mbox *mbox;
	struct xsc_cmdq_mbox_buf *mbox_buf;

	for (i = 0; i < n; i++) {
		mbox = xsc_cmdq_mbox_alloc(xdev);
		if (mbox == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc mailbox");
			goto error;
		}

		mbox_buf = mbox->buf;
		mbox->next = head;
		/* DMA-visible link to the rest of the chain (0 terminates) */
		mbox_buf->next = rte_cpu_to_be_64(mbox->next ? mbox->next->buf_dma : 0);
		mbox_buf->block_num = rte_cpu_to_be_32(n - i - 1);
		head = mbox;
	}

	return head;

error:
	xsc_cmdq_mbox_free(xdev, head);
	return NULL;
}
+
+static void
+xsc_cmdq_req_msg_free(struct xsc_dev *xdev, struct xsc_cmdq_req_msg *msg)
+{
+	struct xsc_cmdq_mbox *head;
+
+	if (msg == NULL)
+		return;
+
+	head = msg->next;
+	xsc_cmdq_mbox_free(xdev, head);
+	free(msg);
+}
+
+static struct xsc_cmdq_req_msg *
+xsc_cmdq_req_msg_alloc(struct xsc_dev *xdev, int len)
+{
+	struct xsc_cmdq_req_msg *msg;
+	struct xsc_cmdq_mbox *head = NULL;
+	int cmd_len, nb_mbox;
+
+	msg = (struct xsc_cmdq_req_msg *)malloc(sizeof(*msg));
+	if (msg == NULL) {
+		rte_errno = -ENOMEM;
+		goto error;
+	}
+	memset(msg, 0, sizeof(*msg));
+
+	cmd_len = len - RTE_MIN(sizeof(msg->hdr.data), (uint32_t)len);
+	nb_mbox = (cmd_len + XSC_CMDQ_DATA_SIZE - 1) / XSC_CMDQ_DATA_SIZE;
+	head = xsc_cmdq_mbox_alloc_bulk(xdev, nb_mbox);
+	if (head == NULL && nb_mbox != 0)
+		goto error;
+
+	msg->next = head;
+	msg->len = len;
+
+	return msg;
+
+error:
+	xsc_cmdq_req_msg_free(xdev, msg);
+	return NULL;
+}
+
+static void
+xsc_cmdq_rsp_msg_free(struct xsc_dev *xdev, struct xsc_cmdq_rsp_msg *msg)
+{
+	struct xsc_cmdq_mbox *head;
+
+	if (msg == NULL)
+		return;
+
+	head = msg->next;
+	xsc_cmdq_mbox_free(xdev, head);
+	free(msg);
+}
+
+static struct xsc_cmdq_rsp_msg *
+xsc_cmdq_rsp_msg_alloc(struct xsc_dev *xdev, int len)
+{
+	struct xsc_cmdq_rsp_msg *msg;
+	struct xsc_cmdq_mbox *head = NULL;
+	int cmd_len, nb_mbox;
+
+	msg = (struct xsc_cmdq_rsp_msg *)malloc(sizeof(*msg));
+	if (msg == NULL) {
+		rte_errno = -ENOMEM;
+		goto error;
+	}
+	memset(msg, 0, sizeof(*msg));
+
+	cmd_len = len - RTE_MIN(sizeof(msg->hdr.data), (uint32_t)len);
+	nb_mbox = (cmd_len + XSC_CMDQ_DATA_SIZE - 1) / XSC_CMDQ_DATA_SIZE;
+	head = xsc_cmdq_mbox_alloc_bulk(xdev, nb_mbox);
+	if (head == NULL && nb_mbox != 0)
+		goto error;
+
+	msg->next = head;
+	msg->len = len;
+
+	return msg;
+
+error:
+	xsc_cmdq_rsp_msg_free(xdev, msg);
+	return NULL;
+}
+
+static void
+xsc_cmdq_msg_destruct(struct xsc_dev *xdev,
+		      struct xsc_cmdq_req_msg **req_msg,
+		      struct xsc_cmdq_rsp_msg **rsp_msg)
+{
+	xsc_cmdq_req_msg_free(xdev, *req_msg);
+	xsc_cmdq_rsp_msg_free(xdev, *rsp_msg);
+	*req_msg = NULL;
+	*rsp_msg = NULL;
+}
+
+static int
+xsc_cmdq_msg_construct(struct xsc_dev *xdev,
+		       struct xsc_cmdq_req_msg **req_msg, int in_len,
+		       struct xsc_cmdq_rsp_msg **rsp_msg, int out_len)
+{
+	*req_msg = xsc_cmdq_req_msg_alloc(xdev, in_len);
+	if (*req_msg == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc xsc cmd request msg");
+		goto error;
+	}
+
+	*rsp_msg = xsc_cmdq_rsp_msg_alloc(xdev, out_len);
+	if (*rsp_msg == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc xsc cmd response msg");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	xsc_cmdq_msg_destruct(xdev, req_msg, rsp_msg);
+	return -1;
+}
+
/*
 * Scatter "in_len" bytes of command payload into req_msg: the first bytes
 * fill the inline header area, the remainder is split across the chained
 * mailbox buffers (owner_status cleared to hand ownership to hardware).
 *
 * @return 0 on success, -1 on NULL args or if the chain is too short
 */
static int
xsc_cmdq_req_msg_copy(struct xsc_cmdq_req_msg *req_msg, void *data_in, int in_len)
{
	struct xsc_cmdq_mbox_buf *mbox_buf;
	struct xsc_cmdq_mbox *mbox;
	int copy;
	uint8_t *data = data_in;

	if (req_msg == NULL || data == NULL)
		return -1;

	copy = RTE_MIN((uint32_t)in_len, sizeof(req_msg->hdr.data));
	memcpy(req_msg->hdr.data, data, copy);

	in_len -= copy;
	data += copy;

	mbox = req_msg->next;
	while (in_len > 0) {
		if (mbox == NULL)
			return -1;

		copy = RTE_MIN(in_len, XSC_CMDQ_DATA_SIZE);
		mbox_buf = mbox->buf;
		memcpy(mbox_buf->data, data, copy);
		mbox_buf->owner_status = 0;
		data += copy;
		in_len -= copy;
		mbox = mbox->next;
	}

	return 0;
}
+
/*
 * Gather up to "out_len" bytes of response payload from rsp_msg into the
 * caller's buffer: inline header bytes first, then the chained mailbox
 * buffers.  A clear owner_status is only logged (best effort), the data
 * is copied regardless.
 *
 * @return 0 on success, -1 on NULL args or if the chain is too short
 */
static int
xsc_cmdq_rsp_msg_copy(void *data_out, struct xsc_cmdq_rsp_msg *rsp_msg, int out_len)
{
	struct xsc_cmdq_mbox_buf *mbox_buf;
	struct xsc_cmdq_mbox *mbox;
	int copy;
	uint8_t *data = data_out;

	if (data == NULL || rsp_msg == NULL)
		return -1;

	copy = RTE_MIN((uint32_t)out_len, sizeof(rsp_msg->hdr.data));
	memcpy(data, rsp_msg->hdr.data, copy);
	out_len -= copy;
	data += copy;

	mbox = rsp_msg->next;
	while (out_len > 0) {
		if (mbox == NULL)
			return -1;
		copy = RTE_MIN(out_len, XSC_CMDQ_DATA_SIZE);
		mbox_buf = mbox->buf;
		if (!mbox_buf->owner_status)
			PMD_DRV_LOG(ERR, "Failed to check cmd owner");
		memcpy(data, mbox_buf->data, copy);
		data += copy;
		out_len -= copy;
		mbox = mbox->next;
	}

	return 0;
}
+
/*
 * Poll the response queue until the command completes or the timeout
 * (XSC_CMDQ_WAIT_TIMEOUT seconds) expires.  Must be called with
 * cmdq->lock held.  On completion the inline response is copied into
 * rsp_msg->hdr.data and the consumer index is advanced and acknowledged
 * to hardware.  The queue owner bit flips on every ring wrap-around and
 * is first learned from hardware.
 *
 * @return XSC_CMD_SUCC, XSC_CMD_FAIL (firmware status != 0) or
 *         XSC_CMD_TIMEOUT
 */
static enum xsc_cmd_status
xsc_cmdq_wait_completion(struct xsc_dev *xdev, struct xsc_cmdq_rsp_msg *rsp_msg)
{
	struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
	struct xsc_cmd_queue *cmdq = priv->cmdq;
	volatile struct xsc_cmdq_rsp_layout *rsp_lay;
	struct xsc_cmd_outbox_hdr *out_hdr = (struct xsc_cmd_outbox_hdr *)rsp_msg->hdr.data;
	int count = (XSC_CMDQ_WAIT_TIMEOUT * 1000) / XSC_CMDQ_WAIT_DELAY_MS;
	uint32_t rsp_pid;
	uint8_t cmd_status;
	uint32_t i;

	while (count-- > 0) {
		rsp_pid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->rsp_pid_addr);
		if (rsp_pid == cmdq->rsp_cid) {
			/* No new response yet, back off before re-polling */
			rte_delay_ms(XSC_CMDQ_WAIT_DELAY_MS);
			continue;
		}

		rsp_lay = cmdq->rsp_lay + cmdq->rsp_cid;
		if (cmdq->owner_learn == 0) {
			/* First time learning owner_bit from hardware */
			cmdq->owner_bit = rsp_lay->owner_bit;
			cmdq->owner_learn = 1;
		}

		/* Waiting for dma to complete */
		if (cmdq->owner_bit != rsp_lay->owner_bit)
			continue;

		for (i = 0; i < XSC_CMDQ_RSP_INLINE_SIZE; i++)
			rsp_msg->hdr.data[i] = rsp_lay->out[i];

		/* Acknowledge consumption to hardware */
		cmdq->rsp_cid = (cmdq->rsp_cid + 1) & cmdq->depth_m;
		rte_write32(cmdq->rsp_cid, (uint8_t *)xdev->bar_addr + cmdq->config->rsp_cid_addr);

		/* Change owner bit */
		if (cmdq->rsp_cid == 0)
			cmdq->owner_bit = !cmdq->owner_bit;

		cmd_status = out_hdr->status;
		if (cmd_status != 0)
			return XSC_CMD_FAIL;
		return XSC_CMD_SUCC;
	}

	return XSC_CMD_TIMEOUT;
}
+
+static int
+xsc_cmdq_dummy_invoke(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq, uint32_t start, int num)
+{
+	struct xsc_cmdq_dummy_mbox_in in;
+	struct xsc_cmdq_dummy_mbox_out out;
+	struct xsc_cmdq_req_msg *req_msg = NULL;
+	struct xsc_cmdq_rsp_msg *rsp_msg = NULL;
+	struct xsc_cmdq_req_layout *req_lay;
+	int in_len = sizeof(in);
+	int out_len = sizeof(out);
+	int ret, i;
+	uint32_t start_pid = start;
+
+	memset(&in, 0, sizeof(in));
+	in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_DUMMY);
+
+	ret = xsc_cmdq_msg_construct(xdev, &req_msg, in_len, &rsp_msg, out_len);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to construct cmd msg for dummy exec");
+		return -1;
+	}
+
+	ret = xsc_cmdq_req_msg_copy(req_msg, &in, in_len);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to copy cmd buf to request msg for dummy exec");
+		goto error;
+	}
+
+	rte_spinlock_lock(&cmdq->lock);
+
+	for (i = 0; i < num; i++) {
+		req_lay = cmdq->req_lay + start_pid;
+		memset(req_lay, 0, sizeof(*req_lay));
+		memcpy(req_lay->in, req_msg->hdr.data, sizeof(req_lay->in));
+		req_lay->inlen = rte_cpu_to_be_32(req_msg->len);
+		req_lay->outlen = rte_cpu_to_be_32(rsp_msg->len);
+		req_lay->sig = 0xff;
+		req_lay->idx = 0;
+		req_lay->type = XSC_CMDQ_REQ_TYPE;
+		start_pid = (start_pid + 1) & cmdq->depth_m;
+	}
+
+	/* Ring doorbell after the descriptor is valid */
+	rte_write32(cmdq->req_pid, (uint8_t *)xdev->bar_addr + cmdq->config->req_pid_addr);
+
+	ret = xsc_cmdq_wait_completion(xdev, rsp_msg);
+	rte_spinlock_unlock(&cmdq->lock);
+
+error:
+	xsc_cmdq_msg_destruct(xdev, &req_msg, &rsp_msg);
+	return ret;
+}
+
/*
 * Bring the request queue back to a consistent state after attach.
 * If the hardware producer (pid) and consumer (cid) indices differ,
 * replay the outstanding slots with dummy commands so the firmware
 * catches up.
 *
 * @return 0 on success, -1 if the registers hold out-of-range values or
 *         the dummy replay fails
 */
static int
xsc_cmdq_req_status_restore(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
{
	uint32_t req_pid, req_cid;
	uint32_t cnt;

	req_pid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->req_pid_addr);
	req_cid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->req_cid_addr);

	if (req_pid >= (uint32_t)(1 << cmdq->depth_n) ||
	    req_cid >= (uint32_t)(1 << cmdq->depth_n)) {
		PMD_DRV_LOG(ERR, "Request pid %u and cid %u must be less than %u",
			    req_pid, req_cid, 1 << cmdq->depth_n);
		return -1;
	}

	cmdq->req_pid = req_pid;
	if (req_pid == req_cid)
		return 0;

	/* Number of outstanding slots, accounting for ring wrap-around */
	cnt = (req_pid > req_cid) ? (req_pid - req_cid) :
		((1 << cmdq->depth_n) + req_pid - req_cid);
	if (xsc_cmdq_dummy_invoke(xdev, cmdq, req_cid, cnt) != 0) {
		PMD_DRV_LOG(ERR, "Failed to dummy invoke xsc cmd");
		return -1;
	}

	return 0;
}
+
+void
+xsc_vfio_mbox_destroy(struct xsc_cmd_queue *cmdq)
+{
+	if (cmdq == NULL)
+		return;
+
+	rte_memzone_free(cmdq->req_mz);
+	rte_memzone_free(cmdq->rsp_mz);
+	rte_mempool_free(cmdq->mbox_buf_pool);
+	rte_free(cmdq);
+}
+
/*
 * Create and initialize the mailbox command queue for a device:
 *   - allocate the request and response rings as IOVA-contiguous,
 *     page-aligned memzones,
 *   - create the mailbox data-buffer mempool,
 *   - program the hardware registers (ring addresses, depth) and
 *     validate the element size,
 *   - replay any outstanding requests left by a previous instance.
 * On success priv->cmdq is set; on any failure all partial resources
 * are released via xsc_vfio_mbox_destroy().
 *
 * @return 0 on success, -1 on failure
 */
int
xsc_vfio_mbox_init(struct xsc_dev *xdev)
{
	struct xsc_cmd_queue *cmdq;
	struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
	char name[RTE_MEMZONE_NAMESIZE] = { 0 };
	uint32_t size;

	cmdq = rte_zmalloc(NULL, sizeof(*cmdq), RTE_CACHE_LINE_SIZE);
	if (cmdq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for xsc_cmd_queue");
		return -1;
	}

	/* Request ring: one xsc_cmdq_req_layout per queue slot */
	snprintf(name, RTE_MEMZONE_NAMESIZE, "%s_cmdq", xdev->pci_dev->device.name);
	size = (1 << XSC_CMDQ_DEPTH_LOG) * sizeof(struct xsc_cmdq_req_layout);
	cmdq->req_mz = rte_memzone_reserve_aligned(name,
						   size, SOCKET_ID_ANY,
						   RTE_MEMZONE_IOVA_CONTIG,
						   XSC_PAGE_SIZE);
	if (cmdq->req_mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cmd queue");
		goto error;
	}
	cmdq->req_lay = cmdq->req_mz->addr;

	/* Response ring: one xsc_cmdq_rsp_layout per queue slot */
	snprintf(name, RTE_MEMZONE_NAMESIZE, "%s_cmd_cq", xdev->pci_dev->device.name);
	size = (1 << XSC_CMDQ_DEPTH_LOG) * sizeof(struct xsc_cmdq_rsp_layout);
	cmdq->rsp_mz = rte_memzone_reserve_aligned(name,
						   size, SOCKET_ID_ANY,
						   RTE_MEMZONE_IOVA_CONTIG,
						   XSC_PAGE_SIZE);
	if (cmdq->rsp_mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cmd cq");
		goto error;
	}
	cmdq->rsp_lay = cmdq->rsp_mz->addr;

	/* Pool of DMA-able buffers for chained mailbox payloads */
	snprintf(name, RTE_MEMZONE_NAMESIZE, "%s_mempool", xdev->pci_dev->device.name);
	cmdq->mbox_buf_pool = rte_mempool_create(name, XSC_MBOX_BUF_NUM,
						 sizeof(struct xsc_cmdq_mbox_buf),
						 XSC_MBOX_BUF_CACHE_SIZE, 0,
						 NULL, NULL, NULL, NULL,
						 SOCKET_ID_ANY, 0);
	if (cmdq->mbox_buf_pool == NULL) {
		PMD_DRV_LOG(ERR, "Failed to create mailbox buf pool");
		goto error;
	}

	xsc_cmdq_config_init(xdev, cmdq);
	xsc_cmdq_rsp_cid_update(xdev, cmdq);
	xsc_cmdq_depth_set(xdev, cmdq);
	if (xsc_cmdq_elt_size_check(xdev, cmdq) != 0)
		goto error;

	xsc_cmdq_req_base_addr_set(xdev, cmdq);
	xsc_cmdq_rsp_base_addr_set(xdev, cmdq);
	/* Check request status and restore it */
	if (xsc_cmdq_req_status_restore(xdev, cmdq) != 0)
		goto error;

	rte_spinlock_init(&cmdq->lock);
	priv->cmdq = cmdq;
	return 0;

error:
	xsc_vfio_mbox_destroy(cmdq);
	return -1;
}
+
/*
 * Post one request descriptor and synchronously wait for its response.
 * Serialized by cmdq->lock.  Inline data plus the optional chained
 * mailbox DMA pointers (in_ptr/out_ptr) describe payloads larger than
 * the inline areas.
 *
 * @return completion status from xsc_cmdq_wait_completion()
 */
static enum xsc_cmd_status
xsc_cmdq_invoke(struct xsc_dev *xdev, struct xsc_cmdq_req_msg *req_msg,
		struct xsc_cmdq_rsp_msg *rsp_msg)
{
	struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
	struct xsc_cmd_queue *cmdq = priv->cmdq;
	struct xsc_cmdq_req_layout *req_lay;
	enum xsc_cmd_status status = XSC_CMD_FAIL;

	rte_spinlock_lock(&cmdq->lock);
	req_lay = cmdq->req_lay + cmdq->req_pid;
	memset(req_lay, 0, sizeof(*req_lay));
	memcpy(req_lay->in, req_msg->hdr.data, sizeof(req_lay->in));
	if (req_msg->next != NULL)
		req_lay->in_ptr = rte_cpu_to_be_64(req_msg->next->buf_dma);
	req_lay->inlen = rte_cpu_to_be_32(req_msg->len);

	if (rsp_msg->next != NULL)
		req_lay->out_ptr = rte_cpu_to_be_64(rsp_msg->next->buf_dma);
	req_lay->outlen = rte_cpu_to_be_32(rsp_msg->len);

	req_lay->sig = 0xff;
	req_lay->idx = 0;
	req_lay->type = XSC_CMDQ_REQ_TYPE;

	/* Ring doorbell after the descriptor is valid */
	cmdq->req_pid = (cmdq->req_pid + 1) & cmdq->depth_m;
	rte_write32(cmdq->req_pid, (uint8_t *)xdev->bar_addr + cmdq->config->req_pid_addr);

	status = xsc_cmdq_wait_completion(xdev, rsp_msg);
	rte_spinlock_unlock(&cmdq->lock);

	return status;
}
+
+int
+xsc_vfio_mbox_exec(struct xsc_dev *xdev, void *data_in,
+	      int in_len, void *data_out, int out_len)
+{
+	struct xsc_cmdq_req_msg *req_msg = NULL;
+	struct xsc_cmdq_rsp_msg *rsp_msg = NULL;
+	int ret;
+	enum xsc_cmd_status status;
+
+	ret = xsc_cmdq_msg_construct(xdev, &req_msg, in_len, &rsp_msg, out_len);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to construct cmd msg");
+		return -1;
+	}
+
+	ret = xsc_cmdq_req_msg_copy(req_msg, data_in, in_len);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to copy cmd buf to request msg");
+		goto error;
+	}
+
+	status = xsc_cmdq_invoke(xdev, req_msg, rsp_msg);
+	if (status != XSC_CMD_SUCC) {
+		PMD_DRV_LOG(ERR, "Failed to invoke xsc cmd, %s",
+			    xsc_cmd_error[status]);
+		ret = -1;
+		goto error;
+	}
+
+	ret = xsc_cmdq_rsp_msg_copy(data_out, rsp_msg, out_len);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to copy response msg to out data");
+		goto error;
+	}
+
+error:
+	xsc_cmdq_msg_destruct(xdev, &req_msg, &rsp_msg);
+	return ret;
+}
diff --git a/drivers/net/xsc/xsc_vfio_mbox.h b/drivers/net/xsc/xsc_vfio_mbox.h
new file mode 100644
index 0000000000..49ca84f7ec
--- /dev/null
+++ b/drivers/net/xsc/xsc_vfio_mbox.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_CMDQ_H_
+#define _XSC_CMDQ_H_
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+
+#include "xsc_dev.h"
+#include "xsc_cmd.h"
+
+#define XSC_CMDQ_DATA_SIZE		512
+#define XSC_CMDQ_REQ_INLINE_SIZE	8
+#define XSC_CMDQ_RSP_INLINE_SIZE	14
+
/*
 * BAR offsets of the command-queue registers; one table per device type
 * (PF vs VF, see xsc_pf_config/xsc_vf_config in xsc_vfio_mbox.c).
 */
struct xsc_cmdq_config {
	uint32_t req_pid_addr;	/* request producer index (doorbell) */
	uint32_t req_cid_addr;	/* request consumer index */
	uint32_t rsp_pid_addr;	/* response producer index */
	uint32_t rsp_cid_addr;	/* response consumer index (ack) */
	uint32_t req_h_addr;	/* request ring IOVA, high 32 bits */
	uint32_t req_l_addr;	/* request ring IOVA, low 32 bits */
	uint32_t rsp_h_addr;	/* response ring IOVA, high 32 bits */
	uint32_t rsp_l_addr;	/* response ring IOVA, low 32 bits */
	uint32_t elt_sz_addr;	/* log2 of the queue element size */
	uint32_t depth_addr;	/* queue depth in entries */
};
+
+struct xsc_cmd_queue {
+	struct xsc_cmdq_req_layout *req_lay;
+	struct xsc_cmdq_rsp_layout *rsp_lay;
+	const struct rte_memzone *req_mz;
+	const struct rte_memzone *rsp_mz;
+	uint32_t req_pid;
+	uint32_t rsp_cid;
+	uint8_t  owner_bit; /* CMDQ owner bit */
+	uint8_t  owner_learn; /* Learn ownerbit from hw */
+	uint8_t  depth_n; /* Log 2 of CMDQ depth */
+	uint8_t  depth_m; /* CMDQ depth mask */
+	struct rte_mempool *mbox_buf_pool; /* CMDQ data pool */
+	struct xsc_cmdq_config *config;
+	rte_spinlock_t lock;
+};
+
/* DMA-able mailbox data block; chained via the "next" IOVA pointer. */
struct xsc_cmdq_mbox_buf {
	uint8_t    data[XSC_CMDQ_DATA_SIZE];
	uint8_t    rsv0[48];
	rte_be64_t next; /* Next buf dma addr */
	rte_be32_t block_num; /* remaining blocks after this one in the chain */
	uint8_t    owner_status; /* 0 = owned by hardware, set on completion */
	uint8_t    token;
	uint8_t    ctrl_sig;
	uint8_t    sig;
};
+
+struct xsc_cmdq_mbox {
+	struct xsc_cmdq_mbox_buf *buf;
+	rte_iova_t buf_dma;
+	struct xsc_cmdq_mbox     *next;
+};
+
+/* CMDQ request msg inline */
+struct xsc_cmdq_req_hdr {
+	rte_be32_t data[XSC_CMDQ_REQ_INLINE_SIZE];
+};
+
+struct xsc_cmdq_req_msg {
+	uint32_t                  len;
+	struct xsc_cmdq_req_hdr    hdr;
+	struct xsc_cmdq_mbox       *next;
+};
+
+/* CMDQ response msg inline */
+struct xsc_cmdq_rsp_hdr {
+	rte_be32_t data[XSC_CMDQ_RSP_INLINE_SIZE];
+};
+
+struct xsc_cmdq_rsp_msg {
+	uint32_t                  len;
+	struct xsc_cmdq_rsp_hdr    hdr;
+	struct xsc_cmdq_mbox       *next;
+};
+
+/* HW will use this for some records(e.g. vf_id) */
+struct xsc_cmdq_rsv {
+	uint16_t vf_id;
+	uint8_t  rsv[2];
+};
+
+/* CMDQ request entry layout */
+struct xsc_cmdq_req_layout {
+	struct xsc_cmdq_rsv rsv0;
+	rte_be32_t          inlen;
+	rte_be64_t          in_ptr;
+	rte_be32_t          in[XSC_CMDQ_REQ_INLINE_SIZE];
+	rte_be64_t          out_ptr;
+	rte_be32_t          outlen;
+	uint8_t             token;
+	uint8_t             sig;
+	uint8_t             idx;
+	uint8_t             type:7;
+	uint8_t             owner_bit:1;
+};
+
+/* CMDQ response entry layout */
+struct xsc_cmdq_rsp_layout {
+	struct xsc_cmdq_rsv rsv0;
+	rte_be32_t          out[XSC_CMDQ_RSP_INLINE_SIZE];
+	uint8_t             token;
+	uint8_t             sig;
+	uint8_t             idx;
+	uint8_t             type:7;
+	uint8_t             owner_bit:1;
+};
+
+struct xsc_cmdq_dummy_mbox_in {
+	struct xsc_cmd_inbox_hdr hdr;
+	uint8_t                  rsv[8];
+};
+
+struct xsc_cmdq_dummy_mbox_out {
+	struct xsc_cmd_outbox_hdr hdr;
+	uint8_t                   rsv[8];
+};
+
+struct xsc_vfio_priv {
+	struct xsc_cmd_queue *cmdq;
+};
+
+int xsc_vfio_mbox_init(struct xsc_dev *xdev);
+void xsc_vfio_mbox_destroy(struct xsc_cmd_queue *cmdq);
+int xsc_vfio_mbox_exec(struct xsc_dev *xdev,
+		       void *data_in, int in_len,
+		       void *data_out, int out_len);
+
+#endif /* _XSC_CMDQ_H_ */
-- 
2.25.1

  parent reply	other threads:[~2025-01-03 15:04 UTC|newest]

Thread overview: 32+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-01-03 15:04 [PATCH v4 00/15] XSC PMD for Yunsilicon NICs WanRenyong
2025-01-03 15:04 ` [PATCH v4 01/15] net/xsc: add xsc PMD framework WanRenyong
2025-01-03 19:00   ` Stephen Hemminger
2025-01-06  1:36     ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 02/15] net/xsc: add xsc device initialization WanRenyong
2025-01-03 18:58   ` Stephen Hemminger
2025-01-06  3:29     ` WanRenyong
2025-01-03 15:04 ` WanRenyong [this message]
2025-01-03 15:04 ` [PATCH v4 04/15] net/xsc: add xsc dev ops to support VFIO driver WanRenyong
2025-01-03 19:02   ` Stephen Hemminger
2025-01-06  1:53     ` WanRenyong
2025-01-03 19:04   ` Stephen Hemminger
2025-01-06  2:01     ` WanRenyong
2025-01-03 19:06   ` Stephen Hemminger
2025-01-06  2:02     ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 05/15] net/xsc: add PCT interfaces WanRenyong
2025-01-03 15:04 ` [PATCH v4 06/15] net/xsc: initialize xsc representors WanRenyong
2025-01-03 15:04 ` [PATCH v4 07/15] net/xsc: add ethdev configure and RSS ops WanRenyong
2025-01-03 19:14   ` Stephen Hemminger
2025-01-06  2:20     ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 08/15] net/xsc: add Rx and Tx queue setup WanRenyong
2025-01-03 15:04 ` [PATCH v4 09/15] net/xsc: add ethdev start WanRenyong
2025-01-03 19:17   ` Stephen Hemminger
2025-01-06  3:01     ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 10/15] net/xsc: add ethdev stop and close WanRenyong
2025-01-03 15:04 ` [PATCH v4 11/15] net/xsc: add ethdev Rx burst WanRenyong
2025-01-03 15:04 ` [PATCH v4 12/15] net/xsc: add ethdev Tx burst WanRenyong
2025-01-03 15:04 ` [PATCH v4 13/15] net/xsc: add basic stats ops WanRenyong
2025-01-03 15:04 ` [PATCH v4 14/15] net/xsc: add ethdev infos get WanRenyong
2025-01-03 19:22   ` Stephen Hemminger
2025-01-06  4:03     ` WanRenyong
2025-01-03 15:04 ` [PATCH v4 15/15] net/xsc: add ethdev link and MTU ops WanRenyong

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250103150409.1529663-4-wanry@yunsilicon.com \
    --to=wanry@yunsilicon.com \
    --cc=andrew.rybchenko@oktetlabs.ru \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@amd.com \
    --cc=jacky@yunsilicon.com \
    --cc=nana@yunsilicon.com \
    --cc=qianr@yunsilicon.com \
    --cc=thomas@monjalon.net \
    --cc=weihg@yunsilicon.com \
    --cc=xudw@yunsilicon.com \
    --cc=zhangxx@yunsilicon.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).