From: "WanRenyong" <wanry@yunsilicon.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@amd.com>, <thomas@monjalon.net>,
<andrew.rybchenko@oktetlabs.ru>, <qianr@yunsilicon.com>,
<nana@yunsilicon.com>, <zhangxx@yunsilicon.com>, <xudw@yunsilicon.com>,
<jacky@yunsilicon.com>, <weihg@yunsilicon.com>
Subject: [PATCH v4 04/15] net/xsc: add xsc dev ops to support VFIO driver
Date: Fri, 03 Jan 2025 23:04:13 +0800
Message-ID: <20250103150411.1529663-5-wanry@yunsilicon.com>
In-Reply-To: <20250103150404.1529663-1-wanry@yunsilicon.com>
The XSC PMD is designed to support both VFIO and private kernel drivers.
This commit adds the xsc dev ops used to support the VFIO driver.
Signed-off-by: WanRenyong <wanry@yunsilicon.com>
Signed-off-by: Na Na <nana@yunsilicon.com>
---
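Note for reviewers: below is a minimal sketch of how a backend plugs into
the dev ops added by this patch. It is illustrative only and not part of
the patch; example_dev_init, example_dev_close and the RTE_PCI_KDRV_UNKNOWN
key are hypothetical placeholders.

    #include <rte_common.h>
    #include <rte_bus_pci.h>

    #include "xsc_dev.h"

    static int
    example_dev_init(struct xsc_dev *xdev)
    {
            (void)xdev;
            return 0;
    }

    static int
    example_dev_close(struct xsc_dev *xdev)
    {
            (void)xdev;
            return 0;
    }

    static struct xsc_dev_ops example_ops = {
            .kdrv = RTE_PCI_KDRV_UNKNOWN, /* hypothetical driver key */
            .dev_init = example_dev_init,
            .dev_close = example_dev_close,
    };

    /* Constructor registers the ops; xsc_dev matches on kdrv at probe */
    RTE_INIT(example_ops_reg)
    {
            xsc_dev_ops_register(&example_ops);
    }

Ops members left unset stay NULL, so core code presumably checks each
pointer before dispatching.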
drivers/net/xsc/meson.build | 1 +
drivers/net/xsc/xsc_defs.h | 8 +
drivers/net/xsc/xsc_dev.h | 32 ++
drivers/net/xsc/xsc_rxtx.h | 102 +++++
drivers/net/xsc/xsc_vfio.c | 750 ++++++++++++++++++++++++++++++++++++
5 files changed, 893 insertions(+)
create mode 100644 drivers/net/xsc/xsc_rxtx.h
create mode 100644 drivers/net/xsc/xsc_vfio.c
diff --git a/drivers/net/xsc/meson.build b/drivers/net/xsc/meson.build
index df4c8ea499..4e20b30438 100644
--- a/drivers/net/xsc/meson.build
+++ b/drivers/net/xsc/meson.build
@@ -10,4 +10,5 @@ sources = files(
'xsc_ethdev.c',
'xsc_dev.c',
'xsc_vfio_mbox.c',
+ 'xsc_vfio.c',
)
diff --git a/drivers/net/xsc/xsc_defs.h b/drivers/net/xsc/xsc_defs.h
index a4b36685a6..8fd59133bc 100644
--- a/drivers/net/xsc/xsc_defs.h
+++ b/drivers/net/xsc/xsc_defs.h
@@ -16,6 +16,14 @@
#define XSC_VFREP_BASE_LOGICAL_PORT 1081
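+/* Doorbell register offsets within BAR 0; PF and VF expose different layouts (assumed) */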
+#define XSC_PF_TX_DB_ADDR 0x4802000
+#define XSC_PF_RX_DB_ADDR 0x4804000
+#define XSC_PF_CQ_DB_ADDR 0x2120000
+
+#define XSC_VF_RX_DB_ADDR 0x8d4
+#define XSC_VF_TX_DB_ADDR 0x8d0
+#define XSC_VF_CQ_DB_ADDR 0x8c4
+
enum xsc_nic_mode {
XSC_NIC_MODE_LEGACY,
XSC_NIC_MODE_SWITCHDEV,
diff --git a/drivers/net/xsc/xsc_dev.h b/drivers/net/xsc/xsc_dev.h
index 7eae78d9bf..deeddeb7f1 100644
--- a/drivers/net/xsc/xsc_dev.h
+++ b/drivers/net/xsc/xsc_dev.h
@@ -14,6 +14,7 @@
#include "xsc_defs.h"
#include "xsc_log.h"
+#include "xsc_rxtx.h"
#define XSC_PPH_MODE_ARG "pph_mode"
#define XSC_NIC_MODE_ARG "nic_mode"
@@ -25,6 +26,18 @@
#define XSC_DEV_PCT_IDX_INVALID 0xFFFFFFFF
#define XSC_DEV_REPR_ID_INVALID 0x7FFFFFFF
+enum xsc_queue_type {
+ XSC_QUEUE_TYPE_RDMA_RC = 0,
+ XSC_QUEUE_TYPE_RDMA_MAD = 1,
+ XSC_QUEUE_TYPE_RAW = 2,
+ XSC_QUEUE_TYPE_VIRTIO_NET = 3,
+ XSC_QUEUE_TYPE_VIRTIO_BLK = 4,
+ XSC_QUEUE_TYPE_RAW_TPE = 5,
+ XSC_QUEUE_TYPE_RAW_TSO = 6,
+ XSC_QUEUE_TYPE_RAW_TX = 7,
+ XSC_QUEUE_TYPE_INVALID = 0xFF,
+};
+
struct xsc_hwinfo {
uint8_t valid; /* 1: current phy info is valid, 0 : invalid */
uint32_t pcie_no; /* pcie number , 0 or 1 */
@@ -120,6 +133,25 @@ struct xsc_dev_ops {
enum rte_pci_kernel_driver kdrv;
int (*dev_init)(struct xsc_dev *xdev);
int (*dev_close)(struct xsc_dev *xdev);
+ int (*get_mac)(struct xsc_dev *xdev, uint8_t *mac);
+ int (*set_link_up)(struct xsc_dev *xdev);
+ int (*set_link_down)(struct xsc_dev *xdev);
+ int (*link_update)(struct xsc_dev *xdev, uint8_t funcid_type, int wait_to_complete);
+ int (*set_mtu)(struct xsc_dev *xdev, uint16_t mtu);
+ int (*destroy_qp)(void *qp);
+ int (*destroy_cq)(void *cq);
+ int (*modify_qp_status)(struct xsc_dev *xdev,
+ uint32_t qpn, int num, int opcode);
+ int (*modify_qp_qostree)(struct xsc_dev *xdev, uint16_t qpn);
+
+ int (*rx_cq_create)(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
+ struct xsc_rx_cq_info *cq_info);
+ int (*tx_cq_create)(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
+ struct xsc_tx_cq_info *cq_info);
+ int (*tx_qp_create)(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
+ struct xsc_tx_qp_info *qp_info);
+ int (*mailbox_exec)(struct xsc_dev *xdev, void *data_in,
+ int in_len, void *data_out, int out_len);
};
void xsc_dev_ops_register(struct xsc_dev_ops *new_ops);
diff --git a/drivers/net/xsc/xsc_rxtx.h b/drivers/net/xsc/xsc_rxtx.h
new file mode 100644
index 0000000000..725a5f18d1
--- /dev/null
+++ b/drivers/net/xsc/xsc_rxtx.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_RXTX_H_
+#define _XSC_RXTX_H_
+
+#include <rte_byteorder.h>
+
+struct xsc_wqe_data_seg {
+ union {
+ struct {
+ uint8_t in_line:1;
+ uint8_t rsv0:7;
+ };
+ struct {
+ rte_le32_t rsv1:1;
+ rte_le32_t seg_len:31;
+ rte_le32_t lkey;
+ rte_le64_t va;
+ };
+ struct {
+ uint8_t rsv2:1;
+ uint8_t len:7;
+ uint8_t in_line_data[15];
+ };
+ };
+} __rte_packed;
+
+struct xsc_cqe {
+ union {
+ uint8_t msg_opcode;
+ struct {
+ uint8_t error_code:7;
+ uint8_t is_error:1;
+ };
+ };
+ rte_le16_t qp_id:15;
+ rte_le16_t rsv:1;
+ uint8_t se:1;
+ uint8_t has_pph:1;
+ uint8_t type:1;
+ uint8_t with_immdt:1;
+ uint8_t csum_err:4;
+ rte_le32_t imm_data;
+ rte_le32_t msg_len;
+ rte_le32_t vni;
+ rte_le32_t tsl;
+ rte_le32_t tsh:16;
+ rte_le32_t wqe_id:16;
+ rte_le16_t rsv2[3];
+ rte_le16_t rsv3:15;
+ rte_le16_t owner:1;
+} __rte_packed;
+
+struct xsc_tx_cq_params {
+ uint16_t port_id;
+ uint16_t qp_id;
+ uint16_t elts_n;
+};
+
+struct xsc_tx_cq_info {
+ void *cq;
+ void *cqes;
+ uint32_t *cq_db;
+ uint32_t cqn;
+ uint16_t cqe_s;
+ uint16_t cqe_n;
+};
+
+struct xsc_tx_qp_params {
+ void *cq;
+ uint64_t tx_offloads;
+ uint16_t port_id;
+ uint16_t qp_id;
+ uint16_t elts_n;
+};
+
+struct xsc_tx_qp_info {
+ void *qp;
+ void *wqes;
+ uint32_t *qp_db;
+ uint32_t qpn;
+ uint16_t tso_en;
+ uint16_t wqe_n;
+};
+
+struct xsc_rx_cq_params {
+ uint16_t port_id;
+ uint16_t qp_id;
+ uint16_t wqe_s;
+};
+
+struct xsc_rx_cq_info {
+ void *cq;
+ void *cqes;
+ uint32_t *cq_db;
+ uint32_t cqn;
+ uint16_t cqe_n;
+};
+
+#endif /* _XSC_RXTX_H_ */
diff --git a/drivers/net/xsc/xsc_vfio.c b/drivers/net/xsc/xsc_vfio.c
new file mode 100644
index 0000000000..1142aedeac
--- /dev/null
+++ b/drivers/net/xsc/xsc_vfio.c
@@ -0,0 +1,750 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bitops.h>
+
+#include "xsc_defs.h"
+#include "xsc_vfio_mbox.h"
+#include "xsc_ethdev.h"
+#include "xsc_rxtx.h"
+
+#define XSC_FEATURE_ONCHIP_FT_MASK RTE_BIT32(4)
+#define XSC_FEATURE_DMA_RW_TBL_MASK RTE_BIT32(8)
+#define XSC_FEATURE_PCT_EXP_MASK RTE_BIT32(19)
+#define XSC_HOST_PCIE_NO_DEFAULT 0
+#define XSC_SOC_PCIE_NO_DEFAULT 1
+
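+/* HW MTU adds the 14-byte Ethernet header plus 4 bytes (FCS, assumed); */
+/* the minimum Rx buffer length reserves a further 256 bytes (assumed headroom). */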
+#define XSC_SW2HW_MTU(mtu) ((mtu) + 14 + 4)
+#define XSC_SW2HW_RX_PKT_LEN(mtu) ((mtu) + 14 + 256)
+
+enum xsc_cq_type {
+ XSC_CQ_TYPE_NORMAL = 0,
+ XSC_CQ_TYPE_VIRTIO = 1,
+};
+
+struct xsc_vfio_cq {
+ const struct rte_memzone *mz;
+ struct xsc_dev *xdev;
+ uint32_t cqn;
+};
+
+struct xsc_vfio_qp {
+ const struct rte_memzone *mz;
+ struct xsc_dev *xdev;
+ uint32_t qpn;
+};
+
+static void
+xsc_vfio_pcie_no_init(struct xsc_hwinfo *hwinfo)
+{
+ uint32_t func_id = hwinfo->func_id;
+
+ if (func_id >= hwinfo->pf0_vf_funcid_base &&
+ func_id <= hwinfo->pf0_vf_funcid_top)
+ hwinfo->pcie_no = hwinfo->pcie_host;
+ else if (func_id >= hwinfo->pf1_vf_funcid_base &&
+ func_id <= hwinfo->pf1_vf_funcid_top)
+ hwinfo->pcie_no = hwinfo->pcie_host;
+ else if (func_id >= hwinfo->pcie0_pf_funcid_base &&
+ func_id <= hwinfo->pcie0_pf_funcid_top)
+ hwinfo->pcie_no = XSC_HOST_PCIE_NO_DEFAULT;
+ else
+ hwinfo->pcie_no = XSC_SOC_PCIE_NO_DEFAULT;
+}
+
+static int
+xsc_vfio_hwinfo_init(struct xsc_dev *xdev)
+{
+ int ret;
+ uint32_t feature;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_query_hca_cap_mbox_in *in;
+ struct xsc_cmd_query_hca_cap_mbox_out *out;
+ struct xsc_cmd_hca_cap *hca_cap;
+
+ in_len = sizeof(struct xsc_cmd_query_hca_cap_mbox_in);
+ out_len = sizeof(struct xsc_cmd_query_hca_cap_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ in = malloc(cmd_len);
+ if (in == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc dev hwinfo cmd memory");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+
+ memset(in, 0, cmd_len);
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_QUERY_HCA_CAP);
+ in->hdr.ver = rte_cpu_to_be_16(XSC_CMD_QUERY_HCA_CAP_V1);
+ out = (struct xsc_cmd_query_hca_cap_mbox_out *)in;
+
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to get dev hwinfo, err=%d, out.status=%u",
+ ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+
+ hca_cap = &out->hca_cap;
+ xdev->hwinfo.valid = 1;
+ xdev->hwinfo.func_id = rte_be_to_cpu_32(hca_cap->glb_func_id);
+ xdev->hwinfo.pcie_host = hca_cap->pcie_host;
+ xdev->hwinfo.mac_phy_port = hca_cap->mac_port;
+ xdev->hwinfo.funcid_to_logic_port_off = rte_be_to_cpu_16(hca_cap->funcid_to_logic_port);
+ xdev->hwinfo.raw_qp_id_base = rte_be_to_cpu_16(hca_cap->raweth_qp_id_base);
+ xdev->hwinfo.raw_rss_qp_id_base = rte_be_to_cpu_16(hca_cap->raweth_rss_qp_id_base);
+ xdev->hwinfo.pf0_vf_funcid_base = rte_be_to_cpu_16(hca_cap->pf0_vf_funcid_base);
+ xdev->hwinfo.pf0_vf_funcid_top = rte_be_to_cpu_16(hca_cap->pf0_vf_funcid_top);
+ xdev->hwinfo.pf1_vf_funcid_base = rte_be_to_cpu_16(hca_cap->pf1_vf_funcid_base);
+ xdev->hwinfo.pf1_vf_funcid_top = rte_be_to_cpu_16(hca_cap->pf1_vf_funcid_top);
+ xdev->hwinfo.pcie0_pf_funcid_base = rte_be_to_cpu_16(hca_cap->pcie0_pf_funcid_base);
+ xdev->hwinfo.pcie0_pf_funcid_top = rte_be_to_cpu_16(hca_cap->pcie0_pf_funcid_top);
+ xdev->hwinfo.pcie1_pf_funcid_base = rte_be_to_cpu_16(hca_cap->pcie1_pf_funcid_base);
+ xdev->hwinfo.pcie1_pf_funcid_top = rte_be_to_cpu_16(hca_cap->pcie1_pf_funcid_top);
+ xdev->hwinfo.lag_port_start = hca_cap->lag_logic_port_ofst;
+ xdev->hwinfo.raw_tpe_qp_num = rte_be_to_cpu_16(hca_cap->raw_tpe_qp_num);
+ xdev->hwinfo.send_seg_num = hca_cap->send_seg_num;
+ xdev->hwinfo.recv_seg_num = hca_cap->recv_seg_num;
+ feature = rte_be_to_cpu_32(hca_cap->feature_flag);
+ xdev->hwinfo.on_chip_tbl_vld = (feature & XSC_FEATURE_ONCHIP_FT_MASK) ? 1 : 0;
+ xdev->hwinfo.dma_rw_tbl_vld = (feature & XSC_FEATURE_DMA_RW_TBL_MASK) ? 1 : 0;
+ xdev->hwinfo.pct_compress_vld = (feature & XSC_FEATURE_PCT_EXP_MASK) ? 1 : 0;
+ xdev->hwinfo.chip_version = rte_be_to_cpu_32(hca_cap->chip_ver_l);
+ xdev->hwinfo.hca_core_clock = rte_be_to_cpu_32(hca_cap->hca_core_clock);
+ xdev->hwinfo.mac_bit = hca_cap->mac_bit;
+ xsc_vfio_pcie_no_init(&xdev->hwinfo);
+
+exit:
+ free(in);
+ return ret;
+}
+
+static int
+xsc_vfio_dev_open(struct xsc_dev *xdev)
+{
+ struct rte_pci_addr *addr = &xdev->pci_dev->addr;
+ struct xsc_vfio_priv *priv;
+
+ snprintf(xdev->name, PCI_PRI_STR_SIZE, PCI_PRI_FMT,
+ addr->domain, addr->bus, addr->devid, addr->function);
+
+ priv = rte_zmalloc(NULL, sizeof(*priv), RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc xsc vfio priv");
+ return -ENOMEM;
+ }
+
+ xdev->dev_priv = (void *)priv;
+ return 0;
+}
+
+static int
+xsc_vfio_bar_init(struct xsc_dev *xdev)
+{
+ int ret;
+
+ ret = rte_pci_map_device(xdev->pci_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to map pci device");
+ return -EINVAL;
+ }
+
+ xdev->bar_len = xdev->pci_dev->mem_resource[0].len;
+ xdev->bar_addr = (void *)xdev->pci_dev->mem_resource[0].addr;
+ if (xdev->bar_addr == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to attach dev(%s) bar", xdev->pci_dev->device.name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+xsc_vfio_dev_close(struct xsc_dev *xdev)
+{
+ struct xsc_vfio_priv *vfio_priv = (struct xsc_vfio_priv *)xdev->dev_priv;
+
+ xsc_vfio_mbox_destroy(vfio_priv->cmdq);
+ rte_free(vfio_priv);
+
+ return 0;
+}
+
+static int
+xsc_vfio_destroy_qp(void *qp)
+{
+ int ret;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_destroy_qp_mbox_in *in;
+ struct xsc_cmd_destroy_qp_mbox_out *out;
+ struct xsc_vfio_qp *data = (struct xsc_vfio_qp *)qp;
+
+ in_len = sizeof(struct xsc_cmd_destroy_qp_mbox_in);
+ out_len = sizeof(struct xsc_cmd_destroy_qp_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ in = malloc(cmd_len);
+ if (in == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc qp destroy cmd memory");
+ return -rte_errno;
+ }
+ memset(in, 0, cmd_len);
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_DESTROY_QP);
+ in->qpn = rte_cpu_to_be_32(data->qpn);
+ out = (struct xsc_cmd_destroy_qp_mbox_out *)in;
+ ret = xsc_vfio_mbox_exec(data->xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to destroy qp, type=%d, err=%d, out.status=%u",
+ XSC_QUEUE_TYPE_RAW, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+
+ rte_memzone_free(data->mz);
+ rte_free(qp);
+
+exit:
+ free(in);
+ return ret;
+}
+
+static int
+xsc_vfio_destroy_cq(void *cq)
+{
+ int ret;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_destroy_cq_mbox_in *in;
+ struct xsc_cmd_destroy_cq_mbox_out *out;
+ struct xsc_vfio_cq *data = (struct xsc_vfio_cq *)cq;
+
+ in_len = sizeof(struct xsc_cmd_destroy_cq_mbox_in);
+ out_len = sizeof(struct xsc_cmd_destroy_cq_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ in = malloc(cmd_len);
+ if (in == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc cq destroy cmd memory");
+ return -rte_errno;
+ }
+ memset(in, 0, cmd_len);
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_DESTROY_CQ);
+ in->cqn = rte_cpu_to_be_32(data->cqn);
+ out = (struct xsc_cmd_destroy_cq_mbox_out *)in;
+ ret = xsc_vfio_mbox_exec(data->xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to destroy cq, type=%d, err=%d, out.status=%u",
+ XSC_QUEUE_TYPE_RAW, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+
+ rte_memzone_free(data->mz);
+ rte_free(cq);
+
+exit:
+ free(in);
+ return ret;
+}
+
+static int
+xsc_vfio_set_mtu(struct xsc_dev *xdev, uint16_t mtu)
+{
+ struct xsc_cmd_set_mtu_mbox_in in;
+ struct xsc_cmd_set_mtu_mbox_out out;
+ int ret;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_SET_MTU);
+ in.mtu = rte_cpu_to_be_16(XSC_SW2HW_MTU(mtu));
+ in.rx_buf_sz_min = rte_cpu_to_be_16(XSC_SW2HW_RX_PKT_LEN(mtu));
+ in.mac_port = (uint8_t)xdev->hwinfo.mac_phy_port;
+
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set mtu, port=%d, err=%d, out.status=%u",
+ xdev->port_id, ret, out.hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ }
+
+ return ret;
+}
+
+static int
+xsc_vfio_get_mac(struct xsc_dev *xdev, uint8_t *mac)
+{
+ struct xsc_cmd_query_eth_mac_mbox_in in;
+ struct xsc_cmd_query_eth_mac_mbox_out out;
+ int ret;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_QUERY_ETH_MAC);
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to get mtu, port=%d, err=%d, out.status=%u",
+ xdev->port_id, ret, out.hdr.status);
+ rte_errno = ENOEXEC;
+ return -rte_errno;
+ }
+
+ memcpy(mac, out.mac, RTE_ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+static int
+xsc_vfio_modify_qp_status(struct xsc_dev *xdev, uint32_t qpn, int num, int opcode)
+{
+ int i, ret = 0; /* remains 0 when num == 0 */
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_modify_qp_mbox_in *in;
+ struct xsc_cmd_modify_qp_mbox_out *out;
+
+ in_len = sizeof(struct xsc_cmd_modify_qp_mbox_in);
+ out_len = sizeof(struct xsc_cmd_modify_qp_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ in = malloc(cmd_len);
+ if (in == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc cmdq qp modify status");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+
+ memset(in, 0, cmd_len);
+ out = (struct xsc_cmd_modify_qp_mbox_out *)in;
+
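+ /* Apply the opcode to 'num' consecutive QPs starting at qpn */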
+ for (i = 0; i < num; i++) {
+ in->hdr.opcode = rte_cpu_to_be_16(opcode);
+ in->hdr.ver = 0;
+ in->qpn = rte_cpu_to_be_32(qpn + i);
+ in->no_need_wait = 1;
+
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Modify qp status failed, qpn=%d, err=%d, out.status=%u",
+ qpn + i, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+ }
+
+exit:
+ free(in);
+ return ret;
+}
+
+static int
+xsc_vfio_modify_qp_qostree(struct xsc_dev *xdev, uint16_t qpn)
+{
+ int ret;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_modify_raw_qp_mbox_in *in;
+ struct xsc_cmd_modify_raw_qp_mbox_out *out;
+
+ in_len = sizeof(struct xsc_cmd_modify_raw_qp_mbox_in);
+ out_len = sizeof(struct xsc_cmd_modify_raw_qp_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ in = malloc(cmd_len);
+ if (in == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc cmdq qp modify qostree");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+
+ memset(in, 0, cmd_len);
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_MODIFY_RAW_QP);
+ in->req.prio = 0;
+ in->req.qp_out_port = 0xFF;
+ in->req.lag_id = rte_cpu_to_be_16(xdev->hwinfo.lag_id);
+ in->req.func_id = rte_cpu_to_be_16(xdev->hwinfo.func_id);
+ in->req.dma_direct = 0;
+ in->req.qpn = rte_cpu_to_be_16(qpn);
+ out = (struct xsc_cmd_modify_raw_qp_mbox_out *)in;
+
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Filed to modify qp qostree, qpn=%d, err=%d, out.status=%u",
+ qpn, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+
+exit:
+ free(in);
+ return ret;
+}
+
+static int
+xsc_vfio_rx_cq_create(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
+ struct xsc_rx_cq_info *cq_info)
+{
+ int ret;
+ int pa_len;
+ uint16_t i;
+ uint16_t pa_num;
+ uint8_t log_cq_sz;
+ uint16_t cqe_n;
+ uint32_t cqe_total_sz;
+ int in_len, out_len, cmd_len;
+ char name[RTE_ETH_NAME_MAX_LEN] = { 0 };
+ uint16_t port_id = cq_params->port_id;
+ uint16_t idx = cq_params->qp_id;
+ struct xsc_vfio_cq *cq;
+ const struct rte_memzone *cq_pas = NULL;
+ volatile struct xsc_cqe (*cqes)[];
+ struct xsc_cmd_create_cq_mbox_in *in = NULL;
+ struct xsc_cmd_create_cq_mbox_out *out = NULL;
+
+ cqe_n = cq_params->wqe_s;
+ log_cq_sz = rte_log2_u32(cqe_n);
+ cqe_total_sz = cqe_n * sizeof(struct xsc_cqe);
+ pa_num = (cqe_total_sz + XSC_PAGE_SIZE - 1) / XSC_PAGE_SIZE;
+ pa_len = sizeof(uint64_t) * pa_num;
+ in_len = sizeof(struct xsc_cmd_create_cq_mbox_in) + pa_len;
+ out_len = sizeof(struct xsc_cmd_create_cq_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ cq = rte_zmalloc(NULL, sizeof(struct xsc_vfio_cq), 0);
+ if (cq == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc rx cq memory");
+ return -rte_errno;
+ }
+
+ in = malloc(cmd_len);
+ if (in == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc rx cq exec cmd memory");
+ goto error;
+ }
+ memset(in, 0, cmd_len);
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_CREATE_CQ);
+ in->ctx.eqn = 0;
+ in->ctx.pa_num = rte_cpu_to_be_16(pa_num);
+ in->ctx.glb_func_id = rte_cpu_to_be_16((uint16_t)xdev->hwinfo.func_id);
+ in->ctx.log_cq_sz = log_cq_sz;
+ in->ctx.cq_type = XSC_CQ_TYPE_NORMAL;
+
+ snprintf(name, sizeof(name), "mz_cqe_mem_rx_%u_%u", port_id, idx);
+ cq_pas = rte_memzone_reserve_aligned(name,
+ (XSC_PAGE_SIZE * pa_num),
+ SOCKET_ID_ANY,
+ 0, XSC_PAGE_SIZE);
+ if (cq_pas == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc rx cq pas memory");
+ goto error;
+ }
+ cq->mz = cq_pas;
+
+ for (i = 0; i < pa_num; i++)
+ in->pas[i] = rte_cpu_to_be_64(cq_pas->iova + i * XSC_PAGE_SIZE);
+
+ out = (struct xsc_cmd_create_cq_mbox_out *)in;
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to exec rx cq create cmd, port id=%d, err=%d, out.status=%u",
+ port_id, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ goto error;
+ }
+
+ cq_info->cq = (void *)cq;
+ cq_info->cqe_n = log_cq_sz;
+ cqes = (volatile struct xsc_cqe (*)[])(uintptr_t)cq_pas->addr;
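+ /* Pre-set the owner bit on every CQE so hardware flips it on first use (assumed ownership convention) */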
+ for (i = 0; i < (1 << cq_info->cqe_n); i++)
+ (*cqes)[i].owner = 1;
+ cq_info->cqes = (void *)cqes;
+ if (xsc_dev_is_vf(xdev))
+ cq_info->cq_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_VF_CQ_DB_ADDR);
+ else
+ cq_info->cq_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_PF_CQ_DB_ADDR);
+ cq_info->cqn = rte_be_to_cpu_32(out->cqn);
+ cq->cqn = cq_info->cqn;
+ cq->xdev = xdev;
+ PMD_DRV_LOG(INFO, "Port id=%d, Rx cqe_n:%d, cqn:%d",
+ port_id, cq_info->cqe_n, cq_info->cqn);
+
+ free(in);
+ return 0;
+
+error:
+ free(in);
+ rte_memzone_free(cq_pas);
+ rte_free(cq);
+ return -rte_errno;
+}
+
+static int
+xsc_vfio_tx_cq_create(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
+ struct xsc_tx_cq_info *cq_info)
+{
+ struct xsc_vfio_cq *cq = NULL;
+ char name[RTE_ETH_NAME_MAX_LEN] = {0};
+ struct xsc_cmd_create_cq_mbox_in *in = NULL;
+ struct xsc_cmd_create_cq_mbox_out *out = NULL;
+ const struct rte_memzone *cq_pas = NULL;
+ struct xsc_cqe *cqes;
+ int in_len, out_len, cmd_len;
+ uint16_t pa_num;
+ uint16_t log_cq_sz;
+ int ret = 0;
+ int cqe_s = 1 << cq_params->elts_n;
+ uint64_t iova;
+ int i;
+
+ cq = rte_zmalloc(NULL, sizeof(struct xsc_vfio_cq), 0);
+ if (cq == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx cq memory");
+ return -rte_errno;
+ }
+
+ log_cq_sz = rte_log2_u32(cqe_s);
+ pa_num = ((1 << log_cq_sz) * sizeof(struct xsc_cqe) + XSC_PAGE_SIZE - 1) / XSC_PAGE_SIZE;
+
+ snprintf(name, sizeof(name), "mz_cqe_mem_tx_%u_%u", cq_params->port_id, cq_params->qp_id);
+ cq_pas = rte_memzone_reserve_aligned(name,
+ (XSC_PAGE_SIZE * pa_num),
+ SOCKET_ID_ANY,
+ 0, XSC_PAGE_SIZE);
+ if (cq_pas == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx cq pas memory");
+ goto error;
+ }
+
+ cq->mz = cq_pas;
+ in_len = (sizeof(struct xsc_cmd_create_cq_mbox_in) + (pa_num * sizeof(uint64_t)));
+ out_len = sizeof(struct xsc_cmd_create_cq_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+ in = (struct xsc_cmd_create_cq_mbox_in *)malloc(cmd_len);
+ if (in == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx cq exec cmd memory");
+ goto error;
+ }
+ memset(in, 0, cmd_len);
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_CREATE_CQ);
+ in->ctx.eqn = 0;
+ in->ctx.pa_num = rte_cpu_to_be_16(pa_num);
+ in->ctx.glb_func_id = rte_cpu_to_be_16((uint16_t)xdev->hwinfo.func_id);
+ in->ctx.log_cq_sz = log_cq_sz;
+ in->ctx.cq_type = XSC_CQ_TYPE_NORMAL;
+ iova = cq->mz->iova;
+ for (i = 0; i < pa_num; i++)
+ in->pas[i] = rte_cpu_to_be_64(iova + i * XSC_PAGE_SIZE);
+
+ out = (struct xsc_cmd_create_cq_mbox_out *)in;
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to create tx cq, port id=%u, err=%d, out.status=%u",
+ cq_params->port_id, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ goto error;
+ }
+
+ cq->cqn = rte_be_to_cpu_32(out->cqn);
+ cq->xdev = xdev;
+
+ cq_info->cq = cq;
+ cqes = (struct xsc_cqe *)cq->mz->addr;
+ if (xsc_dev_is_vf(xdev))
+ cq_info->cq_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_VF_CQ_DB_ADDR);
+ else
+ cq_info->cq_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_PF_CQ_DB_ADDR);
+ cq_info->cqn = cq->cqn;
+ cq_info->cqe_s = cqe_s;
+ cq_info->cqe_n = log_cq_sz;
+
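+ /* Initialize CQE owner bits, mirroring the Rx CQ setup */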
+ for (i = 0; i < cq_info->cqe_s; i++)
+ ((volatile struct xsc_cqe *)(cqes + i))->owner = 1;
+ cq_info->cqes = cqes;
+
+ free(in);
+ return 0;
+
+error:
+ free(in);
+ rte_memzone_free(cq_pas);
+ rte_free(cq);
+ return -rte_errno;
+}
+
+static int
+xsc_vfio_tx_qp_create(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
+ struct xsc_tx_qp_info *qp_info)
+{
+ struct xsc_cmd_create_qp_mbox_in *in = NULL;
+ struct xsc_cmd_create_qp_mbox_out *out = NULL;
+ const struct rte_memzone *qp_pas = NULL;
+ struct xsc_vfio_cq *cq = (struct xsc_vfio_cq *)qp_params->cq;
+ struct xsc_vfio_qp *qp = NULL;
+ int in_len, out_len, cmd_len;
+ int ret = 0;
+ uint32_t send_ds_num = xdev->hwinfo.send_seg_num;
+ int wqe_s = 1 << qp_params->elts_n;
+ uint16_t pa_num;
+ uint8_t log_ele = 0;
+ uint32_t log_rq_sz = 0;
+ uint32_t log_sq_sz = 0;
+ int i;
+ uint64_t iova;
+ char name[RTE_ETH_NAME_MAX_LEN] = {0};
+
+ qp = rte_zmalloc(NULL, sizeof(struct xsc_vfio_qp), 0);
+ if (qp == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx qp memory");
+ return -rte_errno;
+ }
+
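+ /* SQ sizing: wqe_s WQEs of send_ds_num data segments each; bytes rounded up to whole pages */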
+ log_sq_sz = rte_log2_u32(wqe_s * send_ds_num);
+ log_ele = rte_log2_u32(sizeof(struct xsc_wqe_data_seg));
+ pa_num = ((1 << (log_rq_sz + log_sq_sz + log_ele)) + XSC_PAGE_SIZE - 1) / XSC_PAGE_SIZE;
+
+ snprintf(name, sizeof(name), "mz_wqe_mem_tx_%u_%u", qp_params->port_id, qp_params->qp_id);
+ qp_pas = rte_memzone_reserve_aligned(name,
+ (XSC_PAGE_SIZE * pa_num),
+ SOCKET_ID_ANY,
+ 0, XSC_PAGE_SIZE);
+ if (qp_pas == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx qp pas memory");
+ goto error;
+ }
+ qp->mz = qp_pas;
+
+ in_len = (sizeof(struct xsc_cmd_create_qp_mbox_in) + (pa_num * sizeof(uint64_t)));
+ out_len = sizeof(struct xsc_cmd_create_qp_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+ in = (struct xsc_cmd_create_qp_mbox_in *)malloc(cmd_len);
+ if (in == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx qp exec cmd memory");
+ goto error;
+ }
+ memset(in, 0, cmd_len);
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_CREATE_QP);
+ in->req.input_qpn = 0;
+ in->req.pa_num = rte_cpu_to_be_16(pa_num);
+ in->req.qp_type = XSC_QUEUE_TYPE_RAW_TX;
+ in->req.log_sq_sz = log_sq_sz;
+ in->req.log_rq_sz = log_rq_sz;
+ in->req.dma_direct = 0;
+ in->req.pdn = 0;
+ in->req.cqn_send = rte_cpu_to_be_16((uint16_t)cq->cqn);
+ in->req.cqn_recv = 0;
+ in->req.glb_funcid = rte_cpu_to_be_16((uint16_t)xdev->hwinfo.func_id);
+ iova = qp->mz->iova;
+ for (i = 0; i < pa_num; i++)
+ in->req.pas[i] = rte_cpu_to_be_64(iova + i * XSC_PAGE_SIZE);
+
+ out = (struct xsc_cmd_create_qp_mbox_out *)in;
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to create tx qp, port id=%u, err=%d, out.status=%u",
+ qp_params->port_id, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ goto error;
+ }
+
+ qp->qpn = rte_be_to_cpu_32(out->qpn);
+ qp->xdev = xdev;
+
+ qp_info->qp = qp;
+ qp_info->qpn = qp->qpn;
+ qp_info->wqes = (struct xsc_wqe *)qp->mz->addr;
+ qp_info->wqe_n = rte_log2_u32(wqe_s);
+
+ if (xsc_dev_is_vf(xdev))
+ qp_info->qp_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_VF_TX_DB_ADDR);
+ else
+ qp_info->qp_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_PF_TX_DB_ADDR);
+
+ free(in);
+ return 0;
+
+error:
+ free(in);
+ rte_memzone_free(qp_pas);
+ rte_free(qp);
+ return -rte_errno;
+}
+
+static int
+xsc_vfio_dev_init(struct xsc_dev *xdev)
+{
+ int ret;
+
+ ret = xsc_vfio_dev_open(xdev);
+ if (ret != 0)
+ goto open_fail;
+
+ ret = xsc_vfio_bar_init(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ if (xsc_vfio_mbox_init(xdev) != 0)
+ goto init_fail;
+
+ ret = xsc_vfio_hwinfo_init(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ return 0;
+
+init_fail:
+ xsc_vfio_dev_close(xdev);
+
+open_fail:
+ return -1;
+}
+
+static struct xsc_dev_ops *xsc_vfio_ops = &(struct xsc_dev_ops) {
+ .kdrv = RTE_PCI_KDRV_VFIO,
+ .dev_init = xsc_vfio_dev_init,
+ .dev_close = xsc_vfio_dev_close,
+ .set_mtu = xsc_vfio_set_mtu,
+ .get_mac = xsc_vfio_get_mac,
+ .destroy_qp = xsc_vfio_destroy_qp,
+ .destroy_cq = xsc_vfio_destroy_cq,
+ .modify_qp_status = xsc_vfio_modify_qp_status,
+ .modify_qp_qostree = xsc_vfio_modify_qp_qostree,
+ .rx_cq_create = xsc_vfio_rx_cq_create,
+ .tx_cq_create = xsc_vfio_tx_cq_create,
+ .tx_qp_create = xsc_vfio_tx_qp_create,
+ .mailbox_exec = xsc_vfio_mbox_exec,
+};
+
+RTE_INIT(xsc_vfio_ops_reg)
+{
+ xsc_dev_ops_register(xsc_vfio_ops);
+}
--
2.25.1