From: "WanRenyong" <wanry@yunsilicon.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@amd.com>, "WanRenyong" <wanry@yunsilicon.com>
Subject: [PATCH v3 05/19] net/xsc: add ioctl command interface
Date: Wed, 18 Sep 2024 14:09:22 +0800 [thread overview]
Message-ID: <20240918060936.1231758-6-wanry@yunsilicon.com> (raw)
The IOCTL command interface is one of the methods the PMD uses to
interact with firmware. Using the ioctl interface, the PMD sends a
command to the kernel module; the kernel module translates the command
and forwards it to the firmware, and finally sends the firmware's
result back to the PMD.
Signed-off-by: WanRenyong <wanry@yunsilicon.com>
Signed-off-by: WanRenyong <wanry@yunsilicon.com>
---
v3:
* use malloc instead of rte_zmalloc
* use memcpy instead of rte_memcpy
* use flexible array member (FAM) instead of zero length array (ZLA)
---
drivers/net/xsc/meson.build | 1 +
drivers/net/xsc/xsc_ctrl.c | 57 ++++++++++++++++++++++++
drivers/net/xsc/xsc_ctrl.h | 86 +++++++++++++++++++++++++++++++++++++
3 files changed, 144 insertions(+)
create mode 100644 drivers/net/xsc/xsc_ctrl.c
create mode 100644 drivers/net/xsc/xsc_ctrl.h
diff --git a/drivers/net/xsc/meson.build b/drivers/net/xsc/meson.build
index 57d67291df..a4d7f4a884 100644
--- a/drivers/net/xsc/meson.build
+++ b/drivers/net/xsc/meson.build
@@ -15,6 +15,7 @@ sources = files(
'xsc_ethdev.c',
'xsc_dev.c',
'xsc_utils.c',
+ 'xsc_ctrl.c',
)
libnames = ['ibverbs']
diff --git a/drivers/net/xsc/xsc_ctrl.c b/drivers/net/xsc/xsc_ctrl.c
new file mode 100644
index 0000000000..956a8549e6
--- /dev/null
+++ b/drivers/net/xsc/xsc_ctrl.c
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2024 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <stddef.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include "xsc_log.h"
+#include "xsc_dev.h"
+#include "xsc_ctrl.h"
+
+int
+xsc_ioctl(struct xsc_dev *dev, int cmd, int opcode,
+	  void *data_in, int in_len, void *data_out, int out_len)
+{
+	struct xsc_ioctl_hdr *hdr;
+	int data_len = RTE_MAX(in_len, out_len);	/* one buffer serves both request and reply */
+	int alloc_len = sizeof(struct xsc_ioctl_hdr) + data_len;
+	int ret = 0;
+
+	hdr = malloc(alloc_len);
+	if (hdr == NULL) {	/* must check before any write: memset on NULL is UB */
+		PMD_DRV_LOG(ERR, "Failed to allocate xsc ioctl cmd memory");
+		return -ENOMEM;
+	}
+	memset(hdr, 0, alloc_len);
+
+	hdr->check_field = XSC_IOCTL_CHECK_FIELD;
+	hdr->attr.opcode = opcode;
+	hdr->attr.length = data_len;
+	hdr->attr.error = 0;
+
+	if (data_in != NULL && in_len > 0)
+		memcpy(hdr + 1, data_in, in_len);	/* payload lives right after the header */
+
+	ret = ioctl(dev->ctrl_fd, cmd, hdr);
+	if (ret == 0) {
+		if (hdr->attr.error != 0)
+			ret = hdr->attr.error;	/* firmware-reported error wins over success */
+		else if (data_out != NULL && out_len > 0)
+			memcpy(data_out, hdr + 1, out_len);
+	}
+
+	free(hdr);
+	return ret;
+}
diff --git a/drivers/net/xsc/xsc_ctrl.h b/drivers/net/xsc/xsc_ctrl.h
new file mode 100644
index 0000000000..833984745b
--- /dev/null
+++ b/drivers/net/xsc/xsc_ctrl.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2024 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_CTRL_H_
+#define _XSC_CTRL_H_
+
+#include <sys/ioctl.h>
+
+#define XSC_IOCTL_CHECK_FIELD 0x01234567	/* magic written to hdr->check_field; kernel validates it */
+
+#define XSC_IOCTL_MAGIC 0x1b	/* ioctl type byte for all XSC commands */
+#define XSC_IOCTL_CMDQ \
+	_IOWR(XSC_IOCTL_MAGIC, 1, struct xsc_ioctl_hdr)
+#define XSC_IOCTL_DRV_GET \
+	_IOR(XSC_IOCTL_MAGIC, 2, struct xsc_ioctl_hdr)
+#define XSC_IOCTL_CMDQ_RAW \
+	_IOWR(XSC_IOCTL_MAGIC, 5, struct xsc_ioctl_hdr)
+
+enum xsc_ioctl_opcode {
+	XSC_IOCTL_GET_HW_INFO = 0x100,	/* query hardware info; reply is struct xsc_ioctl_get_hwinfo */
+};
+
+enum xsc_ioctl_opmod {
+	XSC_IOCTL_OP_GET_LOCAL,	/* operation modifier -- NOTE(review): usage not shown in this patch */
+};
+
+struct xsc_ioctl_attr {
+	uint16_t opcode; /* ioctl cmd, from enum xsc_ioctl_opcode */
+	uint16_t length; /* data length: max(in_len, out_len) set by xsc_ioctl() */
+	uint32_t error;  /* ioctl error info; nonzero means firmware rejected the command */
+	uint8_t data[];  /* variable-length payload: command data in, result data out */
+};
+
+struct xsc_ioctl_hdr {
+	uint32_t check_field;	/* must hold XSC_IOCTL_CHECK_FIELD */
+	uint32_t domain;	/* presumably PCI domain of target device -- TODO confirm */
+	uint32_t bus;		/* presumably PCI bus number -- TODO confirm */
+	uint32_t devfn;		/* presumably PCI device/function -- TODO confirm */
+	struct xsc_ioctl_attr attr;	/* NOTE(review): member ends in a flexible array; valid as GCC extension, not strict C */
+};
+
+struct xsc_ioctl_data_tl {	/* table/opmod/length element header -- NOTE(review): not referenced in this patch */
+	uint16_t table;	/* target table id -- TODO confirm semantics */
+	uint16_t opmod;	/* presumably from enum xsc_ioctl_opmod */
+	uint16_t length;	/* length of the data following this header */
+	uint16_t rsvd;	/* reserved / padding */
+};
+
+struct xsc_ioctl_get_hwinfo {	/* reply payload for XSC_IOCTL_GET_HW_INFO -- field meanings per firmware ABI, TODO confirm */
+	uint32_t domain;
+	uint32_t bus;
+	uint32_t devfn;
+	uint32_t pcie_no;
+	uint32_t func_id;
+	uint32_t pcie_host;
+	uint32_t mac_phy_port;
+	uint32_t funcid_to_logic_port_off;
+	uint16_t lag_id;
+	uint16_t raw_qp_id_base;
+	uint16_t raw_rss_qp_id_base;
+	uint16_t pf0_vf_funcid_base;
+	uint16_t pf0_vf_funcid_top;
+	uint16_t pf1_vf_funcid_base;
+	uint16_t pf1_vf_funcid_top;
+	uint16_t pcie0_pf_funcid_base;
+	uint16_t pcie0_pf_funcid_top;
+	uint16_t pcie1_pf_funcid_base;
+	uint16_t pcie1_pf_funcid_top;
+	uint16_t lag_port_start;
+	uint16_t raw_tpe_qp_num;
+	int send_seg_num;
+	int recv_seg_num;
+	uint8_t on_chip_tbl_vld;
+	uint8_t dma_rw_tbl_vld;
+	uint8_t pct_compress_vld;
+	uint32_t chip_version;
+	uint32_t hca_core_clock;
+	uint8_t mac_bit;
+	uint8_t esw_mode;
+};
+
+int xsc_ioctl(struct xsc_dev *dev, int cmd, int opcode,
+ void *data_in, int in_len, void *data_out, int out_len);
+
+#endif /* _XSC_CTRL_H_ */
--
2.25.1
reply other threads:[~2024-09-18 6:10 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240918060936.1231758-6-wanry@yunsilicon.com \
--to=wanry@yunsilicon.com \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).