From: Ziyang Xuan <xuanziyang2@huawei.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@intel.com>, <cloud.wangxiaoyun@huawei.com>,
<zhouguoyang@huawei.com>, <shahar.belkar@huawei.com>,
<stephen@networkplumber.org>, <luoxianjun@huawei.com>,
Ziyang Xuan <xuanziyang2@huawei.com>
Subject: [dpdk-dev] [PATCH v4 03/11] net/hinic/base: add mgmt module interactive code
Date: Thu, 6 Jun 2019 19:15:25 +0800 [thread overview]
Message-ID: <ea888d7b179837e74206e476fb3c25059c222e3f.1559818024.git.xuanziyang2@huawei.com> (raw)
In-Reply-To: <cover.1559818024.git.xuanziyang2@huawei.com>
Add the structures and functionality for interacting with the
mgmt module.
Signed-off-by: Ziyang Xuan <xuanziyang2@huawei.com>
---
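Usage note (illustration only, not part of the patch): callers are expected
to fill one of the request structures from hinic_pmd_mgmt_interface.h and
push it over the synchronous channel with hinic_pf_to_mgmt_sync(). The
sketch below assumes a hypothetical L2NIC "set MAC" command ID (0x9 is a
placeholder); the real command IDs come from other headers in this series.

static int example_set_mac(void *hwdev, const u8 *mac, u16 func_id)
{
	struct hinic_port_mac_set mac_info;
	u16 out_size = sizeof(mac_info);

	memset(&mac_info, 0, sizeof(mac_info));
	mac_info.func_id = func_id;
	memcpy(mac_info.mac, mac, ETH_ALEN);

	/* placeholder command ID; timeout 0 selects MGMT_MSG_TIMEOUT */
	return hinic_pf_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, 0x9,
				     &mac_info, sizeof(mac_info),
				     &mac_info, &out_size, 0);
}
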
drivers/net/hinic/base/hinic_pmd_hw_mgmt.h | 85 +++
drivers/net/hinic/base/hinic_pmd_mgmt.c | 617 ++++++++++++++++++
drivers/net/hinic/base/hinic_pmd_mgmt.h | 125 ++++
.../net/hinic/base/hinic_pmd_mgmt_interface.h | 503 ++++++++++++++
4 files changed, 1330 insertions(+)
create mode 100644 drivers/net/hinic/base/hinic_pmd_hw_mgmt.h
create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt.c
create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt.h
create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt_interface.h
diff --git a/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h b/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h
new file mode 100644
index 000000000..5f3b12b7d
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_HW_MGMT_H_
+#define _HINIC_PMD_HW_MGMT_H_
+
+/* Each driver is shown only its own capability structure, such as
+ * nic_service_cap or toe_service_cap, but not the full service_cap
+ */
+enum hinic_service_type {
+ SERVICE_T_NIC = 0,
+ SERVICE_T_MAX = 7,
+
+	/* Only used for interrupt resource management,
+	 * to mark the requesting module
+	 */
+ SERVICE_T_INTF = (1 << 15),
+ SERVICE_T_CQM = (1 << 16),
+};
+
+enum intr_type {
+ INTR_TYPE_MSIX,
+ INTR_TYPE_MSI,
+ INTR_TYPE_INT,
+	/* PXE and OVS need single-threaded processing; synchronous
+	 * messages must use the poll-wait mechanism interface
+	 */
+ INTR_TYPE_NONE,
+};
+
+struct nic_service_cap {
+ /* PF resources */
+ u16 max_sqs;
+ u16 max_rqs;
+
+	/* VF resources; the VF obtains them from the corresponding PF
+	 * through the mailbox mechanism
+	 */
+ u16 vf_max_sqs;
+ u16 vf_max_rqs;
+
+ bool lro_en; /* LRO feature enable bit */
+ u8 lro_sz; /* LRO context space: n*16B */
+ u8 tso_sz; /* TSO context space: n*16B */
+};
+
+/* Defines the IRQ information structure */
+struct irq_info {
+ u16 msix_entry_idx; /* IRQ corresponding index number */
+ u32 irq_id; /* the IRQ number from OS */
+};
+
+/* Defines the version information structure */
+struct dev_version_info {
+	u8 up_ver; /* uP version, read directly from the uP,
+		    * not from the configuration file
+		    */
+ u8 ucode_ver; /* The microcode version,
+ * read through the CMDq from microcode
+ */
+ u8 cfg_file_ver; /* uP configuration file version */
+ u8 sdk_ver; /* SDK driver version */
+ u8 hw_ver; /* Hardware version */
+};
+
+/* Obtain service_cap.nic_cap.dev_nic_cap.max_sqs */
+u16 hinic_func_max_qnum(void *hwdev);
+
+u16 hinic_global_func_id(void *hwdev); /* func_attr.glb_func_idx */
+
+enum func_type {
+ TYPE_PF,
+ TYPE_VF,
+ TYPE_PPF,
+};
+
+enum hinic_msix_state {
+ HINIC_MSIX_ENABLE,
+ HINIC_MSIX_DISABLE,
+};
+
+enum func_type hinic_func_type(void *hwdev);
+
+#endif /* _HINIC_PMD_HW_MGMT_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt.c b/drivers/net/hinic/base/hinic_pmd_mgmt.c
new file mode 100644
index 000000000..61246fa48
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_mgmt.c
@@ -0,0 +1,617 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_pmd_dpdev.h"
+
+static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg,
+ void *param);
+
+#define BUF_OUT_DEFAULT_SIZE 1
+
+#define MAX_PF_MGMT_BUF_SIZE 2048UL
+
+#define MGMT_MSG_SIZE_MIN 20
+#define MGMT_MSG_SIZE_STEP 16
+#define MGMT_MSG_RSVD_FOR_DEV 8
+
+#define MGMT_MSG_TIMEOUT 5000 /* millisecond */
+
+#define SYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_FLAG 0x200
+
+#define MSG_NO_RESP 0xFFFF
+
+#define MAX_MSG_SZ 2016
+
+#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ)
+
+#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
+
+#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
+ (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)
+
+#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id)
+
+#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \
+ ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
+ | ASYNC_MSG_FLAG)
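+/* Async message ids always carry ASYNC_MSG_FLAG (bit 9), so they never
+ * collide with sync message ids, which stay within SYNC_MSG_ID_MASK.
+ */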
+
+#define HINIC_SEQ_ID_MAX_VAL 42
+#define HINIC_MSG_SEG_LEN 48
+
+/**
+ * mgmt_msg_len - calculate the total message length
+ * @msg_data_len: the length of the message data
+ * Return: the total message length
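+ * Example: msg_data_len = 7 gives 8 (rsvd) + 8 (header) + 7 = 23 bytes,
+ * which is rounded up to 20 + ALIGN(3, 16) = 36 bytes.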
+ **/
+static u16 mgmt_msg_len(u16 msg_data_len)
+{
+ /* u64 - the size of the header */
+ u16 msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) +
+ msg_data_len);
+
+ if (msg_size > MGMT_MSG_SIZE_MIN)
+ msg_size = MGMT_MSG_SIZE_MIN +
+ ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
+ MGMT_MSG_SIZE_STEP);
+ else
+ msg_size = MGMT_MSG_SIZE_MIN;
+
+ return msg_size;
+}
+
+/**
+ * prepare_header - prepare the header of the message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: pointer of the header to prepare
+ * @msg_len: the length of the message
+ * @mod: module in the chip that will get the message
+ * @ack_type: whether the message needs an ack
+ * @direction: the direction of the original message
+ * @cmd: the command to do
+ * @msg_id: message id
+ **/
+static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u64 *header, int msg_len, enum hinic_mod_type mod,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ u8 cmd, u32 msg_id)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif;
+
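+	/* PF-to-mgmt messages are sent as a single segment: SEQID 0, LAST set */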
+ *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MSG_HEADER_SET(mod, MODULE) |
+ HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HINIC_MSG_HEADER_SET(0, SEQID) |
+ HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+ HINIC_MSG_HEADER_SET(cmd, CMD) |
+ HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer of the header to prepare
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ **/
+static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg,
+ int msg_len)
+{
+ u32 cmd_buf_max = MAX_PF_MGMT_BUF_SIZE;
+
+ memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+ mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
+ cmd_buf_max -= MGMT_MSG_RSVD_FOR_DEV;
+ memcpy(mgmt_cmd, header, sizeof(*header));
+
+ mgmt_cmd += sizeof(*header);
+ cmd_buf_max -= sizeof(*header);
+ memcpy(mgmt_cmd, msg, msg_len);
+}
+
+/**
+ * alloc_recv_msg - allocate received message memory
+ * @recv_msg: pointer that will hold the allocated data
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+ int err;
+
+ recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->msg) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg buf failed");
+ return -ENOMEM;
+ }
+
+ recv_msg->buf_out = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->buf_out) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg output buf failed");
+ err = -ENOMEM;
+ goto alloc_buf_out_err;
+ }
+
+ return 0;
+
+alloc_buf_out_err:
+ kfree(recv_msg->msg);
+ return err;
+}
+
+/**
+ * free_recv_msg - free received message memory
+ * @recv_msg: pointer that holds the allocated data
+ **/
+static void free_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+ kfree(recv_msg->buf_out);
+ kfree(recv_msg->msg);
+}
+
+/**
+ * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ int err;
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg failed");
+ return err;
+ }
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate resp recv msg failed");
+ goto alloc_msg_for_resp_err;
+ }
+
+ pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->async_msg_buf) {
+ PMD_DRV_LOG(ERR, "Allocate async msg buf failed");
+ err = -ENOMEM;
+ goto async_msg_buf_err;
+ }
+
+ pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->sync_msg_buf) {
+ PMD_DRV_LOG(ERR, "Allocate sync msg buf failed");
+ err = -ENOMEM;
+ goto sync_msg_buf_err;
+ }
+
+ return 0;
+
+sync_msg_buf_err:
+ kfree(pf_to_mgmt->async_msg_buf);
+
+async_msg_buf_err:
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+
+alloc_msg_for_resp_err:
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+
+ return err;
+}
+
+/**
+ * free_msg_buf - free all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ **/
+static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ kfree(pf_to_mgmt->sync_msg_buf);
+ kfree(pf_to_mgmt->async_msg_buf);
+
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+/**
+ * send_msg_to_mgmt_async - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ * @direction: the direction of the original message
+ * @resp_msg_id: message id of response
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
+/**
+ * send_msg_to_mgmt_sync - send a sync message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the msg data
+ * @msg_len: the msg data length
+ * @ack_type: indicates whether the mgmt command needs an ack
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id of the message being responded to
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ __rte_unused u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, SYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST,
+ mgmt_cmd, cmd_size);
+}
+
+/**
+ * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
+ * @hwdev: the pointer to the private hardware device object
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ int err;
+
+ pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+ if (!pf_to_mgmt) {
+ PMD_DRV_LOG(ERR, "Allocate pf to mgmt mem failed");
+ return -ENOMEM;
+ }
+
+ hwdev->pf_to_mgmt = pf_to_mgmt;
+ pf_to_mgmt->hwdev = hwdev;
+
+ spin_lock_init(&pf_to_mgmt->async_msg_lock);
+ spin_lock_init(&pf_to_mgmt->sync_msg_lock);
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate msg buffers failed");
+ goto alloc_msg_buf_err;
+ }
+
+ err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Init the api cmd chains failed");
+ goto api_cmd_init_err;
+ }
+
+ return 0;
+
+api_cmd_init_err:
+ free_msg_buf(pf_to_mgmt);
+
+alloc_msg_buf_err:
+ kfree(pf_to_mgmt);
+
+ return err;
+}
+
+/**
+ * hinic_pf_to_mgmt_free - free PF to MGMT channel
+ * @hwdev: the pointer to the private hardware device object
+ **/
+void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+ hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+ free_msg_buf(pf_to_mgmt);
+ kfree(pf_to_mgmt);
+}
+
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
+ ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ struct hinic_recv_msg *recv_msg;
+ u32 timeo;
+ int err, i;
+
+ spin_lock(&pf_to_mgmt->sync_msg_lock);
+
+ SYNC_MSG_ID_INC(pf_to_mgmt);
+ recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Send msg to mgmt failed");
+ goto unlock_sync_msg;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+ for (i = 0; i < pf_to_mgmt->rx_aeq->poll_retry_nr; i++) {
+ err = hinic_aeq_poll_msg(pf_to_mgmt->rx_aeq, timeo, NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Poll mgmt rsp timeout, mod=%d cmd=%d msg_id=%u rc=%d",
+ mod, cmd, pf_to_mgmt->sync_msg_id, err);
+ err = -ETIMEDOUT;
+ hinic_dump_aeq_info((struct hinic_hwdev *)hwdev);
+ goto unlock_sync_msg;
+ } else {
+ if (mod == recv_msg->mod && cmd == recv_msg->cmd &&
+ recv_msg->msg_id == pf_to_mgmt->sync_msg_id) {
+ /* the expected response polled */
+ break;
+ }
+			PMD_DRV_LOG(ERR, "AEQ[%d] poll(mod=%d, cmd=%d, msg_id=%u) got an "
+				    "unexpected (mod=%d, cmd=%d, msg_id=%u) response",
+ pf_to_mgmt->rx_aeq->q_id, mod, cmd,
+ pf_to_mgmt->sync_msg_id, recv_msg->mod,
+ recv_msg->cmd, recv_msg->msg_id);
+ }
+ }
+
+ if (i == pf_to_mgmt->rx_aeq->poll_retry_nr) {
+ PMD_DRV_LOG(ERR, "Get %d unexpected mgmt rsp from AEQ[%d], poll mgmt rsp failed",
+ i, pf_to_mgmt->rx_aeq->q_id);
+ err = -EBADMSG;
+ goto unlock_sync_msg;
+ }
+
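+	/* read barrier: ensure the polled response is visible before copying */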
+ rte_smp_rmb();
+ if (recv_msg->msg_len && buf_out && out_size) {
+ if (recv_msg->msg_len <= *out_size) {
+ memcpy(buf_out, recv_msg->msg,
+ recv_msg->msg_len);
+ *out_size = recv_msg->msg_len;
+ } else {
+ PMD_DRV_LOG(ERR, "Mgmt rsp's msg len:%u overflow.",
+ recv_msg->msg_len);
+ err = -ERANGE;
+ }
+ }
+
+unlock_sync_msg:
+ if (err && out_size)
+ *out_size = 0;
+ spin_unlock(&pf_to_mgmt->sync_msg_lock);
+ return err;
+}
+
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, __rte_unused void *buf_out,
+ __rte_unused u16 *out_size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
+ ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ int err = -EINVAL;
+
+ if (!MSG_SZ_IS_VALID(in_size)) {
+ PMD_DRV_LOG(ERR, "Mgmt msg buffer size is invalid");
+ return err;
+ }
+
+ spin_lock(&pf_to_mgmt->sync_msg_lock);
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+
+ spin_unlock(&pf_to_mgmt->sync_msg_lock);
+
+ return err;
+}
+
+static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_msg->sed_id = seq_id;
+ } else {
+ if (seq_id != recv_msg->sed_id + 1) {
+ recv_msg->sed_id = 0;
+ return false;
+ }
+ recv_msg->sed_id = seq_id;
+ }
+
+ return true;
+}
+
+/**
+ * recv_mgmt_msg_handler - handle a message from the mgmt CPU
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ * @param: customized parameter
+ * Return: 0 when the AEQE carries a response message, -1 otherwise
+ * (default result, wrong message or not the last segment)
+ **/
+static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u8 *header, struct hinic_recv_msg *recv_msg,
+ void *param)
+{
+ u64 msg_header = *((u64 *)header);
+ void *msg_body = header + sizeof(msg_header);
+ u8 *dest_msg;
+ u8 seq_id, seq_len;
+ u32 msg_buf_max = MAX_PF_MGMT_BUF_SIZE;
+
+ seq_id = HINIC_MSG_HEADER_GET(msg_header, SEQID);
+ seq_len = HINIC_MSG_HEADER_GET(msg_header, SEG_LEN);
+
+ if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
+ PMD_DRV_LOG(ERR,
+ "Mgmt msg sequence and segment check fail, "
+ "func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x",
+ hinic_global_func_id(pf_to_mgmt->hwdev),
+ recv_msg->sed_id, seq_id, seq_len);
+ return HINIC_RECV_NEXT_AEQE;
+ }
+
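+	/* reassemble: copy this segment to its seq_id offset in recv_msg->msg */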
+ dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN;
+ msg_buf_max -= seq_id * HINIC_MSG_SEG_LEN;
+ memcpy(dest_msg, msg_body, seq_len);
+
+ if (!HINIC_MSG_HEADER_GET(msg_header, LAST))
+ return HINIC_RECV_NEXT_AEQE;
+
+ recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD);
+ recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE);
+ recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(msg_header,
+ ASYNC_MGMT_TO_PF);
+ recv_msg->msg_len = HINIC_MSG_HEADER_GET(msg_header, MSG_LEN);
+ recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID);
+
+ if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE)
+ return HINIC_RECV_DONE;
+
+ hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param);
+
+ return HINIC_RECV_NEXT_AEQE;
+}
+
+/**
+ * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @hwdev: the pointer to the private hardware device object
+ * @header: the header of the message
+ * @size: unused
+ * @param: customized parameter
+ * Return: 0 when the AEQE carries a response message, -1 otherwise
+ * (default result, wrong message or not the last segment)
+ **/
+int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header,
+ __rte_unused u8 size, void *param)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
+ ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ struct hinic_recv_msg *recv_msg;
+
+ recv_msg = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
+ HINIC_MSG_DIRECT_SEND) ?
+ &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ return recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg, param);
+}
+
+int hinic_comm_pf_to_mgmt_init(struct hinic_nic_dev *nic_dev)
+{
+ int rc;
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+
+ rc = hinic_pf_to_mgmt_init(hwdev);
+ if (rc)
+ return rc;
+
+ hwdev->pf_to_mgmt->rx_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN];
+
+ return 0;
+}
+
+void hinic_comm_pf_to_mgmt_free(struct hinic_nic_dev *nic_dev)
+{
+ hinic_pf_to_mgmt_free(nic_dev->hwdev);
+}
+
+/**
+ * hinic_mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ * @param: customized parameter
+ **/
+static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg,
+ void *param)
+{
+ void *buf_out = recv_msg->buf_out;
+ u16 out_size = 0;
+
+ switch (recv_msg->mod) {
+ case HINIC_MOD_COMM:
+ hinic_comm_async_event_handle(pf_to_mgmt->hwdev,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ case HINIC_MOD_L2NIC:
+ hinic_l2nic_async_event_handle(pf_to_mgmt->hwdev, param,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ case HINIC_MOD_HILINK:
+ hinic_hilink_async_event_handle(pf_to_mgmt->hwdev,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "No handler, mod = %d", recv_msg->mod);
+ break;
+ }
+
+ if (!recv_msg->async_mgmt_to_pf) {
+ if (!out_size)
+ out_size = BUF_OUT_DEFAULT_SIZE;
+
+ /* MGMT sent sync msg, send the response */
+ (void)send_msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod,
+ recv_msg->cmd, buf_out, out_size,
+ HINIC_MSG_RESPONSE,
+ recv_msg->msg_id);
+ }
+}
diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt.h b/drivers/net/hinic/base/hinic_pmd_mgmt.h
new file mode 100644
index 000000000..c06013795
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_mgmt.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_MGMT_H_
+#define _HINIC_PMD_MGMT_H_
+
+#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MSG_HEADER_MODULE_SHIFT 11
+#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
+#define HINIC_MSG_HEADER_SEQID_SHIFT 24
+#define HINIC_MSG_HEADER_LAST_SHIFT 30
+#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MSG_HEADER_CMD_SHIFT 32
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48
+#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50
+#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54
+
+#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MSG_HEADER_MODULE_MASK 0x1F
+#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
+#define HINIC_MSG_HEADER_SEQID_MASK 0x3F
+#define HINIC_MSG_HEADER_LAST_MASK 0x1
+#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MSG_HEADER_CMD_MASK 0xFF
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3
+#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF
+#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF
+
+#define HINIC_MSG_HEADER_GET(val, member) \
+ (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \
+ HINIC_MSG_HEADER_##member##_MASK)
+
+#define HINIC_MSG_HEADER_SET(val, member) \
+ ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \
+ HINIC_MSG_HEADER_##member##_SHIFT)
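+
+/*
+ * 64-bit message header layout (derived from the shift/mask values above):
+ *  bits  0-10 MSG_LEN       bits 11-15 MODULE       bits 16-21 SEG_LEN
+ *  bit  22    NO_ACK        bit  23    ASYNC_MGMT_TO_PF
+ *  bits 24-29 SEQID         bit  30    LAST         bit  31    DIRECTION
+ *  bits 32-39 CMD           bits 40-47 unused
+ *  bits 48-49 PCI_INTF_IDX  bits 50-53 P2P_IDX      bits 54-63 MSG_ID
+ */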
+
+enum hinic_msg_direction_type {
+ HINIC_MSG_DIRECT_SEND = 0,
+ HINIC_MSG_RESPONSE = 1
+};
+enum hinic_msg_segment_type {
+ NOT_LAST_SEGMENT = 0,
+ LAST_SEGMENT = 1,
+};
+
+enum hinic_msg_ack_type {
+ HINIC_MSG_ACK = 0,
+ HINIC_MSG_NO_ACK = 1,
+};
+
+struct hinic_recv_msg {
+ void *msg;
+ void *buf_out;
+
+ u16 msg_len;
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+ u8 sed_id;
+};
+
+#define HINIC_COMM_SELF_CMD_MAX 8
+
+typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+struct comm_up_self_msg_sub_info {
+ u8 cmd;
+ comm_up_self_msg_proc proc;
+};
+
+struct comm_up_self_msg_info {
+ u8 cmd_num;
+ struct comm_up_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX];
+};
+
+enum comm_pf_to_mgmt_event_state {
+ SEND_EVENT_START = 0,
+ SEND_EVENT_TIMEOUT,
+ SEND_EVENT_END,
+};
+
+struct hinic_msg_pf_to_mgmt {
+ struct hinic_hwdev *hwdev;
+
+	/* Async cmd cannot be scheduled */
+ spinlock_t async_msg_lock;
+ /* spinlock for sync message */
+ spinlock_t sync_msg_lock;
+
+ void *async_msg_buf;
+ void *sync_msg_buf;
+
+ struct hinic_recv_msg recv_msg_from_mgmt;
+ struct hinic_recv_msg recv_resp_msg_from_mgmt;
+
+ u16 async_msg_id;
+ u16 sync_msg_id;
+
+ struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
+
+ struct hinic_eq *rx_aeq;
+};
+
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev);
+void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev);
+
+int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, __rte_unused u8 size,
+ void *param);
+
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+#endif /* _HINIC_PMD_MGMT_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h b/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h
new file mode 100644
index 000000000..809db8af0
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_MGMT_INTERFACE_H_
+#define _HINIC_PMD_MGMT_INTERFACE_H_
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
+};
+
+enum hilink_info_print_event {
+ HILINK_EVENT_LINK_UP = 1,
+ HILINK_EVENT_LINK_DOWN,
+ HILINK_EVENT_CABLE_PLUGGED,
+ HILINK_EVENT_MAX_TYPE,
+};
+
+#define NIC_LRO_MAX_WQE_NUM 32
+#define NIC_RSS_INDIR_SIZE 256
+#define NIC_DCB_UP_MAX 0x8
+#define NIC_RSS_KEY_SIZE 40
+#define NIC_RSS_CMD_TEMP_ALLOC 0x01
+#define NIC_RSS_CMD_TEMP_FREE 0x02
+
+enum hinic_resp_aeq_num {
+ HINIC_AEQ0 = 0,
+ HINIC_AEQ1 = 1,
+ HINIC_AEQ2 = 2,
+ HINIC_AEQ3 = 3,
+};
+
+struct hinic_mgmt_msg_head {
+ u8 status;
+ u8 version;
+ u8 resp_aeq_num;
+ u8 rsvd0[5];
+};
+
+enum {
+ RECYCLE_MODE_NIC = 0x0,
+ RECYCLE_MODE_DPDK = 0x1,
+};
+
+struct hinic_fast_recycled_mode {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 fast_recycled_mode;/* 1: enable fast recycle, available in dpdk mode,
+ * 0: normal mode, available in kernel nic mode
+ */
+ u8 rsvd1;
+};
+
+struct hinic_function_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rx_wqe_buf_size;
+ u32 mtu;
+};
+
+struct hinic_cmd_qpn {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 base_qpn;
+};
+
+struct hinic_port_mac_set {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct hinic_port_mac_update {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vport_state {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct hinic_port_state {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 state;
+ u8 rsvd1[3];
+};
+
+struct hinic_mtu {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 mtu;
+};
+
+struct hinic_vlan_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+};
+
+struct hinic_get_link {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 link_status;
+ u8 rsvd1;
+};
+
+#define HINIC_DEFAUT_PAUSE_CONFIG 1
+struct hinic_pause_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct hinic_port_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+ u8 resv2[3];
+};
+
+struct hinic_set_autoneg {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+	u16 enable; /* 1: enable, 0: disable */
+};
+
+struct hinic_up_ets_cfg {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 port_id;
+ u8 rsvd1[3];
+ u8 up_tc[HINIC_DCB_UP_MAX];
+ u8 pg_bw[HINIC_DCB_PG_MAX];
+ u8 pgid[HINIC_DCB_UP_MAX];
+ u8 up_bw[HINIC_DCB_UP_MAX];
+ u8 prio[HINIC_DCB_PG_MAX];
+};
+
+struct hinic_tso_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 tso_en;
+ u8 resv2[3];
+};
+
+struct hinic_lro_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 resv2[13];
+};
+
+struct hinic_checksum_offload {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
+
+struct hinic_vlan_offload {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 vlan_rx_offload;
+ u8 rsvd1[5];
+};
+
+struct hinic_rx_mode_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_mode;
+};
+
+/* rss */
+struct nic_rss_indirect_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u8 entry[NIC_RSS_INDIR_SIZE];
+};
+
+struct nic_rss_context_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u32 ctx;
+};
+
+struct hinic_rss_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 rss_en;
+ u8 template_id;
+ u8 rq_priority_number;
+ u8 rsvd1[3];
+ u8 prio_tc[NIC_DCB_UP_MAX];
+};
+
+struct hinic_rss_template_mgmt {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 cmd;
+ u8 template_id;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_indir_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 indir[NIC_RSS_INDIR_SIZE];
+};
+
+struct hinic_rss_template_key {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[NIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_engine_type {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_context_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u32 context;
+};
+
+struct hinic_port_link_status {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 link;
+ u8 port_id;
+};
+
+struct hinic_cable_plug_event {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 plugged; /* 0: unplugged, 1: plugged */
+ u8 port_id;
+};
+
+struct hinic_link_err_event {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 err_type;
+ u8 port_id;
+};
+
+enum link_err_status {
+ LINK_ERR_MODULE_UNRECOGENIZED,
+ LINK_ERR_NUM,
+};
+
+#define HINIC_PORT_STATS_VERSION 0
+
+struct hinic_port_stats_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_port_qfilter_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 filter_enable;
+ u8 filter_type;
+ u8 qid;
+ u8 rsvd2;
+};
+
+struct hinic_port_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_phy_port_stats stats;
+};
+
+struct hinic_cmd_vport_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_vport_stats stats;
+};
+
+struct hinic_clear_port_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_clear_vport_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+#define HINIC_COMPILE_TIME_LEN 20
+struct hinic_version_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 ver[HINIC_FW_VERSION_NAME];
+ u8 time[HINIC_COMPILE_TIME_LEN];
+};
+
+/* get or set loopback mode, needs to be modified by the base API */
+#define HINIC_INTERNAL_LP_MODE 5
+
+#define ANTI_ATTACK_DEFAULT_CIR 500000
+#define ANTI_ATTACK_DEFAULT_XIR 600000
+#define ANTI_ATTACK_DEFAULT_CBS 10000000
+#define ANTI_ATTACK_DEFAULT_XBS 12000000
+
+/* set physical port Anti-Attack rate */
+struct hinic_port_anti_attack_rate {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */
+ u32 cir; /* Committed Information Rate */
+ u32 xir; /* eXtended Information Rate */
+ u32 cbs; /* Committed Burst Size */
+ u32 xbs; /* eXtended Burst Size */
+};
+
+struct hinic_l2nic_reset {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_root_ctxt {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u16 rsvd1;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u8 lro_en;
+ u8 rsvd2;
+ u8 ppf_idx;
+ u8 rsvd3;
+ u16 rq_depth;
+ u16 rx_buf_sz;
+ u16 sq_depth;
+};
+
+struct hinic_page_size {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 page_size;
+ u32 rsvd;
+};
+
+struct hinic_dcb_state {
+ u8 dcb_on;
+ u8 default_cos;
+ u8 up_cos[8];
+};
+
+struct hinic_vf_default_cos {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_dcb_state state;
+};
+
+struct hinic_reset_link_cfg {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_set_vhd_mode {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vhd_type;
+ u16 rx_wqe_buffer_size;
+ u16 rsvd;
+};
+
+struct hinic_vlan_filter {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 rsvd1[2];
+ u32 vlan_filter_ctrl;
+};
+
+struct hinic_set_link_follow {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd0;
+ u8 follow_status;
+ u8 rsvd1[3];
+};
+
+struct hinic_link_mode_cmd {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+	u16 supported; /* 0xFFFF represents an invalid value */
+ u16 advertised;
+};
+
+struct hinic_clear_qp_resource {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz);
+
+int hinic_set_fast_recycle_mode(void *hwdev, u8 mode);
+
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn);
+
+int hinic_set_pagesize(void *hwdev, u8 page_size);
+
+#endif /* _HINIC_PMD_MGMT_INTERFACE_H_ */
--
2.18.0
WARNING: multiple messages have this Message-ID
From: Ziyang Xuan <xuanziyang2@huawei.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@intel.com>, <cloud.wangxiaoyun@huawei.com>,
<zhouguoyang@huawei.com>, <shahar.belkar@huawei.com>,
<stephen@networkplumber.org>, <luoxianjun@huawei.com>,
Ziyang Xuan <xuanziyang2@huawei.com>
Subject: [dpdk-dev] [PATCH v4 03/11] net/hinic/base: add mgmt module interactive code
Date: Thu, 6 Jun 2019 19:05:16 +0800 [thread overview]
Message-ID: <ea888d7b179837e74206e476fb3c25059c222e3f.1559818024.git.xuanziyang2@huawei.com> (raw)
Message-ID: <20190606110516.JfQi-A0kAOGI5oXeE9ABjMA1Xit6ldpDCB-2EXBVpS0@z> (raw)
In-Reply-To: <cover.1559818024.git.xuanziyang2@huawei.com>
Add the structures, functionalities for interaction with
mgmt module.
Signed-off-by: Ziyang Xuan <xuanziyang2@huawei.com>
---
drivers/net/hinic/base/hinic_pmd_hw_mgmt.h | 85 +++
drivers/net/hinic/base/hinic_pmd_mgmt.c | 617 ++++++++++++++++++
drivers/net/hinic/base/hinic_pmd_mgmt.h | 125 ++++
.../net/hinic/base/hinic_pmd_mgmt_interface.h | 503 ++++++++++++++
4 files changed, 1330 insertions(+)
create mode 100644 drivers/net/hinic/base/hinic_pmd_hw_mgmt.h
create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt.c
create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt.h
create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt_interface.h
diff --git a/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h b/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h
new file mode 100644
index 000000000..5f3b12b7d
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_HW_MGMT_H_
+#define _HINIC_PMD_HW_MGMT_H_
+
+/* show each drivers only such as nic_service_cap,
+ * toe_service_cap structure, but not show service_cap
+ */
+enum hinic_service_type {
+ SERVICE_T_NIC = 0,
+ SERVICE_T_MAX = 7,
+
+ /* Only used for interruption resource management,
+ * mark the request module
+ */
+ SERVICE_T_INTF = (1 << 15),
+ SERVICE_T_CQM = (1 << 16),
+};
+
+enum intr_type {
+ INTR_TYPE_MSIX,
+ INTR_TYPE_MSI,
+ INTR_TYPE_INT,
+ /* PXE,OVS need single thread processing, synchronization
+ * messages must use poll wait mechanism interface
+ */
+ INTR_TYPE_NONE,
+};
+
+struct nic_service_cap {
+ /* PF resources */
+ u16 max_sqs;
+ u16 max_rqs;
+
+ /* VF resources, VF obtain them through the MailBox mechanism from
+ * corresponding PF
+ */
+ u16 vf_max_sqs;
+ u16 vf_max_rqs;
+
+ bool lro_en; /* LRO feature enable bit */
+ u8 lro_sz; /* LRO context space: n*16B */
+ u8 tso_sz; /* TSO context space: n*16B */
+};
+
+/* Defines the IRQ information structure*/
+struct irq_info {
+ u16 msix_entry_idx; /* IRQ corresponding index number */
+ u32 irq_id; /* the IRQ number from OS */
+};
+
+/* Define the version information structure*/
+struct dev_version_info {
+ u8 up_ver; /* uP version, directly read from uP
+ * is not configured to file
+ */
+ u8 ucode_ver; /* The microcode version,
+ * read through the CMDq from microcode
+ */
+ u8 cfg_file_ver; /* uP configuration file version */
+ u8 sdk_ver; /* SDK driver version */
+ u8 hw_ver; /* Hardware version */
+};
+
+/* Obtain service_cap.nic_cap.dev_nic_cap.max_sqs */
+u16 hinic_func_max_qnum(void *hwdev);
+
+u16 hinic_global_func_id(void *hwdev); /* func_attr.glb_func_idx */
+
+enum func_type {
+ TYPE_PF,
+ TYPE_VF,
+ TYPE_PPF,
+};
+
+enum hinic_msix_state {
+ HINIC_MSIX_ENABLE,
+ HINIC_MSIX_DISABLE,
+};
+
+enum func_type hinic_func_type(void *hwdev);
+
+#endif /* _HINIC_PMD_HW_MGMT_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt.c b/drivers/net/hinic/base/hinic_pmd_mgmt.c
new file mode 100644
index 000000000..61246fa48
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_mgmt.c
@@ -0,0 +1,617 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_pmd_dpdev.h"
+
+static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg,
+ void *param);
+
+#define BUF_OUT_DEFAULT_SIZE 1
+
+#define MAX_PF_MGMT_BUF_SIZE 2048UL
+
+#define MGMT_MSG_SIZE_MIN 20
+#define MGMT_MSG_SIZE_STEP 16
+#define MGMT_MSG_RSVD_FOR_DEV 8
+
+#define MGMT_MSG_TIMEOUT 5000 /* millisecond */
+
+#define SYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_ID_MASK 0x1FF
+#define ASYNC_MSG_FLAG 0x200
+
+#define MSG_NO_RESP 0xFFFF
+
+#define MAX_MSG_SZ 2016
+
+#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ)
+
+#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
+
+#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
+ (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)
+
+#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id)
+
+#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \
+ ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
+ | ASYNC_MSG_FLAG)
+
+#define HINIC_SEQ_ID_MAX_VAL 42
+#define HINIC_MSG_SEG_LEN 48
+
+/**
+ * mgmt_msg_len - calculate the total message length
+ * @msg_data_len: the length of the message data
+ * Return: the total message length
+ **/
+static u16 mgmt_msg_len(u16 msg_data_len)
+{
+ /* u64 - the size of the header */
+ u16 msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) +
+ msg_data_len);
+
+ if (msg_size > MGMT_MSG_SIZE_MIN)
+ msg_size = MGMT_MSG_SIZE_MIN +
+ ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
+ MGMT_MSG_SIZE_STEP);
+ else
+ msg_size = MGMT_MSG_SIZE_MIN;
+
+ return msg_size;
+}
+
+/**
+ * prepare_header - prepare the header of the message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: pointer of the header to prepare
+ * @msg_len: the length of the message
+ * @mod: module in the chip that will get the message
+ * @ack_type: the type to response
+ * @direction: the direction of the original message
+ * @cmd: the command to do
+ * @msg_id: message id
+ **/
+static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u64 *header, int msg_len, enum hinic_mod_type mod,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ u8 cmd, u32 msg_id)
+{
+ struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif;
+
+ *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ HINIC_MSG_HEADER_SET(mod, MODULE) |
+ HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
+ HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
+ HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
+ HINIC_MSG_HEADER_SET(0, SEQID) |
+ HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+ HINIC_MSG_HEADER_SET(direction, DIRECTION) |
+ HINIC_MSG_HEADER_SET(cmd, CMD) |
+ HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
+ HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
+ HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer of the header to prepare
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ **/
+static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg,
+ int msg_len)
+{
+ u32 cmd_buf_max = MAX_PF_MGMT_BUF_SIZE;
+
+ memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+ mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
+ cmd_buf_max -= MGMT_MSG_RSVD_FOR_DEV;
+ memcpy(mgmt_cmd, header, sizeof(*header));
+
+ mgmt_cmd += sizeof(*header);
+ cmd_buf_max -= sizeof(*header);
+ memcpy(mgmt_cmd, msg, msg_len);
+}
+
+/**
+ * alloc_recv_msg - allocate received message memory
+ * @recv_msg: pointer that will hold the allocated data
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+ int err;
+
+ recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->msg) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg buf failed");
+ return -ENOMEM;
+ }
+
+ recv_msg->buf_out = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!recv_msg->buf_out) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg output buf failed");
+ err = -ENOMEM;
+ goto alloc_buf_out_err;
+ }
+
+ return 0;
+
+alloc_buf_out_err:
+ kfree(recv_msg->msg);
+ return err;
+}
+
+/**
+ * free_recv_msg - free received message memory
+ * @recv_msg: pointer that holds the allocated data
+ **/
+static void free_recv_msg(struct hinic_recv_msg *recv_msg)
+{
+ kfree(recv_msg->buf_out);
+ kfree(recv_msg->msg);
+}
+
+/**
+ * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ * Return: 0 - success, negative - failure
+ **/
+static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ int err;
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate recv msg failed");
+ return err;
+ }
+
+ err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate resp recv msg failed");
+ goto alloc_msg_for_resp_err;
+ }
+
+ pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->async_msg_buf) {
+ PMD_DRV_LOG(ERR, "Allocate async msg buf failed");
+ err = -ENOMEM;
+ goto async_msg_buf_err;
+ }
+
+ pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+ if (!pf_to_mgmt->sync_msg_buf) {
+ PMD_DRV_LOG(ERR, "Allocate sync msg buf failed");
+ err = -ENOMEM;
+ goto sync_msg_buf_err;
+ }
+
+ return 0;
+
+sync_msg_buf_err:
+ kfree(pf_to_mgmt->async_msg_buf);
+
+async_msg_buf_err:
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+
+alloc_msg_for_resp_err:
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+
+ return err;
+}
+
+/**
+ * free_msg_buf - free all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ * Return: 0 - success, negative - failure
+ **/
+static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
+{
+ kfree(pf_to_mgmt->sync_msg_buf);
+ kfree(pf_to_mgmt->async_msg_buf);
+
+ free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+ free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+/**
+ * send_msg_to_mgmt_async - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ * @direction: the direction of the original message
+ * @resp_msg_id: message id of response
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_direction_type direction,
+ u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
+ direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
+ cmd_size);
+}
+
+/**
+ * send_msg_to_mgmt_sync - send async message
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the msg data
+ * @msg_len: the msg data length
+ * @ack_type: indicate mgmt command whether need ack or not
+ * @direction: the direction of the original message
+ * @resp_msg_id: msg id to response for
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ enum hinic_mod_type mod, u8 cmd,
+ void *msg, u16 msg_len,
+ enum hinic_msg_ack_type ack_type,
+ enum hinic_msg_direction_type direction,
+ __rte_unused u16 resp_msg_id)
+{
+ void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
+ struct hinic_api_cmd_chain *chain;
+ u64 header;
+ u16 cmd_size = mgmt_msg_len(msg_len);
+
+ if (direction == HINIC_MSG_RESPONSE)
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, resp_msg_id);
+ else
+ prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
+ direction, cmd, SYNC_MSG_ID(pf_to_mgmt));
+
+ prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
+
+ chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT];
+
+ return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST,
+ mgmt_cmd, cmd_size);
+}
+
+/**
+ * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
+ * @hwdev: the pointer to the private hardware device object
+ * Return: 0 - success, negative - failure
+ **/
+int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+ int err;
+
+ pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+ if (!pf_to_mgmt) {
+ PMD_DRV_LOG(ERR, "Allocate pf to mgmt mem failed");
+ return -ENOMEM;
+ }
+
+ hwdev->pf_to_mgmt = pf_to_mgmt;
+ pf_to_mgmt->hwdev = hwdev;
+
+ spin_lock_init(&pf_to_mgmt->async_msg_lock);
+ spin_lock_init(&pf_to_mgmt->sync_msg_lock);
+
+ err = alloc_msg_buf(pf_to_mgmt);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate msg buffers failed");
+ goto alloc_msg_buf_err;
+ }
+
+ err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Init the api cmd chains failed");
+ goto api_cmd_init_err;
+ }
+
+ return 0;
+
+api_cmd_init_err:
+ free_msg_buf(pf_to_mgmt);
+
+alloc_msg_buf_err:
+ kfree(pf_to_mgmt);
+
+ return err;
+}
+
+/**
+ * hinic_pf_to_mgmt_free - free PF to MGMT channel
+ * @hwdev: the pointer to the private hardware device object
+ **/
+void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+ hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+ free_msg_buf(pf_to_mgmt);
+ kfree(pf_to_mgmt);
+}
+
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
+ ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ struct hinic_recv_msg *recv_msg;
+ u32 timeo;
+ int err, i;
+
+ spin_lock(&pf_to_mgmt->sync_msg_lock);
+
+ SYNC_MSG_ID_INC(pf_to_mgmt);
+ recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Send msg to mgmt failed");
+ goto unlock_sync_msg;
+ }
+
+ timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+ for (i = 0; i < pf_to_mgmt->rx_aeq->poll_retry_nr; i++) {
+ err = hinic_aeq_poll_msg(pf_to_mgmt->rx_aeq, timeo, NULL);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Poll mgmt rsp timeout, mod=%d cmd=%d msg_id=%u rc=%d",
+ mod, cmd, pf_to_mgmt->sync_msg_id, err);
+ err = -ETIMEDOUT;
+ hinic_dump_aeq_info((struct hinic_hwdev *)hwdev);
+ goto unlock_sync_msg;
+ } else {
+ if (mod == recv_msg->mod && cmd == recv_msg->cmd &&
+ recv_msg->msg_id == pf_to_mgmt->sync_msg_id) {
+ /* the expected response polled */
+ break;
+ }
+ PMD_DRV_LOG(ERR, "AEQ[%d] poll(mod=%d, cmd=%d, msg_id=%u) an "
+ "unexpected(mod=%d, cmd=%d, msg_id=%u) response",
+ pf_to_mgmt->rx_aeq->q_id, mod, cmd,
+ pf_to_mgmt->sync_msg_id, recv_msg->mod,
+ recv_msg->cmd, recv_msg->msg_id);
+ }
+ }
+
+ if (i == pf_to_mgmt->rx_aeq->poll_retry_nr) {
+ PMD_DRV_LOG(ERR, "Get %d unexpected mgmt rsp from AEQ[%d], poll mgmt rsp failed",
+ i, pf_to_mgmt->rx_aeq->q_id);
+ err = -EBADMSG;
+ goto unlock_sync_msg;
+ }
+
+ rte_smp_rmb();
+ if (recv_msg->msg_len && buf_out && out_size) {
+ if (recv_msg->msg_len <= *out_size) {
+ memcpy(buf_out, recv_msg->msg,
+ recv_msg->msg_len);
+ *out_size = recv_msg->msg_len;
+ } else {
+ PMD_DRV_LOG(ERR, "Mgmt rsp's msg len:%u overflow.",
+ recv_msg->msg_len);
+ err = -ERANGE;
+ }
+ }
+
+unlock_sync_msg:
+ if (err && out_size)
+ *out_size = 0;
+ spin_unlock(&pf_to_mgmt->sync_msg_lock);
+ return err;
+}
+
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, __rte_unused void *buf_out,
+ __rte_unused u16 *out_size)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
+ ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ int err = -EINVAL;
+
+ if (!MSG_SZ_IS_VALID(in_size)) {
+ PMD_DRV_LOG(ERR, "Mgmt msg buffer size is invalid");
+ return err;
+ }
+
+ spin_lock(&pf_to_mgmt->sync_msg_lock);
+
+ err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+ HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
+ MSG_NO_RESP);
+
+ spin_unlock(&pf_to_mgmt->sync_msg_lock);
+
+ return err;
+}
+
+static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
+ u8 seq_id, u8 seg_len)
+{
+ if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN)
+ return false;
+
+ if (seq_id == 0) {
+ recv_msg->sed_id = seq_id;
+ } else {
+ if (seq_id != recv_msg->sed_id + 1) {
+ recv_msg->sed_id = 0;
+ return false;
+ }
+ recv_msg->sed_id = seq_id;
+ }
+
+ return true;
+}
+
+/**
+ * recv_mgmt_msg_handler - handler a message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ * @param: customized parameter
+ * Return: 0 when aeq is response message, -1 default result,
+ * and when wrong message or not last message
+ **/
+static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ u8 *header, struct hinic_recv_msg *recv_msg,
+ void *param)
+{
+ u64 msg_header = *((u64 *)header);
+ void *msg_body = header + sizeof(msg_header);
+ u8 *dest_msg;
+ u8 seq_id, seq_len;
+ u32 msg_buf_max = MAX_PF_MGMT_BUF_SIZE;
+
+ seq_id = HINIC_MSG_HEADER_GET(msg_header, SEQID);
+ seq_len = HINIC_MSG_HEADER_GET(msg_header, SEG_LEN);
+
+ if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) {
+ PMD_DRV_LOG(ERR,
+ "Mgmt msg sequence and segment check fail, "
+ "func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x",
+ hinic_global_func_id(pf_to_mgmt->hwdev),
+ recv_msg->sed_id, seq_id, seq_len);
+ return HINIC_RECV_NEXT_AEQE;
+ }
+
+ dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN;
+ msg_buf_max -= seq_id * HINIC_MSG_SEG_LEN;
+ memcpy(dest_msg, msg_body, seq_len);
+
+ if (!HINIC_MSG_HEADER_GET(msg_header, LAST))
+ return HINIC_RECV_NEXT_AEQE;
+
+ recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD);
+ recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE);
+ recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(msg_header,
+ ASYNC_MGMT_TO_PF);
+ recv_msg->msg_len = HINIC_MSG_HEADER_GET(msg_header, MSG_LEN);
+ recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID);
+
+ if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE)
+ return HINIC_RECV_DONE;
+
+ hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param);
+
+ return HINIC_RECV_NEXT_AEQE;
+}
+
+/**
+ * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event
+ * @hwdev: the pointer to the private hardware device object
+ * @header: the header of the message
+ * @size: unused
+ * @param: customized parameter
+ * Return: 0 when aeq is response message,
+ * -1 default result, and when wrong message or not last message
+ **/
+int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header,
+ __rte_unused u8 size, void *param)
+{
+ struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
+ ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
+ struct hinic_recv_msg *recv_msg;
+
+ recv_msg = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
+ HINIC_MSG_DIRECT_SEND) ?
+ &pf_to_mgmt->recv_msg_from_mgmt :
+ &pf_to_mgmt->recv_resp_msg_from_mgmt;
+
+ return recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg, param);
+}
+
+int hinic_comm_pf_to_mgmt_init(struct hinic_nic_dev *nic_dev)
+{
+ int rc;
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+
+ rc = hinic_pf_to_mgmt_init(hwdev);
+ if (rc)
+ return rc;
+
+ hwdev->pf_to_mgmt->rx_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN];
+
+ return 0;
+}
+
+void hinic_comm_pf_to_mgmt_free(struct hinic_nic_dev *nic_dev)
+{
+ hinic_pf_to_mgmt_free(nic_dev->hwdev);
+}
+
+/**
+ * hinic_mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ * @param: customized parameter
+ **/
+static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg,
+ void *param)
+{
+ void *buf_out = recv_msg->buf_out;
+ u16 out_size = 0;
+
+ switch (recv_msg->mod) {
+ case HINIC_MOD_COMM:
+ hinic_comm_async_event_handle(pf_to_mgmt->hwdev,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ case HINIC_MOD_L2NIC:
+ hinic_l2nic_async_event_handle(pf_to_mgmt->hwdev, param,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ case HINIC_MOD_HILINK:
+ hinic_hilink_async_event_handle(pf_to_mgmt->hwdev,
+ recv_msg->cmd, recv_msg->msg,
+ recv_msg->msg_len,
+ buf_out, &out_size);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "No handler, mod = %d", recv_msg->mod);
+ break;
+ }
+
+ if (!recv_msg->async_mgmt_to_pf) {
+ if (!out_size)
+ out_size = BUF_OUT_DEFAULT_SIZE;
+
+ /* MGMT sent sync msg, send the response */
+ (void)send_msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod,
+ recv_msg->cmd, buf_out, out_size,
+ HINIC_MSG_RESPONSE,
+ recv_msg->msg_id);
+ }
+}
diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt.h b/drivers/net/hinic/base/hinic_pmd_mgmt.h
new file mode 100644
index 000000000..c06013795
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_mgmt.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_MGMT_H_
+#define _HINIC_PMD_MGMT_H_
+
+#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0
+#define HINIC_MSG_HEADER_MODULE_SHIFT 11
+#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16
+#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23
+#define HINIC_MSG_HEADER_SEQID_SHIFT 24
+#define HINIC_MSG_HEADER_LAST_SHIFT 30
+#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31
+#define HINIC_MSG_HEADER_CMD_SHIFT 32
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48
+#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50
+#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54
+
+#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF
+#define HINIC_MSG_HEADER_MODULE_MASK 0x1F
+#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F
+#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1
+#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1
+#define HINIC_MSG_HEADER_SEQID_MASK 0x3F
+#define HINIC_MSG_HEADER_LAST_MASK 0x1
+#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1
+#define HINIC_MSG_HEADER_CMD_MASK 0xFF
+#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3
+#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF
+#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF
+
+#define HINIC_MSG_HEADER_GET(val, member) \
+ (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \
+ HINIC_MSG_HEADER_##member##_MASK)
+
+#define HINIC_MSG_HEADER_SET(val, member) \
+ ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \
+ HINIC_MSG_HEADER_##member##_SHIFT)
+
+enum hinic_msg_direction_type {
+ HINIC_MSG_DIRECT_SEND = 0,
+ HINIC_MSG_RESPONSE = 1
+};
+enum hinic_msg_segment_type {
+ NOT_LAST_SEGMENT = 0,
+ LAST_SEGMENT = 1,
+};
+
+enum hinic_msg_ack_type {
+ HINIC_MSG_ACK = 0,
+ HINIC_MSG_NO_ACK = 1,
+};
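
As a usage sketch only: the cmd (0x10) and msg_id (42) values below are made up, and HINIC_MOD_L2NIC is the module enumerator used elsewhere in the driver rather than something defined in this header.

    static void header_pack_unpack_example(void)
    {
            u64 hdr;
            u8 mod;
            u16 msg_id;

            /* Pack an 8-byte, single-segment, direct-send message header. */
            hdr = HINIC_MSG_HEADER_SET(8, MSG_LEN) |
                  HINIC_MSG_HEADER_SET(HINIC_MOD_L2NIC, MODULE) |
                  HINIC_MSG_HEADER_SET(8, SEG_LEN) |
                  HINIC_MSG_HEADER_SET(HINIC_MSG_ACK, NO_ACK) |
                  HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
                  HINIC_MSG_HEADER_SET(HINIC_MSG_DIRECT_SEND, DIRECTION) |
                  HINIC_MSG_HEADER_SET(0x10, CMD) |
                  HINIC_MSG_HEADER_SET(42, MSG_ID);

            /* The receive path recovers each field with the matching GET. */
            mod = HINIC_MSG_HEADER_GET(hdr, MODULE);
            msg_id = HINIC_MSG_HEADER_GET(hdr, MSG_ID);
            (void)mod;
            (void)msg_id;
    }
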
+
+struct hinic_recv_msg {
+ void *msg;
+ void *buf_out;
+
+ u16 msg_len;
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
+ u8 sed_id;
+};
+
+#define HINIC_COMM_SELF_CMD_MAX 8
+
+typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in,
+ u16 in_size, void *buf_out, u16 *out_size);
+
+struct comm_up_self_msg_sub_info {
+ u8 cmd;
+ comm_up_self_msg_proc proc;
+};
+
+struct comm_up_self_msg_info {
+ u8 cmd_num;
+ struct comm_up_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX];
+};
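
A minimal sketch of how this table is meant to be filled; the registration helper below is hypothetical and not part of this patch:

    static void register_up_self_cmd_example(struct comm_up_self_msg_info *info,
                                             u8 cmd, comm_up_self_msg_proc proc)
    {
            /* Drop the registration if the fixed-size table is already full. */
            if (info->cmd_num >= HINIC_COMM_SELF_CMD_MAX)
                    return;

            info->info[info->cmd_num].cmd = cmd;
            info->info[info->cmd_num].proc = proc;
            info->cmd_num++;
    }
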
+
+enum comm_pf_to_mgmt_event_state {
+ SEND_EVENT_START = 0,
+ SEND_EVENT_TIMEOUT,
+ SEND_EVENT_END,
+};
+
+struct hinic_msg_pf_to_mgmt {
+ struct hinic_hwdev *hwdev;
+
+ /* spinlock for async message; async commands can not be scheduled */
+ spinlock_t async_msg_lock;
+ /* spinlock for sync message */
+ spinlock_t sync_msg_lock;
+
+ void *async_msg_buf;
+ void *sync_msg_buf;
+
+ struct hinic_recv_msg recv_msg_from_mgmt;
+ struct hinic_recv_msg recv_resp_msg_from_mgmt;
+
+ u16 async_msg_id;
+ u16 sync_msg_id;
+
+ struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
+
+ struct hinic_eq *rx_aeq;
+};
+
+int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size);
+
+int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev);
+void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev);
+
+int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, __rte_unused u8 size,
+ void *param);
+
+int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
+ void *buf_in, u16 in_size, void *buf_out,
+ u16 *out_size, u32 timeout);
+
+#endif /* _HINIC_PMD_MGMT_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h b/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h
new file mode 100644
index 000000000..809db8af0
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_MGMT_INTERFACE_H_
+#define _HINIC_PMD_MGMT_INTERFACE_H_
+
+/* cmd of mgmt CPU message for HILINK module */
+enum hinic_hilink_cmd {
+ HINIC_HILINK_CMD_GET_LINK_INFO = 0x3,
+ HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8,
+};
+
+enum hilink_info_print_event {
+ HILINK_EVENT_LINK_UP = 1,
+ HILINK_EVENT_LINK_DOWN,
+ HILINK_EVENT_CABLE_PLUGGED,
+ HILINK_EVENT_MAX_TYPE,
+};
+
+#define NIC_LRO_MAX_WQE_NUM 32
+#define NIC_RSS_INDIR_SIZE 256
+#define NIC_DCB_UP_MAX 0x8
+#define NIC_RSS_KEY_SIZE 40
+#define NIC_RSS_CMD_TEMP_ALLOC 0x01
+#define NIC_RSS_CMD_TEMP_FREE 0x02
+
+enum hinic_resp_aeq_num {
+ HINIC_AEQ0 = 0,
+ HINIC_AEQ1 = 1,
+ HINIC_AEQ2 = 2,
+ HINIC_AEQ3 = 3,
+};
+
+struct hinic_mgmt_msg_head {
+ u8 status;
+ u8 version;
+ u8 resp_aeq_num;
+ u8 rsvd0[5];
+};
+
+enum {
+ RECYCLE_MODE_NIC = 0x0,
+ RECYCLE_MODE_DPDK = 0x1,
+};
+
+struct hinic_fast_recycled_mode {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 fast_recycled_mode; /* 1: enable fast recycle, available in DPDK mode;
+ * 0: normal mode, available in kernel NIC mode
+ */
+ u8 rsvd1;
+};
+
+struct hinic_function_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rx_wqe_buf_size;
+ u32 mtu;
+};
+
+struct hinic_cmd_qpn {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 base_qpn;
+};
+
+struct hinic_port_mac_set {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct hinic_port_mac_update {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct hinic_vport_state {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct hinic_port_state {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 state;
+ u8 rsvd1[3];
+};
+
+struct hinic_mtu {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 mtu;
+};
+
+struct hinic_vlan_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vlan_id;
+};
+
+struct hinic_get_link {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 link_status;
+ u8 rsvd1;
+};
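
For illustration, a synchronous request through the PF-to-MGMT channel declared in hinic_pmd_mgmt.h could look like the sketch below; HINIC_PORT_CMD_GET_LINK_STATE and the zero-timeout-means-default convention are assumptions about the rest of the driver, not something this patch defines:

    /* Assumes <string.h> and the hinic base headers are included. */
    static int get_link_state_example(void *hwdev, u8 *link_up)
    {
            struct hinic_get_link get_link;
            u16 out_size = sizeof(get_link);
            int err;

            memset(&get_link, 0, sizeof(get_link));
            get_link.func_id = hinic_global_func_id(hwdev);

            err = hinic_pf_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
                                        HINIC_PORT_CMD_GET_LINK_STATE,
                                        &get_link, sizeof(get_link),
                                        &get_link, &out_size,
                                        0 /* assumed: default timeout */);
            if (err || !out_size || get_link.mgmt_msg_head.status)
                    return -1;

            *link_up = get_link.link_status;
            return 0;
    }
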
+
+#define HINIC_DEFAUT_PAUSE_CONFIG 1
+struct hinic_pause_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 auto_neg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+
+struct hinic_port_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 port_type;
+ u8 autoneg_cap;
+ u8 autoneg_state;
+ u8 duplex;
+ u8 speed;
+ u8 resv2[3];
+};
+
+struct hinic_set_autoneg {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 enable; /* 1: enable, 0: disable */
+};
+
+struct hinic_up_ets_cfg {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 port_id;
+ u8 rsvd1[3];
+ u8 up_tc[HINIC_DCB_UP_MAX];
+ u8 pg_bw[HINIC_DCB_PG_MAX];
+ u8 pgid[HINIC_DCB_UP_MAX];
+ u8 up_bw[HINIC_DCB_UP_MAX];
+ u8 prio[HINIC_DCB_PG_MAX];
+};
+
+struct hinic_tso_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 tso_en;
+ u8 resv2[3];
+};
+
+struct hinic_lro_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 lro_ipv4_en;
+ u8 lro_ipv6_en;
+ u8 lro_max_wqe_num;
+ u8 resv2[13];
+};
+
+struct hinic_checksum_offload {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
+
+struct hinic_vlan_offload {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 vlan_rx_offload;
+ u8 rsvd1[5];
+};
+
+struct hinic_rx_mode_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_mode;
+};
+
+/* rss */
+struct nic_rss_indirect_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u8 entry[NIC_RSS_INDIR_SIZE];
+};
+
+struct nic_rss_context_tbl {
+ u32 group_index;
+ u32 offset;
+ u32 size;
+ u32 rsvd;
+ u32 ctx;
+};
+
+struct hinic_rss_config {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 rss_en;
+ u8 template_id;
+ u8 rq_priority_number;
+ u8 rsvd1[3];
+ u8 prio_tc[NIC_DCB_UP_MAX];
+};
+
+struct hinic_rss_template_mgmt {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 cmd;
+ u8 template_id;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_indir_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 indir[NIC_RSS_INDIR_SIZE];
+};
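
As a sketch of how this message might be populated, the indirection entries can be spread round-robin across the configured RX queues; sending the filled message is outside the scope of this header, so only the fill is shown:

    /* Assumes <string.h> is included; num_rxq must be non-zero. */
    static void fill_rss_indir_example(struct hinic_rss_indir_table *indir_tbl,
                                       u16 func_id, u8 template_id, u16 num_rxq)
    {
            u32 i;

            memset(indir_tbl, 0, sizeof(*indir_tbl));
            indir_tbl->func_id = func_id;
            indir_tbl->template_id = template_id;

            /* Map each of the NIC_RSS_INDIR_SIZE entries to an RX queue. */
            for (i = 0; i < NIC_RSS_INDIR_SIZE; i++)
                    indir_tbl->indir[i] = i % num_rxq;
    }
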
+
+struct hinic_rss_template_key {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u8 key[NIC_RSS_KEY_SIZE];
+};
+
+struct hinic_rss_engine_type {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct hinic_rss_context_table {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 template_id;
+ u8 rsvd1;
+ u32 context;
+};
+
+struct hinic_port_link_status {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 link;
+ u8 port_id;
+};
+
+struct hinic_cable_plug_event {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 plugged; /* 0: unplugged, 1: plugged */
+ u8 port_id;
+};
+
+struct hinic_link_err_event {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 err_type;
+ u8 port_id;
+};
+
+enum link_err_status {
+ LINK_ERR_MODULE_UNRECOGENIZED,
+ LINK_ERR_NUM,
+};
+
+#define HINIC_PORT_STATS_VERSION 0
+
+struct hinic_port_stats_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_port_qfilter_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 filter_enable;
+ u8 filter_type;
+ u8 qid;
+ u8 rsvd2;
+};
+
+struct hinic_port_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_phy_port_stats stats;
+};
+
+struct hinic_cmd_vport_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ struct hinic_vport_stats stats;
+};
+
+struct hinic_clear_port_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+struct hinic_clear_vport_stats {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd;
+ u32 stats_version;
+ u32 stats_size;
+};
+
+#define HINIC_COMPILE_TIME_LEN 20
+struct hinic_version_info {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u8 ver[HINIC_FW_VERSION_NAME];
+ u8 time[HINIC_COMPILE_TIME_LEN];
+};
+
+/* get or set loopback mode; needs to be modified through the base API */
+#define HINIC_INTERNAL_LP_MODE 5
+
+#define ANTI_ATTACK_DEFAULT_CIR 500000
+#define ANTI_ATTACK_DEFAULT_XIR 600000
+#define ANTI_ATTACK_DEFAULT_CBS 10000000
+#define ANTI_ATTACK_DEFAULT_XBS 12000000
+
+/* set physical port Anti-Attack rate */
+struct hinic_port_anti_attack_rate {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */
+ u32 cir; /* Committed Information Rate */
+ u32 xir; /* eXtended Information Rate */
+ u32 cbs; /* Committed Burst Size */
+ u32 xbs; /* eXtended Burst Size */
+};
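
For illustration, a request that enables rate limiting with the default parameters defined above could be filled as follows (how it is delivered to the mgmt CPU is not shown):

    /* Assumes <string.h> is included. */
    static void fill_anti_attack_example(struct hinic_port_anti_attack_rate *rate,
                                         u16 func_id)
    {
            memset(rate, 0, sizeof(*rate));
            rate->func_id = func_id;
            rate->enable = 1;   /* turn rate limiting on */
            rate->cir = ANTI_ATTACK_DEFAULT_CIR;
            rate->xir = ANTI_ATTACK_DEFAULT_XIR;
            rate->cbs = ANTI_ATTACK_DEFAULT_CBS;
            rate->xbs = ANTI_ATTACK_DEFAULT_XBS;
    }
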
+
+struct hinic_l2nic_reset {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_root_ctxt {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u16 rsvd1;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u8 lro_en;
+ u8 rsvd2;
+ u8 ppf_idx;
+ u8 rsvd3;
+ u16 rq_depth;
+ u16 rx_buf_sz;
+ u16 sq_depth;
+};
+
+struct hinic_page_size {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_idx;
+ u8 ppf_idx;
+ u8 page_size;
+ u32 rsvd;
+};
+
+struct hinic_dcb_state {
+ u8 dcb_on;
+ u8 default_cos;
+ u8 up_cos[8];
+};
+
+struct hinic_vf_default_cos {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ struct hinic_dcb_state state;
+};
+
+struct hinic_reset_link_cfg {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+struct hinic_set_vhd_mode {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 vhd_type;
+ u16 rx_wqe_buffer_size;
+ u16 rsvd;
+};
+
+struct hinic_vlan_filter {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u8 rsvd1[2];
+ u32 vlan_filter_ctrl;
+};
+
+struct hinic_set_link_follow {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd0;
+ u8 follow_status;
+ u8 rsvd1[3];
+};
+
+struct hinic_link_mode_cmd {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+ u16 supported; /* 0xFFFF represents an invalid value */
+ u16 advertised;
+};
+
+struct hinic_clear_qp_resource {
+ struct hinic_mgmt_msg_head mgmt_msg_head;
+
+ u16 func_id;
+ u16 rsvd1;
+};
+
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz);
+
+int hinic_set_fast_recycle_mode(void *hwdev, u8 mode);
+
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn);
+
+int hinic_set_pagesize(void *hwdev, u8 page_size);
+
+#endif /* _HINIC_PMD_MGMT_INTERFACE_H_ */
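
A rough sketch of how the helpers declared at the end of this header might be combined during bring-up; the call order, the recycle mode, and the page size value are illustrative assumptions rather than requirements stated by this patch:

    static int mgmt_bringup_example(void *hwdev, u16 rx_buf_sz)
    {
            u16 global_qpn = 0;
            int err;

            err = hinic_init_function_table(hwdev, rx_buf_sz);
            if (err)
                    return err;

            err = hinic_get_base_qpn(hwdev, &global_qpn);
            if (err)
                    return err;

            /* DPDK data path uses the fast recycle mode defined above. */
            err = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK);
            if (err)
                    return err;

            return hinic_set_pagesize(hwdev, 0x1 /* illustrative value */);
    }
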
--
2.18.0