From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by dpdk.space (Postfix) with ESMTP id 6B756A0096 for ; Thu, 6 Jun 2019 13:04:07 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 4EA6D1B95D; Thu, 6 Jun 2019 13:04:07 +0200 (CEST) Received: from huawei.com (szxga07-in.huawei.com [45.249.212.35]) by dpdk.org (Postfix) with ESMTP id 73F0A1B959 for ; Thu, 6 Jun 2019 13:04:05 +0200 (CEST) Received: from DGGEMS410-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 44EF02927A7A83E37D2F; Thu, 6 Jun 2019 19:04:04 +0800 (CST) Received: from tester_149.localdomain (10.175.119.39) by DGGEMS410-HUB.china.huawei.com (10.3.19.210) with Microsoft SMTP Server id 14.3.439.0; Thu, 6 Jun 2019 19:03:55 +0800 From: Ziyang Xuan To: CC: , , , , , , Ziyang Xuan Date: Thu, 6 Jun 2019 19:15:25 +0800 Message-ID: X-Mailer: git-send-email 2.18.0 In-Reply-To: References: MIME-Version: 1.0 Content-Type: text/plain X-Originating-IP: [10.175.119.39] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v4 03/11] net/hinic/base: add mgmt module interactive code X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add the structures, functionalities for interaction with mgmt module. Signed-off-by: Ziyang Xuan --- drivers/net/hinic/base/hinic_pmd_hw_mgmt.h | 85 +++ drivers/net/hinic/base/hinic_pmd_mgmt.c | 617 ++++++++++++++++++ drivers/net/hinic/base/hinic_pmd_mgmt.h | 125 ++++ .../net/hinic/base/hinic_pmd_mgmt_interface.h | 503 ++++++++++++++ 4 files changed, 1330 insertions(+) create mode 100644 drivers/net/hinic/base/hinic_pmd_hw_mgmt.h create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt.c create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt.h create mode 100644 drivers/net/hinic/base/hinic_pmd_mgmt_interface.h diff --git a/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h b/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h new file mode 100644 index 000000000..5f3b12b7d --- /dev/null +++ b/drivers/net/hinic/base/hinic_pmd_hw_mgmt.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_HW_MGMT_H_ +#define _HINIC_PMD_HW_MGMT_H_ + +/* show each drivers only such as nic_service_cap, + * toe_service_cap structure, but not show service_cap + */ +enum hinic_service_type { + SERVICE_T_NIC = 0, + SERVICE_T_MAX = 7, + + /* Only used for interruption resource management, + * mark the request module + */ + SERVICE_T_INTF = (1 << 15), + SERVICE_T_CQM = (1 << 16), +}; + +enum intr_type { + INTR_TYPE_MSIX, + INTR_TYPE_MSI, + INTR_TYPE_INT, + /* PXE,OVS need single thread processing, synchronization + * messages must use poll wait mechanism interface + */ + INTR_TYPE_NONE, +}; + +struct nic_service_cap { + /* PF resources */ + u16 max_sqs; + u16 max_rqs; + + /* VF resources, VF obtain them through the MailBox mechanism from + * corresponding PF + */ + u16 vf_max_sqs; + u16 vf_max_rqs; + + bool lro_en; /* LRO feature enable bit */ + u8 lro_sz; /* LRO context space: n*16B */ + u8 tso_sz; /* TSO context space: n*16B */ +}; + +/* Defines the IRQ information structure*/ +struct irq_info { + u16 msix_entry_idx; /* IRQ corresponding index number */ + u32 irq_id; /* the IRQ number from OS */ +}; + +/* Define the version information structure*/ +struct 
dev_version_info { + u8 up_ver; /* uP version, directly read from uP + * is not configured to file + */ + u8 ucode_ver; /* The microcode version, + * read through the CMDq from microcode + */ + u8 cfg_file_ver; /* uP configuration file version */ + u8 sdk_ver; /* SDK driver version */ + u8 hw_ver; /* Hardware version */ +}; + +/* Obtain service_cap.nic_cap.dev_nic_cap.max_sqs */ +u16 hinic_func_max_qnum(void *hwdev); + +u16 hinic_global_func_id(void *hwdev); /* func_attr.glb_func_idx */ + +enum func_type { + TYPE_PF, + TYPE_VF, + TYPE_PPF, +}; + +enum hinic_msix_state { + HINIC_MSIX_ENABLE, + HINIC_MSIX_DISABLE, +}; + +enum func_type hinic_func_type(void *hwdev); + +#endif /* _HINIC_PMD_HW_MGMT_H_ */ diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt.c b/drivers/net/hinic/base/hinic_pmd_mgmt.c new file mode 100644 index 000000000..61246fa48 --- /dev/null +++ b/drivers/net/hinic/base/hinic_pmd_mgmt.c @@ -0,0 +1,617 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#include "hinic_pmd_dpdev.h" + +static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg, + void *param); + +#define BUF_OUT_DEFAULT_SIZE 1 + +#define MAX_PF_MGMT_BUF_SIZE 2048UL + +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define MGMT_MSG_TIMEOUT 5000 /* millisecond */ + +#define SYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_FLAG 0x200 + +#define MSG_NO_RESP 0xFFFF + +#define MAX_MSG_SZ 2016 + +#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ) + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) + +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \ + | ASYNC_MSG_FLAG) + +#define HINIC_SEQ_ID_MAX_VAL 42 +#define HINIC_MSG_SEG_LEN 48 + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + **/ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @ack_type: the type to response + * @direction: the direction of the original message + * @cmd: the command to do + * @msg_id: message id + **/ +static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, int msg_len, enum hinic_mod_type mod, + enum hinic_msg_ack_type ack_type, + enum hinic_msg_direction_type direction, + u8 cmd, u32 msg_id) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif; + + *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MSG_HEADER_SET(mod, MODULE) | + HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HINIC_MSG_HEADER_SET(0, SEQID) | + 
HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC_MSG_HEADER_SET(direction, DIRECTION) | + HINIC_MSG_HEADER_SET(cmd, CMD) | + HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) | + HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HINIC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header to prepare + * @msg: the data of the message + * @msg_len: the length of the message + **/ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg, + int msg_len) +{ + u32 cmd_buf_max = MAX_PF_MGMT_BUF_SIZE; + + memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; + cmd_buf_max -= MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd, header, sizeof(*header)); + + mgmt_cmd += sizeof(*header); + cmd_buf_max -= sizeof(*header); + memcpy(mgmt_cmd, msg, msg_len); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + **/ +static int alloc_recv_msg(struct hinic_recv_msg *recv_msg) +{ + int err; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) { + PMD_DRV_LOG(ERR, "Allocate recv msg buf failed"); + return -ENOMEM; + } + + recv_msg->buf_out = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->buf_out) { + PMD_DRV_LOG(ERR, "Allocate recv msg output buf failed"); + err = -ENOMEM; + goto alloc_buf_out_err; + } + + return 0; + +alloc_buf_out_err: + kfree(recv_msg->msg); + return err; +} + +/** + * free_recv_msg - free received message memory + * @recv_msg: pointer that holds the allocated data + **/ +static void free_recv_msg(struct hinic_recv_msg *recv_msg) +{ + kfree(recv_msg->buf_out); + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + **/ +static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + PMD_DRV_LOG(ERR, "Allocate recv msg failed"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + PMD_DRV_LOG(ERR, "Allocate resp recv msg failed"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + PMD_DRV_LOG(ERR, "Allocate async msg buf failed"); + err = -ENOMEM; + goto async_msg_buf_err; + } + + pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) { + PMD_DRV_LOG(ERR, "Allocate sync msg buf failed"); + err = -ENOMEM; + goto sync_msg_buf_err; + } + + return 0; + +sync_msg_buf_err: + kfree(pf_to_mgmt->async_msg_buf); + +async_msg_buf_err: + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + +alloc_msg_for_resp_err: + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + + return err; +} + +/** + * free_msg_buf - free all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + **/ +static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt) +{ + kfree(pf_to_mgmt->sync_msg_buf); + kfree(pf_to_mgmt->async_msg_buf); + + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); +} + +/** + * send_msg_to_mgmt_async - send async message + * @pf_to_mgmt: PF to MGMT 
channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the data of the message + * @msg_len: the length of the message + * @direction: the direction of the original message + * @resp_msg_id: message id of response + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hinic_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->async_msg_buf; + struct hinic_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (direction == HINIC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK, + direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; + + return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd, + cmd_size); +} + +/** + * send_msg_to_mgmt_sync - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the msg data + * @msg_len: the msg data length + * @ack_type: indicate mgmt command whether need ack or not + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hinic_msg_ack_type ack_type, + enum hinic_msg_direction_type direction, + __rte_unused u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; + struct hinic_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (direction == HINIC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, SYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT]; + + return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, + mgmt_cmd, cmd_size); +} + +/** + * hinic_pf_to_mgmt_init - initialize PF to MGMT channel + * @hwdev: the pointer to the private hardware device object + * Return: 0 - success, negative - failure + **/ +int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt; + int err; + + pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); + if (!pf_to_mgmt) { + PMD_DRV_LOG(ERR, "Allocate pf to mgmt mem failed"); + return -ENOMEM; + } + + hwdev->pf_to_mgmt = pf_to_mgmt; + pf_to_mgmt->hwdev = hwdev; + + spin_lock_init(&pf_to_mgmt->async_msg_lock); + spin_lock_init(&pf_to_mgmt->sync_msg_lock); + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + PMD_DRV_LOG(ERR, "Allocate msg buffers failed"); + goto alloc_msg_buf_err; + } + + err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); + if (err) { + PMD_DRV_LOG(ERR, "Init the api cmd chains failed"); + goto api_cmd_init_err; + } + + return 0; + +api_cmd_init_err: + free_msg_buf(pf_to_mgmt); + +alloc_msg_buf_err: + kfree(pf_to_mgmt); + + return err; +} + +/** + * hinic_pf_to_mgmt_free - free PF to MGMT channel + * @hwdev: the pointer to the private hardware 
device object + **/ +void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + + hinic_api_cmd_free(pf_to_mgmt->cmd_chain); + free_msg_buf(pf_to_mgmt); + kfree(pf_to_mgmt); +} + +int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = + ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + struct hinic_recv_msg *recv_msg; + u32 timeo; + int err, i; + + spin_lock(&pf_to_mgmt->sync_msg_lock); + + SYNC_MSG_ID_INC(pf_to_mgmt); + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND, + MSG_NO_RESP); + if (err) { + PMD_DRV_LOG(ERR, "Send msg to mgmt failed"); + goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT); + for (i = 0; i < pf_to_mgmt->rx_aeq->poll_retry_nr; i++) { + err = hinic_aeq_poll_msg(pf_to_mgmt->rx_aeq, timeo, NULL); + if (err) { + PMD_DRV_LOG(ERR, "Poll mgmt rsp timeout, mod=%d cmd=%d msg_id=%u rc=%d", + mod, cmd, pf_to_mgmt->sync_msg_id, err); + err = -ETIMEDOUT; + hinic_dump_aeq_info((struct hinic_hwdev *)hwdev); + goto unlock_sync_msg; + } else { + if (mod == recv_msg->mod && cmd == recv_msg->cmd && + recv_msg->msg_id == pf_to_mgmt->sync_msg_id) { + /* the expected response polled */ + break; + } + PMD_DRV_LOG(ERR, "AEQ[%d] poll(mod=%d, cmd=%d, msg_id=%u) an " + "unexpected(mod=%d, cmd=%d, msg_id=%u) response", + pf_to_mgmt->rx_aeq->q_id, mod, cmd, + pf_to_mgmt->sync_msg_id, recv_msg->mod, + recv_msg->cmd, recv_msg->msg_id); + } + } + + if (i == pf_to_mgmt->rx_aeq->poll_retry_nr) { + PMD_DRV_LOG(ERR, "Get %d unexpected mgmt rsp from AEQ[%d], poll mgmt rsp failed", + i, pf_to_mgmt->rx_aeq->q_id); + err = -EBADMSG; + goto unlock_sync_msg; + } + + rte_smp_rmb(); + if (recv_msg->msg_len && buf_out && out_size) { + if (recv_msg->msg_len <= *out_size) { + memcpy(buf_out, recv_msg->msg, + recv_msg->msg_len); + *out_size = recv_msg->msg_len; + } else { + PMD_DRV_LOG(ERR, "Mgmt rsp's msg len:%u overflow.", + recv_msg->msg_len); + err = -ERANGE; + } + } + +unlock_sync_msg: + if (err && out_size) + *out_size = 0; + spin_unlock(&pf_to_mgmt->sync_msg_lock); + return err; +} + +int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, __rte_unused void *buf_out, + __rte_unused u16 *out_size) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = + ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + int err = -EINVAL; + + if (!MSG_SZ_IS_VALID(in_size)) { + PMD_DRV_LOG(ERR, "Mgmt msg buffer size is invalid"); + return err; + } + + spin_lock(&pf_to_mgmt->sync_msg_lock); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND, + MSG_NO_RESP); + + spin_unlock(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg, + u8 seq_id, u8 seg_len) +{ + if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN) + return false; + + if (seq_id == 0) { + recv_msg->sed_id = seq_id; + } else { + if (seq_id != recv_msg->sed_id + 1) { + recv_msg->sed_id = 0; + return false; + } + recv_msg->sed_id = seq_id; + } + + return true; +} + +/** + * recv_mgmt_msg_handler - handler a message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @header: the header of the message + * @recv_msg: received message details + * 
@param: customized parameter + * Return: HINIC_RECV_DONE when the received message is a response, + * HINIC_RECV_NEXT_AEQE otherwise (wrong message, not the last segment, + * or a request that has been dispatched to its handler) **/ +static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + u8 *header, struct hinic_recv_msg *recv_msg, + void *param) +{ + u64 msg_header = *((u64 *)header); + void *msg_body = header + sizeof(msg_header); + u8 *dest_msg; + u8 seq_id, seq_len; + u32 msg_buf_max = MAX_PF_MGMT_BUF_SIZE; + + seq_id = HINIC_MSG_HEADER_GET(msg_header, SEQID); + seq_len = HINIC_MSG_HEADER_GET(msg_header, SEG_LEN); + + if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) { + PMD_DRV_LOG(ERR, + "Mgmt msg sequence and segment check fail, " + "func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x", + hinic_global_func_id(pf_to_mgmt->hwdev), + recv_msg->sed_id, seq_id, seq_len); + return HINIC_RECV_NEXT_AEQE; + } + + dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN; + msg_buf_max -= seq_id * HINIC_MSG_SEG_LEN; + memcpy(dest_msg, msg_body, seq_len); + + if (!HINIC_MSG_HEADER_GET(msg_header, LAST)) + return HINIC_RECV_NEXT_AEQE; + + recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD); + recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE); + recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(msg_header, + ASYNC_MGMT_TO_PF); + recv_msg->msg_len = HINIC_MSG_HEADER_GET(msg_header, MSG_LEN); + recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID); + + if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE) + return HINIC_RECV_DONE; + + hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param); + + return HINIC_RECV_NEXT_AEQE; +} + +/** + * hinic_mgmt_msg_aeqe_handler - handler for a mgmt message event + * @hwdev: the pointer to the private hardware device object + * @header: the header of the message + * @size: unused + * @param: customized parameter + * Return: HINIC_RECV_DONE when the message is a response, + * HINIC_RECV_NEXT_AEQE otherwise + **/ +int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, + __rte_unused u8 size, void *param) +{ + struct hinic_msg_pf_to_mgmt *pf_to_mgmt = + ((struct hinic_hwdev *)hwdev)->pf_to_mgmt; + struct hinic_recv_msg *recv_msg; + + recv_msg = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == + HINIC_MSG_DIRECT_SEND) ? 
+ &pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + return recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg, param); +} + +int hinic_comm_pf_to_mgmt_init(struct hinic_nic_dev *nic_dev) +{ + int rc; + struct hinic_hwdev *hwdev = nic_dev->hwdev; + + rc = hinic_pf_to_mgmt_init(hwdev); + if (rc) + return rc; + + hwdev->pf_to_mgmt->rx_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN]; + + return 0; +} + +void hinic_comm_pf_to_mgmt_free(struct hinic_nic_dev *nic_dev) +{ + hinic_pf_to_mgmt_free(nic_dev->hwdev); +} + +/** + * hinic_mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + * @param: customized parameter + **/ +static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg, + void *param) +{ + void *buf_out = recv_msg->buf_out; + u16 out_size = 0; + + switch (recv_msg->mod) { + case HINIC_MOD_COMM: + hinic_comm_async_event_handle(pf_to_mgmt->hwdev, + recv_msg->cmd, recv_msg->msg, + recv_msg->msg_len, + buf_out, &out_size); + break; + case HINIC_MOD_L2NIC: + hinic_l2nic_async_event_handle(pf_to_mgmt->hwdev, param, + recv_msg->cmd, recv_msg->msg, + recv_msg->msg_len, + buf_out, &out_size); + break; + case HINIC_MOD_HILINK: + hinic_hilink_async_event_handle(pf_to_mgmt->hwdev, + recv_msg->cmd, recv_msg->msg, + recv_msg->msg_len, + buf_out, &out_size); + break; + default: + PMD_DRV_LOG(ERR, "No handler, mod = %d", recv_msg->mod); + break; + } + + if (!recv_msg->async_mgmt_to_pf) { + if (!out_size) + out_size = BUF_OUT_DEFAULT_SIZE; + + /* MGMT sent sync msg, send the response */ + (void)send_msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, + recv_msg->cmd, buf_out, out_size, + HINIC_MSG_RESPONSE, + recv_msg->msg_id); + } +} diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt.h b/drivers/net/hinic/base/hinic_pmd_mgmt.h new file mode 100644 index 000000000..c06013795 --- /dev/null +++ b/drivers/net/hinic/base/hinic_pmd_mgmt.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_MGMT_H_ +#define _HINIC_PMD_MGMT_H_ + +#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MSG_HEADER_MODULE_SHIFT 11 +#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 +#define HINIC_MSG_HEADER_SEQID_SHIFT 24 +#define HINIC_MSG_HEADER_LAST_SHIFT 30 +#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MSG_HEADER_CMD_SHIFT 32 +#define HINIC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48 +#define HINIC_MSG_HEADER_P2P_IDX_SHIFT 50 +#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54 + +#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MSG_HEADER_MODULE_MASK 0x1F +#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 +#define HINIC_MSG_HEADER_SEQID_MASK 0x3F +#define HINIC_MSG_HEADER_LAST_MASK 0x1 +#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MSG_HEADER_CMD_MASK 0xFF +#define HINIC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3 +#define HINIC_MSG_HEADER_P2P_IDX_MASK 0xF +#define HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF + +#define HINIC_MSG_HEADER_GET(val, member) \ + (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \ + HINIC_MSG_HEADER_##member##_MASK) + +#define HINIC_MSG_HEADER_SET(val, member) \ + ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \ + HINIC_MSG_HEADER_##member##_SHIFT) + +enum hinic_msg_direction_type { + 
HINIC_MSG_DIRECT_SEND = 0, + HINIC_MSG_RESPONSE = 1 +}; +enum hinic_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum hinic_msg_ack_type { + HINIC_MSG_ACK = 0, + HINIC_MSG_NO_ACK = 1, +}; + +struct hinic_recv_msg { + void *msg; + void *buf_out; + + u16 msg_len; + enum hinic_mod_type mod; + u8 cmd; + u16 msg_id; + int async_mgmt_to_pf; + u8 sed_id; +}; + +#define HINIC_COMM_SELF_CMD_MAX 8 + +typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +struct comm_up_self_msg_sub_info { + u8 cmd; + comm_up_self_msg_proc proc; +}; + +struct comm_up_self_msg_info { + u8 cmd_num; + struct comm_up_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX]; +}; + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_START = 0, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +struct hinic_msg_pf_to_mgmt { + struct hinic_hwdev *hwdev; + + /* Async cmd can not be scheduling */ + spinlock_t async_msg_lock; + /* spinlock for sync message */ + spinlock_t sync_msg_lock; + + void *async_msg_buf; + void *sync_msg_buf; + + struct hinic_recv_msg recv_msg_from_mgmt; + struct hinic_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + + struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; + + struct hinic_eq *rx_aeq; +}; + +int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev); +void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev); + +int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, __rte_unused u8 size, + void *param); + +int hinic_pf_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +#endif /* _HINIC_PMD_MGMT_H_ */ diff --git a/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h b/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h new file mode 100644 index 000000000..809db8af0 --- /dev/null +++ b/drivers/net/hinic/base/hinic_pmd_mgmt_interface.h @@ -0,0 +1,503 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef _HINIC_PMD_MGMT_INTERFACE_H_ +#define _HINIC_PMD_MGMT_INTERFACE_H_ + +/* cmd of mgmt CPU message for HILINK module */ +enum hinic_hilink_cmd { + HINIC_HILINK_CMD_GET_LINK_INFO = 0x3, + HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8, +}; + +enum hilink_info_print_event { + HILINK_EVENT_LINK_UP = 1, + HILINK_EVENT_LINK_DOWN, + HILINK_EVENT_CABLE_PLUGGED, + HILINK_EVENT_MAX_TYPE, +}; + +#define NIC_LRO_MAX_WQE_NUM 32 +#define NIC_RSS_INDIR_SIZE 256 +#define NIC_DCB_UP_MAX 0x8 +#define NIC_RSS_KEY_SIZE 40 +#define NIC_RSS_CMD_TEMP_ALLOC 0x01 +#define NIC_RSS_CMD_TEMP_FREE 0x02 + +enum hinic_resp_aeq_num { + HINIC_AEQ0 = 0, + HINIC_AEQ1 = 1, + HINIC_AEQ2 = 2, + HINIC_AEQ3 = 3, +}; + +struct hinic_mgmt_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +enum { + RECYCLE_MODE_NIC = 0x0, + RECYCLE_MODE_DPDK = 0x1, +}; + +struct hinic_fast_recycled_mode { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 fast_recycled_mode;/* 1: enable fast recycle, available in dpdk mode, + * 0: normal mode, available in kernel nic mode + */ + u8 rsvd1; +}; + +struct hinic_function_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rx_wqe_buf_size; + u32 mtu; +}; + +struct hinic_cmd_qpn { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 base_qpn; +}; + +struct 
hinic_port_mac_set { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct hinic_port_mac_update { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct hinic_vport_state { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + +struct hinic_port_state { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 state; + u8 rsvd1[3]; +}; + +struct hinic_mtu { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 mtu; +}; + +struct hinic_vlan_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; +}; + +struct hinic_get_link { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 link_status; + u8 rsvd1; +}; + +#define HINIC_DEFAUT_PAUSE_CONFIG 1 +struct hinic_pause_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + +struct hinic_port_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 resv2[3]; +}; + +struct hinic_set_autoneg { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 enable; /* 1: enable , 0: disable */ +}; + +struct hinic_up_ets_cfg { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 port_id; + u8 rsvd1[3]; + u8 up_tc[HINIC_DCB_UP_MAX]; + u8 pg_bw[HINIC_DCB_PG_MAX]; + u8 pgid[HINIC_DCB_UP_MAX]; + u8 up_bw[HINIC_DCB_UP_MAX]; + u8 prio[HINIC_DCB_PG_MAX]; +}; + +struct hinic_tso_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 tso_en; + u8 resv2[3]; +}; + +struct hinic_lro_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_wqe_num; + u8 resv2[13]; +}; + +struct hinic_checksum_offload { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 rx_csum_offload; +}; + +struct hinic_vlan_offload { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 vlan_rx_offload; + u8 rsvd1[5]; +}; + +struct hinic_rx_mode_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +/* rss */ +struct nic_rss_indirect_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u8 entry[NIC_RSS_INDIR_SIZE]; +}; + +struct nic_rss_context_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u32 ctx; +}; + +struct hinic_rss_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 rss_en; + u8 template_id; + u8 rq_priority_number; + u8 rsvd1[3]; + u8 prio_tc[NIC_DCB_UP_MAX]; +}; + +struct hinic_rss_template_mgmt { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 cmd; + u8 template_id; + u8 rsvd1[4]; +}; + +struct hinic_rss_indir_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 indir[NIC_RSS_INDIR_SIZE]; +}; + +struct hinic_rss_template_key { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 key[NIC_RSS_KEY_SIZE]; +}; + +struct hinic_rss_engine_type { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct hinic_rss_context_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + 
u8 template_id; + u8 rsvd1; + u32 context; +}; + +struct hinic_port_link_status { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 link; + u8 port_id; +}; + +struct hinic_cable_plug_event { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 plugged; /* 0: unplugged, 1: plugged */ + u8 port_id; +}; + +struct hinic_link_err_event { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 err_type; + u8 port_id; +}; + +enum link_err_status { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +#define HINIC_PORT_STATS_VERSION 0 + +struct hinic_port_stats_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_port_qfilter_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 filter_enable; + u8 filter_type; + u8 qid; + u8 rsvd2; +}; + +struct hinic_port_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_phy_port_stats stats; +}; + +struct hinic_cmd_vport_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_vport_stats stats; +}; + +struct hinic_clear_port_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_clear_vport_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd; + u32 stats_version; + u32 stats_size; +}; + +#define HINIC_COMPILE_TIME_LEN 20 +struct hinic_version_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 ver[HINIC_FW_VERSION_NAME]; + u8 time[HINIC_COMPILE_TIME_LEN]; +}; + +/* get or set loopback mode, need to modify by base API */ +#define HINIC_INTERNAL_LP_MODE 5 + +#define ANTI_ATTACK_DEFAULT_CIR 500000 +#define ANTI_ATTACK_DEFAULT_XIR 600000 +#define ANTI_ATTACK_DEFAULT_CBS 10000000 +#define ANTI_ATTACK_DEFAULT_XBS 12000000 + +/* set physical port Anti-Attack rate */ +struct hinic_port_anti_attack_rate { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */ + u32 cir; /* Committed Information Rate */ + u32 xir; /* eXtended Information Rate */ + u32 cbs; /* Committed Burst Size */ + u32 xbs; /* eXtended Burst Size */ +}; + +struct hinic_l2nic_reset { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_root_ctxt { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u16 rsvd1; + u8 set_cmdq_depth; + u8 cmdq_depth; + u8 lro_en; + u8 rsvd2; + u8 ppf_idx; + u8 rsvd3; + u16 rq_depth; + u16 rx_buf_sz; + u16 sq_depth; +}; + +struct hinic_page_size { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 ppf_idx; + u8 page_size; + u32 rsvd; +}; + +struct hinic_dcb_state { + u8 dcb_on; + u8 default_cos; + u8 up_cos[8]; +}; + +struct hinic_vf_default_cos { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_dcb_state state; +}; + +struct hinic_reset_link_cfg { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_set_vhd_mode { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vhd_type; + u16 rx_wqe_buffer_size; + u16 rsvd; +}; + +struct hinic_vlan_filter { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 rsvd1[2]; + u32 vlan_filter_ctrl; +}; + +struct hinic_set_link_follow { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd0; + u8 follow_status; + u8 rsvd1[3]; +}; + +struct hinic_link_mode_cmd { + struct hinic_mgmt_msg_head 
mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u16 supported; /* 0xFFFF represent Invalid value */ + u16 advertised; +}; + +struct hinic_clear_qp_resource { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +int hinic_init_function_table(void *hwdev, u16 rx_buf_sz); + +int hinic_set_fast_recycle_mode(void *hwdev, u8 mode); + +int hinic_get_base_qpn(void *hwdev, u16 *global_qpn); + +int hinic_set_pagesize(void *hwdev, u8 page_size); + +#endif /* _HINIC_PMD_MGMT_INTERFACE_H_ */ -- 2.18.0
*/ + +#ifndef _HINIC_PMD_MGMT_INTERFACE_H_ +#define _HINIC_PMD_MGMT_INTERFACE_H_ + +/* cmd of mgmt CPU message for HILINK module */ +enum hinic_hilink_cmd { + HINIC_HILINK_CMD_GET_LINK_INFO = 0x3, + HINIC_HILINK_CMD_SET_LINK_SETTINGS = 0x8, +}; + +enum hilink_info_print_event { + HILINK_EVENT_LINK_UP = 1, + HILINK_EVENT_LINK_DOWN, + HILINK_EVENT_CABLE_PLUGGED, + HILINK_EVENT_MAX_TYPE, +}; + +#define NIC_LRO_MAX_WQE_NUM 32 +#define NIC_RSS_INDIR_SIZE 256 +#define NIC_DCB_UP_MAX 0x8 +#define NIC_RSS_KEY_SIZE 40 +#define NIC_RSS_CMD_TEMP_ALLOC 0x01 +#define NIC_RSS_CMD_TEMP_FREE 0x02 + +enum hinic_resp_aeq_num { + HINIC_AEQ0 = 0, + HINIC_AEQ1 = 1, + HINIC_AEQ2 = 2, + HINIC_AEQ3 = 3, +}; + +struct hinic_mgmt_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +enum { + RECYCLE_MODE_NIC = 0x0, + RECYCLE_MODE_DPDK = 0x1, +}; + +struct hinic_fast_recycled_mode { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 fast_recycled_mode;/* 1: enable fast recycle, available in dpdk mode, + * 0: normal mode, available in kernel nic mode + */ + u8 rsvd1; +}; + +struct hinic_function_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rx_wqe_buf_size; + u32 mtu; +}; + +struct hinic_cmd_qpn { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 base_qpn; +}; + +struct hinic_port_mac_set { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct hinic_port_mac_update { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct hinic_vport_state { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + +struct hinic_port_state { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 state; + u8 rsvd1[3]; +}; + +struct hinic_mtu { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 mtu; +}; + +struct hinic_vlan_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vlan_id; +}; + +struct hinic_get_link { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 link_status; + u8 rsvd1; +}; + +#define HINIC_DEFAUT_PAUSE_CONFIG 1 +struct hinic_pause_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + +struct hinic_port_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 resv2[3]; +}; + +struct hinic_set_autoneg { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 enable; /* 1: enable , 0: disable */ +}; + +struct hinic_up_ets_cfg { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 port_id; + u8 rsvd1[3]; + u8 up_tc[HINIC_DCB_UP_MAX]; + u8 pg_bw[HINIC_DCB_PG_MAX]; + u8 pgid[HINIC_DCB_UP_MAX]; + u8 up_bw[HINIC_DCB_UP_MAX]; + u8 prio[HINIC_DCB_PG_MAX]; +}; + +struct hinic_tso_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 tso_en; + u8 resv2[3]; +}; + +struct hinic_lro_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 lro_ipv4_en; + u8 lro_ipv6_en; + u8 lro_max_wqe_num; + u8 resv2[13]; +}; + +struct hinic_checksum_offload { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 rx_csum_offload; +}; + +struct hinic_vlan_offload { + struct hinic_mgmt_msg_head 
mgmt_msg_head; + + u16 func_id; + u8 vlan_rx_offload; + u8 rsvd1[5]; +}; + +struct hinic_rx_mode_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 rx_mode; +}; + +/* rss */ +struct nic_rss_indirect_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u8 entry[NIC_RSS_INDIR_SIZE]; +}; + +struct nic_rss_context_tbl { + u32 group_index; + u32 offset; + u32 size; + u32 rsvd; + u32 ctx; +}; + +struct hinic_rss_config { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 rss_en; + u8 template_id; + u8 rq_priority_number; + u8 rsvd1[3]; + u8 prio_tc[NIC_DCB_UP_MAX]; +}; + +struct hinic_rss_template_mgmt { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 cmd; + u8 template_id; + u8 rsvd1[4]; +}; + +struct hinic_rss_indir_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 indir[NIC_RSS_INDIR_SIZE]; +}; + +struct hinic_rss_template_key { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u8 key[NIC_RSS_KEY_SIZE]; +}; + +struct hinic_rss_engine_type { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 hash_engine; + u8 rsvd1[4]; +}; + +struct hinic_rss_context_table { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 template_id; + u8 rsvd1; + u32 context; +}; + +struct hinic_port_link_status { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 link; + u8 port_id; +}; + +struct hinic_cable_plug_event { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 plugged; /* 0: unplugged, 1: plugged */ + u8 port_id; +}; + +struct hinic_link_err_event { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 err_type; + u8 port_id; +}; + +enum link_err_status { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +#define HINIC_PORT_STATS_VERSION 0 + +struct hinic_port_stats_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_port_qfilter_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u8 filter_enable; + u8 filter_type; + u8 qid; + u8 rsvd2; +}; + +struct hinic_port_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_phy_port_stats stats; +}; + +struct hinic_cmd_vport_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + struct hinic_vport_stats stats; +}; + +struct hinic_clear_port_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd; + u32 stats_version; + u32 stats_size; +}; + +struct hinic_clear_vport_stats { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd; + u32 stats_version; + u32 stats_size; +}; + +#define HINIC_COMPILE_TIME_LEN 20 +struct hinic_version_info { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u8 ver[HINIC_FW_VERSION_NAME]; + u8 time[HINIC_COMPILE_TIME_LEN]; +}; + +/* get or set loopback mode, need to modify by base API */ +#define HINIC_INTERNAL_LP_MODE 5 + +#define ANTI_ATTACK_DEFAULT_CIR 500000 +#define ANTI_ATTACK_DEFAULT_XIR 600000 +#define ANTI_ATTACK_DEFAULT_CBS 10000000 +#define ANTI_ATTACK_DEFAULT_XBS 12000000 + +/* set physical port Anti-Attack rate */ +struct hinic_port_anti_attack_rate { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 enable; /* 1: enable rate-limiting, 0: disable rate-limiting */ + u32 cir; /* Committed Information Rate */ + u32 xir; /* eXtended Information Rate */ + u32 cbs; /* Committed 
Burst Size */ + u32 xbs; /* eXtended Burst Size */ +}; + +struct hinic_l2nic_reset { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_root_ctxt { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u16 rsvd1; + u8 set_cmdq_depth; + u8 cmdq_depth; + u8 lro_en; + u8 rsvd2; + u8 ppf_idx; + u8 rsvd3; + u16 rq_depth; + u16 rx_buf_sz; + u16 sq_depth; +}; + +struct hinic_page_size { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_idx; + u8 ppf_idx; + u8 page_size; + u32 rsvd; +}; + +struct hinic_dcb_state { + u8 dcb_on; + u8 default_cos; + u8 up_cos[8]; +}; + +struct hinic_vf_default_cos { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_dcb_state state; +}; + +struct hinic_reset_link_cfg { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +struct hinic_set_vhd_mode { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 vhd_type; + u16 rx_wqe_buffer_size; + u16 rsvd; +}; + +struct hinic_vlan_filter { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u8 rsvd1[2]; + u32 vlan_filter_ctrl; +}; + +struct hinic_set_link_follow { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd0; + u8 follow_status; + u8 rsvd1[3]; +}; + +struct hinic_link_mode_cmd { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; + u16 supported; /* 0xFFFF represent Invalid value */ + u16 advertised; +}; + +struct hinic_clear_qp_resource { + struct hinic_mgmt_msg_head mgmt_msg_head; + + u16 func_id; + u16 rsvd1; +}; + +int hinic_init_function_table(void *hwdev, u16 rx_buf_sz); + +int hinic_set_fast_recycle_mode(void *hwdev, u8 mode); + +int hinic_get_base_qpn(void *hwdev, u16 *global_qpn); + +int hinic_set_pagesize(void *hwdev, u8 page_size); + +#endif /* _HINIC_PMD_MGMT_INTERFACE_H_ */ -- 2.18.0