From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga06.intel.com (mga06.intel.com [134.134.136.31]) by dpdk.org (Postfix) with ESMTP id 301761B4AA for ; Tue, 18 Dec 2018 09:42:01 +0100 (CET) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga003.jf.intel.com ([10.7.209.27]) by orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 18 Dec 2018 00:42:00 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.56,367,1539673200"; d="scan'208";a="111449455" Received: from dpdk26.sh.intel.com ([10.67.110.164]) by orsmga003.jf.intel.com with ESMTP; 18 Dec 2018 00:41:59 -0800 From: Wenzhuo Lu To: dev@dpdk.org Cc: Paul M Stillwell Jr Date: Tue, 18 Dec 2018 16:46:15 +0800 Message-Id: <1545122800-57293-7-git-send-email-wenzhuo.lu@intel.com> X-Mailer: git-send-email 1.9.3 In-Reply-To: <1545122800-57293-1-git-send-email-wenzhuo.lu@intel.com> References: <1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com> <1545122800-57293-1-git-send-email-wenzhuo.lu@intel.com> Subject: [dpdk-dev] [PATCH v6 06/31] net/ice/base: add control queue information X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 18 Dec 2018 08:42:02 -0000 From: Paul M Stillwell Jr Add the structures for the control queues. Signed-off-by: Paul M Stillwell Jr --- drivers/net/ice/base/ice_controlq.c | 1098 +++++++++++++++++++++++++++++++++++ drivers/net/ice/base/ice_controlq.h | 97 ++++ 2 files changed, 1195 insertions(+) create mode 100644 drivers/net/ice/base/ice_controlq.c create mode 100644 drivers/net/ice/base/ice_controlq.h diff --git a/drivers/net/ice/base/ice_controlq.c b/drivers/net/ice/base/ice_controlq.c new file mode 100644 index 0000000..fb82c23 --- /dev/null +++ b/drivers/net/ice/base/ice_controlq.c @@ -0,0 +1,1098 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2018 + */ + +#include "ice_common.h" + + +#define ICE_CQ_INIT_REGS(qinfo, prefix) \ +do { \ + (qinfo)->sq.head = prefix##_ATQH; \ + (qinfo)->sq.tail = prefix##_ATQT; \ + (qinfo)->sq.len = prefix##_ATQLEN; \ + (qinfo)->sq.bah = prefix##_ATQBAH; \ + (qinfo)->sq.bal = prefix##_ATQBAL; \ + (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ + (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ + (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ + (qinfo)->rq.head = prefix##_ARQH; \ + (qinfo)->rq.tail = prefix##_ARQT; \ + (qinfo)->rq.len = prefix##_ARQLEN; \ + (qinfo)->rq.bah = prefix##_ARQBAH; \ + (qinfo)->rq.bal = prefix##_ARQBAL; \ + (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ + (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ + (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ +} while (0) + +/** + * ice_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_sq and alloc_rq functions have already been called + */ +static void ice_adminq_init_regs(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->adminq; + + ICE_CQ_INIT_REGS(cq, PF_FW); +} + +/** + * ice_mailbox_init_regs - Initialize Mailbox registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_sq and alloc_rq functions have already been called + */ +static void ice_mailbox_init_regs(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->mailboxq; + + ICE_CQ_INIT_REGS(cq, PF_MBX); +} + + +/** + * ice_check_sq_alive + * @hw: pointer to the hw struct + * @cq: 
pointer to the specific Control queue + * + * Returns true if Queue is enabled else false. + */ +bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + /* check both queue-length and queue-enable fields */ + if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) + return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | + cq->sq.len_ena_mask)) == + (cq->num_sq_entries | cq->sq.len_ena_mask); + + return false; +} + +/** + * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); + + cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size); + if (!cq->sq.desc_buf.va) + return ICE_ERR_NO_MEMORY; + + cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries, + sizeof(struct ice_sq_cd)); + if (!cq->sq.cmd_buf) { + ice_free_dma_mem(hw, &cq->sq.desc_buf); + return ICE_ERR_NO_MEMORY; + } + + return ICE_SUCCESS; +} + +/** + * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); + + cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size); + if (!cq->rq.desc_buf.va) + return ICE_ERR_NO_MEMORY; + return ICE_SUCCESS; +} + +/** + * ice_free_cq_ring - Free control queue ring + * @hw: pointer to the hardware structure + * @ring: pointer to the specific control queue ring + * + * This assumes the posted buffers have already been cleaned + * and de-allocated + */ +static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) +{ + ice_free_dma_mem(hw, &ring->desc_buf); +} + +/** + * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries, + sizeof(cq->rq.desc_buf)); + if (!cq->rq.dma_head) + return ICE_ERR_NO_MEMORY; + cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; + + /* allocate the mapped buffers */ + for (i = 0; i < cq->num_rq_entries; i++) { + struct ice_aq_desc *desc; + struct ice_dma_mem *bi; + + bi = &cq->rq.r.rq_bi[i]; + bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size); + if (!bi->va) + goto unwind_alloc_rq_bufs; + + /* now configure the descriptors for use */ + desc = ICE_CTL_Q_DESC(cq->rq, i); + + desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF); + if (cq->rq_buf_size > ICE_AQ_LG_BUF) + desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = CPU_TO_LE16(bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.generic.addr_high = + CPU_TO_LE32(ICE_HI_DWORD(bi->pa)); + desc->params.generic.addr_low = + CPU_TO_LE32(ICE_LO_DWORD(bi->pa)); + desc->params.generic.param0 = 0; + desc->params.generic.param1 = 0; + } + return ICE_SUCCESS; + +unwind_alloc_rq_bufs: + /* 
don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]); + ice_free(hw, cq->rq.dma_head); + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + */ +static enum ice_status +ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries, + sizeof(cq->sq.desc_buf)); + if (!cq->sq.dma_head) + return ICE_ERR_NO_MEMORY; + cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; + + /* allocate the mapped buffers */ + for (i = 0; i < cq->num_sq_entries; i++) { + struct ice_dma_mem *bi; + + bi = &cq->sq.r.sq_bi[i]; + bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size); + if (!bi->va) + goto unwind_alloc_sq_bufs; + } + return ICE_SUCCESS; + +unwind_alloc_sq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]); + ice_free(hw, cq->sq.dma_head); + + return ICE_ERR_NO_MEMORY; +} + +static enum ice_status +ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) +{ + /* Clear Head and Tail */ + wr32(hw, ring->head, 0); + wr32(hw, ring->tail, 0); + + /* set starting point */ + wr32(hw, ring->len, (num_entries | ring->len_ena_mask)); + wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa)); + wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa)); + + /* Check one register to verify that config was applied */ + if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa)) + return ICE_ERR_AQ_ERROR; + + return ICE_SUCCESS; +} + +/** + * ice_cfg_sq_regs - configure Control ATQ registers + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * Configure base address and length registers for the transmit queue + */ +static enum ice_status +ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); +} + +/** + * ice_cfg_rq_regs - configure Control ARQ register + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * Configure base address and length registers for the receive (event q) + */ +static enum ice_status +ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status status; + + status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); + if (status) + return status; + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); + + return ICE_SUCCESS; +} + +/** + * ice_init_sq - main initialization routine for Control ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * This is the main initialization routine for the Control Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the cq->structure: + * - cq->num_sq_entries + * - cq->sq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + */ +static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code; + + if (cq->sq.count > 0) { + /* queue already initialized */ + ret_code = ICE_ERR_NOT_READY; + goto init_ctrlq_exit; + } + + /* verify input for valid configuration */ + if 
(!cq->num_sq_entries || !cq->sq_buf_size) { + ret_code = ICE_ERR_CFG; + goto init_ctrlq_exit; + } + + cq->sq.next_to_use = 0; + cq->sq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = ice_alloc_ctrlq_sq_ring(hw, cq); + if (ret_code) + goto init_ctrlq_exit; + + /* allocate buffers in the rings */ + ret_code = ice_alloc_sq_bufs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* initialize base registers */ + ret_code = ice_cfg_sq_regs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* success! */ + cq->sq.count = cq->num_sq_entries; + goto init_ctrlq_exit; + +init_ctrlq_free_rings: + ice_free_cq_ring(hw, &cq->sq); + +init_ctrlq_exit: + return ret_code; +} + +/** + * ice_init_rq - initialize ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main initialization routine for the Admin Receive (Event) Queue. + * Prior to calling this function, drivers *MUST* set the following fields + * in the cq->structure: + * - cq->num_rq_entries + * - cq->rq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + */ +static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code; + + if (cq->rq.count > 0) { + /* queue already initialized */ + ret_code = ICE_ERR_NOT_READY; + goto init_ctrlq_exit; + } + + /* verify input for valid configuration */ + if (!cq->num_rq_entries || !cq->rq_buf_size) { + ret_code = ICE_ERR_CFG; + goto init_ctrlq_exit; + } + + cq->rq.next_to_use = 0; + cq->rq.next_to_clean = 0; + + /* allocate the ring memory */ + ret_code = ice_alloc_ctrlq_rq_ring(hw, cq); + if (ret_code) + goto init_ctrlq_exit; + + /* allocate buffers in the rings */ + ret_code = ice_alloc_rq_bufs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* initialize base registers */ + ret_code = ice_cfg_rq_regs(hw, cq); + if (ret_code) + goto init_ctrlq_free_rings; + + /* success! 
*/ + cq->rq.count = cq->num_rq_entries; + goto init_ctrlq_exit; + +init_ctrlq_free_rings: + ice_free_cq_ring(hw, &cq->rq); + +init_ctrlq_exit: + return ret_code; +} + +#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ +do { \ + int i; \ + /* free descriptors */ \ + for (i = 0; i < (qi)->num_##ring##_entries; i++) \ + if ((qi)->ring.r.ring##_bi[i].pa) \ + ice_free_dma_mem((hw), \ + &(qi)->ring.r.ring##_bi[i]); \ + /* free the buffer info list */ \ + if ((qi)->ring.cmd_buf) \ + ice_free(hw, (qi)->ring.cmd_buf); \ + /* free dma head */ \ + ice_free(hw, (qi)->ring.dma_head); \ +} while (0) + +/** + * ice_shutdown_sq - shutdown the Control ATQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main shutdown routine for the Control Transmit Queue + */ +static enum ice_status +ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code = ICE_SUCCESS; + + ice_acquire_lock(&cq->sq_lock); + + if (!cq->sq.count) { + ret_code = ICE_ERR_NOT_READY; + goto shutdown_sq_out; + } + + /* Stop firmware AdminQ processing */ + wr32(hw, cq->sq.head, 0); + wr32(hw, cq->sq.tail, 0); + wr32(hw, cq->sq.len, 0); + wr32(hw, cq->sq.bal, 0); + wr32(hw, cq->sq.bah, 0); + + cq->sq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers and the ring itself */ + ICE_FREE_CQ_BUFS(hw, cq, sq); + ice_free_cq_ring(hw, &cq->sq); + +shutdown_sq_out: + ice_release_lock(&cq->sq_lock); + return ret_code; +} + +/** + * ice_aq_ver_check - Check the reported AQ API version. + * @hw: pointer to the hardware structure + * + * Checks if the driver should load on a given AQ API version. + * + * Return: 'true' iff the driver should attempt to load. 'false' otherwise. + */ +static bool ice_aq_ver_check(struct ice_hw *hw) +{ + if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + /* Major API version is newer than expected, don't load */ + ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); + return false; + } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { + if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + } else { + /* Major API version is older than expected, log a warning */ + ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. 
Please update the NVM image.\n"); + } + return true; +} + +/** + * ice_shutdown_rq - shutdown Control ARQ + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * The main shutdown routine for the Control Receive Queue + */ +static enum ice_status +ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + enum ice_status ret_code = ICE_SUCCESS; + + ice_acquire_lock(&cq->rq_lock); + + if (!cq->rq.count) { + ret_code = ICE_ERR_NOT_READY; + goto shutdown_rq_out; + } + + /* Stop Control Queue processing */ + wr32(hw, cq->rq.head, 0); + wr32(hw, cq->rq.tail, 0); + wr32(hw, cq->rq.len, 0); + wr32(hw, cq->rq.bal, 0); + wr32(hw, cq->rq.bah, 0); + + /* set rq.count to 0 to indicate uninitialized queue */ + cq->rq.count = 0; + + /* free ring buffers and the ring itself */ + ICE_FREE_CQ_BUFS(hw, cq, rq); + ice_free_cq_ring(hw, &cq->rq); + +shutdown_rq_out: + ice_release_lock(&cq->rq_lock); + return ret_code; +} + + +/** + * ice_init_check_adminq - Check version for Admin Queue to know if its alive + * @hw: pointer to the hardware structure + */ +static enum ice_status ice_init_check_adminq(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = &hw->adminq; + enum ice_status status; + + + status = ice_aq_get_fw_ver(hw, NULL); + if (status) + goto init_ctrlq_free_rq; + + + if (!ice_aq_ver_check(hw)) { + status = ICE_ERR_FW_API_VER; + goto init_ctrlq_free_rq; + } + + return ICE_SUCCESS; + +init_ctrlq_free_rq: + if (cq->rq.count) { + ice_shutdown_rq(hw, cq); + ice_destroy_lock(&cq->rq_lock); + } + if (cq->sq.count) { + ice_shutdown_sq(hw, cq); + ice_destroy_lock(&cq->sq_lock); + } + return status; +} + +/** + * ice_init_ctrlq - main initialization routine for any control Queue + * @hw: pointer to the hardware structure + * @q_type: specific Control queue type + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the cq->structure: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + */ +static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +{ + struct ice_ctl_q_info *cq; + enum ice_status ret_code; + + switch (q_type) { + case ICE_CTL_Q_ADMIN: + ice_adminq_init_regs(hw); + cq = &hw->adminq; + break; + case ICE_CTL_Q_MAILBOX: + ice_mailbox_init_regs(hw); + cq = &hw->mailboxq; + break; + default: + return ICE_ERR_PARAM; + } + cq->qtype = q_type; + + /* verify input for valid configuration */ + if (!cq->num_rq_entries || !cq->num_sq_entries || + !cq->rq_buf_size || !cq->sq_buf_size) { + return ICE_ERR_CFG; + } + ice_init_lock(&cq->sq_lock); + ice_init_lock(&cq->rq_lock); + + /* setup SQ command write back timeout */ + cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; + + /* allocate the ATQ */ + ret_code = ice_init_sq(hw, cq); + if (ret_code) + goto init_ctrlq_destroy_locks; + + /* allocate the ARQ */ + ret_code = ice_init_rq(hw, cq); + if (ret_code) + goto init_ctrlq_free_sq; + + /* success! 
*/ + return ICE_SUCCESS; + +init_ctrlq_free_sq: + ice_shutdown_sq(hw, cq); +init_ctrlq_destroy_locks: + ice_destroy_lock(&cq->sq_lock); + ice_destroy_lock(&cq->rq_lock); + return ret_code; +} + +/** + * ice_init_all_ctrlq - main initialization routine for all control queues + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the cq->structure for all control queues: + * - cq->num_sq_entries + * - cq->num_rq_entries + * - cq->rq_buf_size + * - cq->sq_buf_size + */ +enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) +{ + enum ice_status ret_code; + + + /* Init FW admin queue */ + ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN); + if (ret_code) + return ret_code; + + ret_code = ice_init_check_adminq(hw); + if (ret_code) + return ret_code; + /* Init Mailbox queue */ + return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); +} + +/** + * ice_shutdown_ctrlq - shutdown routine for any control queue + * @hw: pointer to the hardware structure + * @q_type: specific Control queue type + */ +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +{ + struct ice_ctl_q_info *cq; + + switch (q_type) { + case ICE_CTL_Q_ADMIN: + cq = &hw->adminq; + if (ice_check_sq_alive(hw, cq)) + ice_aq_q_shutdown(hw, true); + break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + break; + default: + return; + } + + if (cq->sq.count) { + ice_shutdown_sq(hw, cq); + ice_destroy_lock(&cq->sq_lock); + } + if (cq->rq.count) { + ice_shutdown_rq(hw, cq); + ice_destroy_lock(&cq->rq_lock); + } +} + +/** + * ice_shutdown_all_ctrlq - shutdown routine for all control queues + * @hw: pointer to the hardware structure + */ +void ice_shutdown_all_ctrlq(struct ice_hw *hw) +{ + /* Shutdown FW admin queue */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + /* Shutdown PF-VF Mailbox */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); +} + +/** + * ice_clean_sq - cleans Admin send queue (ATQ) + * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue + * + * returns the number of free desc + */ +static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + struct ice_ctl_q_ring *sq = &cq->sq; + u16 ntc = sq->next_to_clean; + struct ice_sq_cd *details; +#if 0 + struct ice_aq_desc desc_cb; +#endif + struct ice_aq_desc *desc; + + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); + + while (rd32(hw, cq->sq.head) != ntc) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); +#if 0 + if (details->callback) { + ICE_CTL_Q_CALLBACK cb_func = + (ICE_CTL_Q_CALLBACK)details->callback; + ice_memcpy(&desc_cb, desc, sizeof(desc_cb), + ICE_DMA_TO_DMA); + cb_func(hw, &desc_cb); + } +#endif + ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); + ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); + ntc++; + if (ntc == sq->count) + ntc = 0; + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); + } + + sq->next_to_clean = ntc; + + return ICE_CTL_Q_DESC_UNUSED(sq); +} + +/** + * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) + * @hw: pointer to the hw struct + * @cq: pointer to the specific Control queue + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. 
+ */ +bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, cq->sq.head) == cq->sq.next_to_use; +} + +/** + * ice_sq_send_cmd - send command to Control Queue (ATQ) + * @hw: pointer to the hw struct + * @cq: pointer to the specific Control queue + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buf: buffer to use for indirect commands (or NULL for direct commands) + * @buf_size: size of buffer for indirect commands (or 0 for direct commands) + * @cd: pointer to command details structure + * + * This is the main send command routine for the ATQ. It runs the queue, + * cleans the queue, etc. + */ +enum ice_status +ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_dma_mem *dma_buf = NULL; + struct ice_aq_desc *desc_on_ring; + bool cmd_completed = false; + enum ice_status status = ICE_SUCCESS; + struct ice_sq_cd *details; + u32 total_delay = 0; + u16 retval = 0; + u32 val = 0; + + /* if reset is in progress return a soft error */ + if (hw->reset_ongoing) + return ICE_ERR_RESET_ONGOING; + ice_acquire_lock(&cq->sq_lock); + + cq->sq_last_status = ICE_AQ_RC_OK; + + if (!cq->sq.count) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send queue not initialized.\n"); + status = ICE_ERR_AQ_EMPTY; + goto sq_send_command_error; + } + + if ((buf && !buf_size) || (!buf && buf_size)) { + status = ICE_ERR_PARAM; + goto sq_send_command_error; + } + + if (buf) { + if (buf_size > cq->sq_buf_size) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Invalid buffer size for Control Send queue: %d.\n", + buf_size); + status = ICE_ERR_INVAL_SIZE; + goto sq_send_command_error; + } + + desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF); + if (buf_size > ICE_AQ_LG_BUF) + desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); + } + + val = rd32(hw, cq->sq.head); + if (val >= cq->num_sq_entries) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "head overrun at %d in the Control Send Queue ring\n", + val); + status = ICE_ERR_AQ_EMPTY; + goto sq_send_command_error; + } + + details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); + if (cd) + *details = *cd; +#if 0 + /* FIXME: if/when this block gets enabled (when the #if 0 + * is removed), add braces to both branches of the surrounding + * conditional expression. The braces have been removed to + * prevent checkpatch complaining. + */ + + /* If the command details are defined copy the cookie. The + * CPU_TO_LE32 is not needed here because the data is ignored + * by the FW, only used by the driver + */ + if (details->cookie) { + desc->cookie_high = + CPU_TO_LE32(ICE_HI_DWORD(details->cookie)); + desc->cookie_low = + CPU_TO_LE32(ICE_LO_DWORD(details->cookie)); + } +#endif + else + ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); +#if 0 + /* clear requested flags and then set additional flags if defined */ + desc->flags &= ~CPU_TO_LE16(details->flags_dis); + desc->flags |= CPU_TO_LE16(details->flags_ena); + + if (details->postpone && !details->async) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Async flag not set along with postpone flag\n"); + status = ICE_ERR_PARAM; + goto sq_send_command_error; + } +#endif + + /* Call clean and check queue available function to reclaim the + * descriptors that were processed by FW/MBX; the function returns the + * number of desc available. 
The clean function called here could be + * called in a separate thread in case of asynchronous completions. + */ + if (ice_clean_sq(hw, cq) == 0) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Error: Control Send Queue is full.\n"); + status = ICE_ERR_AQ_FULL; + goto sq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring), + ICE_NONDMA_TO_DMA); + + /* if buf is not NULL assume indirect command */ + if (buf) { + dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; + /* copy the user buf into the respective DMA buf */ + ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA); + desc_on_ring->datalen = CPU_TO_LE16(buf_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.generic.addr_high = + CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa)); + desc_on_ring->params.generic.addr_low = + CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa)); + } + + /* Debug desc and buffer */ + ice_debug(hw, ICE_DBG_AQ_MSG, + "ATQ: Control Send queue desc and buffer:\n"); + + ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size); + + + (cq->sq.next_to_use)++; + if (cq->sq.next_to_use == cq->sq.count) + cq->sq.next_to_use = 0; +#if 0 + /* FIXME - handle this case? */ + if (!details->postpone) +#endif + wr32(hw, cq->sq.tail, cq->sq.next_to_use); + +#if 0 + /* if command details are not defined or async flag is not set, + * we need to wait for desc write back + */ + if (!details->async && !details->postpone) { + /* FIXME - handle this case? */ + } +#endif + do { + if (ice_sq_done(hw, cq)) + break; + + ice_msec_delay(1, false); + total_delay++; + } while (total_delay < cq->sq_cmd_timeout); + + /* if ready, copy the desc back to temp */ + if (ice_sq_done(hw, cq)) { + ice_memcpy(desc, desc_on_ring, sizeof(*desc), + ICE_DMA_TO_NONDMA); + if (buf) { + /* get returned length to copy */ + u16 copy_size = LE16_TO_CPU(desc->datalen); + + if (copy_size > buf_size) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Return len %d > than buf len %d\n", + copy_size, buf_size); + status = ICE_ERR_AQ_ERROR; + } else { + ice_memcpy(buf, dma_buf->va, copy_size, + ICE_DMA_TO_NONDMA); + } + } + retval = LE16_TO_CPU(desc->retval); + if (retval) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send Queue command completed with error 0x%x\n", + retval); + + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if (!status && retval != ICE_AQ_RC_OK) + status = ICE_ERR_AQ_ERROR; + cq->sq_last_status = (enum ice_aq_err)retval; + } + + ice_debug(hw, ICE_DBG_AQ_MSG, + "ATQ: desc and buffer writeback:\n"); + + ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size); + + + /* save writeback AQ if requested */ + if (details->wb_desc) + ice_memcpy(details->wb_desc, desc_on_ring, + sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA); + + /* update the error if time out occurred */ + if (!cmd_completed) { +#if 0 + (!details->async && !details->postpone)) { +#endif + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Send Queue Writeback timeout.\n"); + status = ICE_ERR_AQ_TIMEOUT; + } + +sq_send_command_error: + ice_release_lock(&cq->sq_lock); + return status; +} + +/** + * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the 
desc with default values + */ +void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) +{ + /* zero out the desc */ + ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM); + desc->opcode = CPU_TO_LE16(opcode); + desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI); +} + +/** + * ice_clean_rq_elem + * @hw: pointer to the hw struct + * @cq: pointer to the specific Control queue + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. It can also return how many events are + * left to process through 'pending'. + */ +enum ice_status +ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_rq_event_info *e, u16 *pending) +{ + u16 ntc = cq->rq.next_to_clean; + enum ice_status ret_code = ICE_SUCCESS; + struct ice_aq_desc *desc; + struct ice_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* pre-clean the event info */ + ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM); + + /* take the lock before we start messing with the ring */ + ice_acquire_lock(&cq->rq_lock); + + if (!cq->rq.count) { + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Receive queue not initialized.\n"); + ret_code = ICE_ERR_AQ_EMPTY; + goto clean_rq_elem_err; + } + + /* set next_to_use to head */ + ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); + + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = ICE_ERR_AQ_NO_WORK; + goto clean_rq_elem_out; + } + + /* now clean the next descriptor */ + desc = ICE_CTL_Q_DESC(cq->rq, ntc); + desc_idx = ntc; + + cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval); + flags = LE16_TO_CPU(desc->flags); + if (flags & ICE_AQ_FLAG_ERR) { + ret_code = ICE_ERR_AQ_ERROR; + ice_debug(hw, ICE_DBG_AQ_MSG, + "Control Receive Queue Event received with error 0x%x\n", + cq->rq_last_status); + } + ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA); + datalen = LE16_TO_CPU(desc->datalen); + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf && e->msg_len) + ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, + e->msg_len, ICE_DMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n"); + + ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf, + cq->rq_buf_size); + + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message size + */ + bi = &cq->rq.r.rq_bi[ntc]; + ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); + + desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF); + if (cq->rq_buf_size > ICE_AQ_LG_BUF) + desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); + desc->datalen = CPU_TO_LE16(bi->size); + desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa)); + desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa)); + + /* set tail = the last cleaned desc index. */ + wr32(hw, cq->rq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == cq->num_rq_entries) + ntc = 0; + cq->rq.next_to_clean = ntc; + cq->rq.next_to_use = ntu; + +#if 0 + ice_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode)); +#endif +clean_rq_elem_out: + /* Set pending if needed, unlock and return */ + if (pending) { + /* re-read HW head to calculate actual pending messages */ + ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); + *pending = (u16)((ntc > ntu ? 
cq->rq.count : 0) + (ntu - ntc)); + } +clean_rq_elem_err: + ice_release_lock(&cq->rq_lock); + + return ret_code; +} diff --git a/drivers/net/ice/base/ice_controlq.h b/drivers/net/ice/base/ice_controlq.h new file mode 100644 index 0000000..db2db93 --- /dev/null +++ b/drivers/net/ice/base/ice_controlq.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2001-2018 + */ + +#ifndef _ICE_CONTROLQ_H_ +#define _ICE_CONTROLQ_H_ + +#include "ice_adminq_cmd.h" + + +/* Maximum buffer lengths for all control queue types */ +#define ICE_AQ_MAX_BUF_LEN 4096 +#define ICE_MBXQ_MAX_BUF_LEN 4096 + +#define ICE_CTL_Q_DESC(R, i) \ + (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) + +#define ICE_CTL_Q_DESC_UNUSED(R) \ + (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* Defines that help manage the driver vs FW API checks. + * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. + */ +#define EXP_FW_API_VER_BRANCH 0x00 +#define EXP_FW_API_VER_MAJOR 0x01 +#define EXP_FW_API_VER_MINOR 0x03 + +/* Different control queue types: These are mainly for SW consumption. */ +enum ice_ctl_q { + ICE_CTL_Q_UNKNOWN = 0, + ICE_CTL_Q_ADMIN, + ICE_CTL_Q_MAILBOX, +}; + +/* Control Queue default settings */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */ + +struct ice_ctl_q_ring { + void *dma_head; /* Virtual address to dma head */ + struct ice_dma_mem desc_buf; /* descriptor ring memory */ + void *cmd_buf; /* command buffer memory */ + + union { + struct ice_dma_mem *sq_bi; + struct ice_dma_mem *rq_bi; + } r; + + u16 count; /* Number of descriptors */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; + u32 len_mask; + u32 len_ena_mask; + u32 head_mask; +}; + +/* sq transaction details */ +struct ice_sq_cd { + struct ice_aq_desc *wb_desc; +}; + +#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i])) + +/* rq event information */ +struct ice_rq_event_info { + struct ice_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Control Queue information */ +struct ice_ctl_q_info { + enum ice_ctl_q qtype; + struct ice_ctl_q_ring rq; /* receive queue */ + struct ice_ctl_q_ring sq; /* send queue */ + u32 sq_cmd_timeout; /* send queue cmd write back timeout */ + u16 num_rq_entries; /* receive queue depth */ + u16 num_sq_entries; /* send queue depth */ + u16 rq_buf_size; /* receive queue buffer size */ + u16 sq_buf_size; /* send queue buffer size */ + struct ice_lock sq_lock; /* Send queue lock */ + struct ice_lock rq_lock; /* Receive queue lock */ + enum ice_aq_err sq_last_status; /* last status on send queue */ + enum ice_aq_err rq_last_status; /* last status on receive queue */ +}; + +#endif /* _ICE_CONTROLQ_H_ */ -- 1.9.3
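
For reference, a minimal caller-side sketch (not part of the patch) of how a driver init path might bring up the control queues and issue one direct command with the API added here. The queue depths shown are illustrative assumptions, and the ice_aqc_opc_get_ver opcode is assumed to come from ice_adminq_cmd.h; the driver's real defaults are set elsewhere.

/* Illustrative sketch only; assumes struct ice_hw *hw is already mapped. */
static enum ice_status example_ctrlq_bringup(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	/* Depths and buffer sizes must be set before ice_init_all_ctrlq() */
	hw->adminq.num_sq_entries = 32;			/* assumed depth */
	hw->adminq.num_rq_entries = 32;			/* assumed depth */
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_sq_entries = 32;		/* assumed depth */
	hw->mailboxq.num_rq_entries = 32;		/* assumed depth */
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;

	status = ice_init_all_ctrlq(hw);
	if (status)
		return status;

	/* Direct (bufferless) command: fill a default descriptor and send it.
	 * ice_aqc_opc_get_ver is an assumed opcode name from ice_adminq_cmd.h.
	 */
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);

	ice_shutdown_all_ctrlq(hw);
	return status;
}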