From: Rasesh Mody <rasesh.mody@cavium.com>
To: dev@dpdk.org, ferruh.yigit@intel.com
Cc: Rasesh Mody <rasesh.mody@cavium.com>, Dept-EngDPDKDev@cavium.com
Subject: [dpdk-dev] [PATCH 19/53] net/qede/base: revise management FW mbox access scheme
Date: Mon, 18 Sep 2017 18:29:59 -0700
Message-ID: <1505784633-1171-20-git-send-email-rasesh.mody@cavium.com>
In-Reply-To: <1505784633-1171-1-git-send-email-rasesh.mody@cavium.com>
Revise the locking scheme for access to the management FW (MFW) mailbox:
- add a new linked list called cmd_list to ecore_mcp_info that tracks all
  the mailbox commands sent to the management FW, including those still
  waiting for a response.
- add a spinlock called cmd_lock to ecore_mcp_info. It serializes access
  to cmd_list and verifies that no command is still pending before a new
  mailbox request is sent, thus protecting both the command list and the
  sending of commands.
- add ecore_mcp_cmd_add|del|get_elem() APIs for the new access scheme.
- remove ecore_mcp_mb_lock() and ecore_mcp_mb_unlock().
- add a spinlock called link_lock to ecore_mcp_info, used to sync
  SW-initiated link changes with link changes originating from attention
  context. This prevents possible race conditions, such as during link
  status reporting.
- guard OSAL_{MUTEX,SPIN_LOCK}_{ALLOC,DEALLOC} with
  '#ifdef CONFIG_ECORE_LOCK_ALLOC'. If memory has to be allocated for the
  lock primitives, compile the driver with the CONFIG_ECORE_LOCK_ALLOC
  flag.
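
Below is a minimal, self-contained C sketch of the resulting send path, for
illustration only. The mcp_* names, the pthread mutex standing in for the
cmd_lock spinlock, and the hand-rolled singly linked list standing in for
osal_list_t are all simplifications assumed for this sketch; they are not
the driver's actual OSAL primitives:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mcp_cmd_elem {
        struct mcp_cmd_elem *next;
        uint16_t expected_seq_num;
        bool b_is_completed;
    };

    struct mcp_info {
        pthread_mutex_t cmd_lock;      /* stand-in for the cmd_lock spinlock */
        struct mcp_cmd_elem *cmd_list; /* head of the pending-command list */
        uint16_t drv_mb_seq;
    };

    /* Must be called with cmd_lock held. At most one command is pending at
     * any time, and if one exists it sits at the head of the list.
     */
    static bool mcp_has_pending_cmd(struct mcp_info *p_info)
    {
        return p_info->cmd_list && !p_info->cmd_list->b_is_completed;
    }

    /* Must be called with cmd_lock held; mirrors ecore_mcp_cmd_add_elem(). */
    static struct mcp_cmd_elem *mcp_cmd_add_elem(struct mcp_info *p_info,
                                                 uint16_t seq_num)
    {
        struct mcp_cmd_elem *p_elem = calloc(1, sizeof(*p_elem));

        if (p_elem == NULL)
            return NULL;
        p_elem->expected_seq_num = seq_num;
        p_elem->next = p_info->cmd_list; /* push at head */
        p_info->cmd_list = p_elem;
        return p_elem;
    }

    /* Must be called with cmd_lock held; mirrors ecore_mcp_cmd_del_elem(). */
    static void mcp_cmd_del_elem(struct mcp_info *p_info,
                                 struct mcp_cmd_elem *p_elem)
    {
        struct mcp_cmd_elem **pp = &p_info->cmd_list;

        for (; *pp != NULL; pp = &(*pp)->next) {
            if (*pp == p_elem) {
                *pp = p_elem->next;
                free(p_elem);
                return;
            }
        }
    }

    /* Simplified send path: refuse to queue a new command while another is
     * still pending - the same check the real wait loop polls on.
     */
    static int mcp_send_cmd(struct mcp_info *p_info)
    {
        struct mcp_cmd_elem *p_elem;

        pthread_mutex_lock(&p_info->cmd_lock);
        if (mcp_has_pending_cmd(p_info)) {
            /* The real driver polls/retries here; the sketch fails fast. */
            pthread_mutex_unlock(&p_info->cmd_lock);
            return -1;
        }
        p_elem = mcp_cmd_add_elem(p_info, ++p_info->drv_mb_seq);
        pthread_mutex_unlock(&p_info->cmd_lock);
        if (p_elem == NULL)
            return -1;
        printf("sent mbox command, seq %u\n",
               (unsigned)p_elem->expected_seq_num);
        return 0;
    }

    int main(void)
    {
        struct mcp_info info = { .cmd_lock = PTHREAD_MUTEX_INITIALIZER };
        int rc = mcp_send_cmd(&info);  /* succeeds: list is empty */
        int rc2 = mcp_send_cmd(&info); /* fails: previous cmd still pending */

        printf("first send rc=%d, second send rc=%d\n", rc, rc2);
        pthread_mutex_lock(&info.cmd_lock);
        mcp_cmd_del_elem(&info, info.cmd_list); /* response handled */
        pthread_mutex_unlock(&info.cmd_lock);
        return 0;
    }

The real implementation additionally polls ecore_mcp_update_pending_cmd()
with OSAL_UDELAY() between iterations instead of failing fast, and reads
the MFW response into the list element before marking it completed.
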
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
drivers/net/qede/base/bcm_osal.h | 6 +-
drivers/net/qede/base/ecore_cxt.c | 4 +
drivers/net/qede/base/ecore_dev.c | 4 +
drivers/net/qede/base/ecore_hw.c | 4 +
drivers/net/qede/base/ecore_mcp.c | 461 +++++++++++++++++++++++++------------
drivers/net/qede/base/ecore_mcp.h | 18 +-
drivers/net/qede/base/ecore_spq.c | 5 +
drivers/net/qede/base/ecore_vf.c | 6 +
8 files changed, 347 insertions(+), 161 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 29edfb2..f4c7028 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -345,8 +345,8 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
#define OSAL_IOV_GET_OS_TYPE() 0
-#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) 0
-#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) 0
+#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) nothing
+#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) nothing
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
@@ -434,7 +434,7 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
#define OSAL_CRC32(crc, buf, length) qede_crc32(crc, buf, length)
#define OSAL_CRC8_POPULATE(table, polynomial) nothing
#define OSAL_CRC8(table, pdata, nbytes, crc) 0
-#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
+#define OSAL_MFW_TLV_REQ(p_hwfn) nothing
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
#endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 08a616e..73dc7cb 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1170,7 +1170,9 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
/* Initialize the dynamic ILT allocation mutex */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
/* Set the cxt mangr pointer priori to further allocations */
@@ -1219,7 +1221,9 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
ecore_cid_map_free(p_hwfn);
ecore_cxt_src_t2_free(p_hwfn);
ecore_ilt_shadow_free(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
+#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
}
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 9af6348..1608b19 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -121,7 +121,9 @@ void ecore_init_struct(struct ecore_dev *p_dev)
p_hwfn->my_id = i;
p_hwfn->b_active = false;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+#endif
OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
}
@@ -3862,7 +3864,9 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
ecore_hw_hwfn_free(p_hwfn);
ecore_mcp_free(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+#endif
}
ecore_iov_free_hw_info(p_dev);
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 2bcc32d..31e2776 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -64,7 +64,9 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
}
p_hwfn->p_ptt_pool = p_pool;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
return ECORE_SUCCESS;
@@ -83,8 +85,10 @@ void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
+#ifdef CONFIG_ECORE_LOCK_ALLOC
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
+#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
}
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index b334997..db44aa3 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -96,13 +96,80 @@ void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
}
}
+struct ecore_mcp_cmd_elem {
+ osal_list_entry_t list;
+ struct ecore_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ sizeof(*p_cmd_elem));
+ if (!p_cmd_elem) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
+ goto out;
+ }
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_cmd_elem *p_cmd_elem)
+{
+ OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+ OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return OSAL_NULL;
+}
+
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
- OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
+#endif
}
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_SUCCESS;
@@ -157,8 +224,7 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
- p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
- MISCS_REG_GENERIC_POR_0);
+ p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
return ECORE_SUCCESS;
}
@@ -190,9 +256,15 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW spinlock */
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
- OSAL_SPIN_LOCK_INIT(&p_info->lock);
+ /* Initialize the MFW spinlocks */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
+ OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
+
+ OSAL_LIST_INIT(&p_info->cmd_list);
return ECORE_SUCCESS;
@@ -202,62 +274,28 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
return ECORE_NOMEM;
}
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
- u32 cmd)
-{
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
-
- /* The spinlock shouldn't be acquired when the mailbox command is
- * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
- * pending [UN]LOAD_REQ command of another PF together with a spinlock
- * (i.e. interrupts are disabled) - can lead to a deadlock.
- * It is assumed that for a single PF, no other mailbox commands can be
- * sent from another context while sending LOAD_REQ, and that any
- * parallel commands to UNLOAD_REQ can be cancelled.
- */
- if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
- p_hwfn->mcp_info->block_mb_sending = false;
+static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- /* There's at least a single command that is sent by ecore during the
- * load sequence [expectation of MFW].
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
*/
- if ((p_hwfn->mcp_info->block_mb_sending) &&
- (cmd != DRV_MSG_CODE_FEATURE_SUPPORT)) {
- DP_NOTICE(p_hwfn, false,
- "Trying to send a MFW mailbox command [0x%x]"
- " in parallel to [UN]LOAD_REQ. Aborting.\n",
- cmd);
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
- return ECORE_BUSY;
- }
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
- if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- p_hwfn->mcp_info->block_mb_sending = true;
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ ecore_load_mcp_offsets(p_hwfn, p_ptt);
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
}
-
- return ECORE_SUCCESS;
-}
-
-static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
-{
- if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
- u32 delay = CHIP_MCP_RESP_ITER_US;
- u32 org_mcp_reset_seq, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
#ifndef ASIC_ONLY
@@ -265,15 +303,14 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
delay = EMUL_MCP_RESP_ITER_US;
#endif
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
- if (rc != ECORE_SUCCESS)
- return rc;
+ /* Ensure that only a single thread is accessing the mailbox */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Set drv command along with the updated sequence */
org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ /* Set drv command along with the updated sequence */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
do {
@@ -293,73 +330,207 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
rc = ECORE_AGAIN;
}
- ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
-static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd, u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
- u32 delay = CHIP_MCP_RESP_ITER_US;
- u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
- u32 seq, cnt = 1, actual_mb_seq;
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ /* There is at most one pending command at a certain time, and if it
+ * exists - it is placed at the HEAD of the list.
+ */
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
+ struct ecore_mcp_cmd_elem,
+ list);
+ return !p_cmd_elem->b_is_completed;
+ }
+
+ return false;
+}
+
+/* Must be called while cmd_lock is acquired */
+static enum _ecore_status_t
+ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params *p_mb_params;
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new non-handled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return ECORE_AGAIN;
+
+ p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb,
+ union_data);
+ ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return ECORE_SUCCESS;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static enum _ecore_status_t
+_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 delay)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 cnt = 0;
+ u16 seq_num;
enum _ecore_status_t rc = ECORE_SUCCESS;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- delay = EMUL_MCP_RESP_ITER_US;
- /* There is a built-in delay of 100usec in each MFW response read */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- max_retries /= 10;
-#endif
+ /* Wait until the mailbox is non-occupied */
+ do {
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
- /* Get actual driver mailbox sequence */
- actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK;
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Use MCP history register to check if MCP reset occurred between
- * init time and now.
- */
- if (p_hwfn->mcp_info->mcp_hist !=
- ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
- ecore_load_mcp_offsets(p_hwfn, p_ptt);
- ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ if (!ecore_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_UDELAY(delay);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_AGAIN;
}
- seq = ++p_hwfn->mcp_info->drv_mb_seq;
- /* Set drv param */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+ /* Send the mailbox command */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = ECORE_NOMEM;
+ goto err;
+ }
- /* Set drv command along with the updated sequence */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+ __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ /* Wait for the MFW response */
do {
- /* Wait for MFW response */
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
OSAL_UDELAY(delay);
- *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Give the FW up to 5 second (500*10ms) */
- } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
- (cnt++ < max_retries));
+ if (p_cmd_elem->b_is_completed)
+ break;
+
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Is this a reply to our command? */
- if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
- *o_mcp_resp &= FW_MSG_CODE_MASK;
- /* Get the MCP param */
- *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
- } else {
- /* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
- cmd, param);
- *o_mcp_resp = 0;
- rc = ECORE_AGAIN;
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+ return ECORE_AGAIN;
}
+
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp, p_mb_params->mcp_param,
+ (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return ECORE_SUCCESS;
+
+err:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
@@ -368,9 +539,17 @@ static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params)
{
- union drv_union_data union_data;
- u32 union_data_addr;
- enum _ecore_status_t rc;
+ osal_size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+ /* There is a built-in delay of 100usec in each MFW response read */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ max_retries /= 10;
+#endif
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
@@ -378,44 +557,17 @@ static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
return ECORE_BUSY;
}
- if (p_mb_params->data_src_size > sizeof(union_data) ||
- p_mb_params->data_dst_size > sizeof(union_data)) {
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
DP_ERR(p_hwfn,
"The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
p_mb_params->data_src_size, p_mb_params->data_dst_size,
- sizeof(union_data));
+ union_data_size);
return ECORE_INVAL;
}
- union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
- OFFSETOF(struct public_drv_mb, union_data);
-
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- OSAL_MEM_ZERO(&union_data, sizeof(union_data));
- if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
- OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
- p_mb_params->data_src_size);
- ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
- sizeof(union_data));
-
- rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
- p_mb_params->param, &p_mb_params->mcp_resp,
- &p_mb_params->mcp_param);
-
- if (p_mb_params->p_data_dst != OSAL_NULL &&
- p_mb_params->data_dst_size)
- ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr, p_mb_params->data_dst_size);
-
- ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
-
- return rc;
+ return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ delay);
}
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
@@ -809,9 +961,6 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
DP_INFO(p_hwfn,
"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
- /* The previous load request set the mailbox blocking */
- p_hwfn->mcp_info->block_mb_sending = false;
-
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
@@ -820,9 +969,6 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return rc;
} else if (out_params.load_code ==
FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
- /* The previous load request set the mailbox blocking */
- p_hwfn->mcp_info->block_mb_sending = false;
-
if (ecore_mcp_can_force_load(in_params.drv_role,
out_params.exist_drv_role,
p_params->override_force_load)) {
@@ -1067,6 +1213,9 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
u8 max_bw, min_bw;
u32 status = 0;
+ /* Prevent SW/attentions from doing this at the same time */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
+
p_link = &p_hwfn->mcp_info->link_output;
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
if (!b_reset) {
@@ -1082,7 +1231,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
} else {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Resetting link indications\n");
- return;
+ goto out;
}
if (p_hwfn->b_drv_link_init)
@@ -1197,6 +1346,8 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
OSAL_LINK_UPDATE(p_hwfn, p_ptt);
+out:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
@@ -1266,9 +1417,13 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
return rc;
}
- /* Reset the link status if needed */
- if (!b_up)
- ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
+ /* Mimic link-change attention, done for several reasons:
+ * - On reset, there's no guarantee MFW would trigger
+ * an attention.
+ * - On initialization, older MFWs might not indicate link change
+ * during LFA, so we'll never get an UP indication.
+ */
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
return rc;
}
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index b84f0d1..6c91046 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -32,10 +32,18 @@
ecore_device_num_engines((_p_hwfn)->p_dev)))
struct ecore_mcp_info {
- /* Spinlock used for protecting the access to the MFW mailbox */
- osal_spinlock_t lock;
- /* Flag to indicate whether sending a MFW mailbox is forbidden */
- bool block_mb_sending;
+ /* List for mailbox commands which were sent and wait for a response */
+ osal_list_t cmd_list;
+
+ /* Spinlock used for protecting the access to the mailbox commands list
+ * and the sending of the commands.
+ */
+ osal_spinlock_t cmd_lock;
+
+ /* Spinlock used for syncing SW link-changes and link-changes
+ * originating from attention context.
+ */
+ osal_spinlock_t link_lock;
/* Address of the MCP public area */
u32 public_base;
@@ -60,7 +68,7 @@ struct ecore_mcp_info {
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
- u16 mcp_hist;
+ u32 mcp_hist;
/* Capabilties negotiated with the MFW */
u32 capabilities;
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 25d573e..29ba660 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -536,7 +536,9 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq->p_virt = p_virt;
p_spq->p_phys = p_phys;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+#endif
p_hwfn->p_spq = p_spq;
return ECORE_SUCCESS;
@@ -565,7 +567,10 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn)
}
ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+#endif
+
OSAL_FREE(p_hwfn->p_dev, p_spq);
}
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 0a26141..5002ada 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -453,7 +453,9 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
p_iov->bulletin.size);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+#endif
OSAL_MUTEX_INIT(&p_iov->mutex);
p_hwfn->vf_iov_info = p_iov;
@@ -1349,6 +1351,10 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
p_iov->bulletin.phys, size);
}
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+#endif
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
return rc;
--
1.7.10.3