Commit eaa50fc5 authored by Tomer Tayar, committed by David S. Miller

qed: Prevent a possible deadlock during driver load and unload

The MFW manages an internal lock to prevent concurrent hardware
(de)initialization of different PFs.
Together with the driver's busy-waiting for the MFW's responses to commands,
this might lead to a deadlock during a concurrent load or unload of PFs.
This patch adds the option to sleep within the busy-waiting, and uses it
for the (un)load requests (which are not sent from an interrupt context) to
prevent the possible deadlock.
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 76271809
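
The core of the change is the wait step in the driver's mailbox retry loop: callers running in process context may now sleep between polls instead of busy-waiting, while the total timeout budget stays the same. Below is a minimal userspace sketch of that pattern; wait_for_mfw() and cmd_completed() are illustrative stand-ins, not the driver's API, and usleep() takes the place of the kernel's msleep()/udelay() pair.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RESP_ITER_US    10
#define MAX_RETRIES     (500 * 1000)    /* 500,000 polls * 10 us = ~5 s budget */

/* Stand-in for reading the MFW mailbox; the real driver checks a
 * sequence number in shared memory.
 */
static bool cmd_completed(unsigned int cnt)
{
        return cnt >= 3;        /* pretend the firmware answers on the 4th poll */
}

static int wait_for_mfw(bool can_sleep)
{
        unsigned int max_retries = MAX_RETRIES;
        unsigned int usecs = RESP_ITER_US;
        unsigned int cnt = 0;

        /* Sleeping callers poll 1000x less often for 1000x longer, so the
         * ~5 s total budget is preserved: 500 retries * 10 ms each.
         */
        if (can_sleep) {
                max_retries = (max_retries + 999) / 1000;       /* DIV_ROUND_UP */
                usecs *= 1000;
        }

        do {
                if (cmd_completed(cnt))
                        return 0;
                /* kernel: msleep(usecs / 1000) when sleeping is allowed,
                 * udelay(usecs) otherwise
                 */
                usleep(usecs);
        } while (++cnt < max_retries);

        return -1;      /* timed out */
}

int main(void)
{
        printf("rc = %d\n", wait_for_mfw(true));
        return 0;
}
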
drivers/net/ethernet/qlogic/qed/qed_mcp.c

@@ -48,7 +48,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define CHIP_MCP_RESP_ITER_US	10
+#define QED_MCP_RESP_ITER_US	10
 
 #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
 #define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
@@ -317,7 +317,7 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
 	int rc = 0;
 
 	/* Ensure that only a single thread is accessing the mailbox */
@@ -449,10 +449,10 @@ static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		       struct qed_ptt *p_ptt,
 		       struct qed_mcp_mb_params *p_mb_params,
-		       u32 max_retries, u32 delay)
+		       u32 max_retries, u32 usecs)
 {
+	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
 	struct qed_mcp_cmd_elem *p_cmd_elem;
-	u32 cnt = 0;
 	u16 seq_num;
 	int rc = 0;
 
@@ -475,7 +475,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 			goto err;
 
 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
-		udelay(delay);
+
+		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+			msleep(msecs);
+		else
+			udelay(usecs);
 	} while (++cnt < max_retries);
 
 	if (cnt >= max_retries) {
@@ -504,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		 * The spinlock stays locked until the list element is removed.
 		 */
 
-		udelay(delay);
+		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+			msleep(msecs);
+		else
+			udelay(usecs);
+
 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
 		if (p_cmd_elem->b_is_completed)
@@ -539,7 +547,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
 		   p_mb_params->mcp_resp,
 		   p_mb_params->mcp_param,
-		   (cnt * delay) / 1000, (cnt * delay) % 1000);
+		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);
 
 	/* Clear the sequence number from the MFW response */
 	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -557,7 +565,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 {
 	size_t union_data_size = sizeof(union drv_union_data);
 	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
-	u32 delay = CHIP_MCP_RESP_ITER_US;
+	u32 usecs = QED_MCP_RESP_ITER_US;
 
 	/* MCP not initialized */
 	if (!qed_mcp_is_init(p_hwfn)) {
@@ -574,8 +582,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 	}
 
+	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+		max_retries = DIV_ROUND_UP(max_retries, 1000);
+		usecs *= 1000;
+	}
+
 	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
-				      delay);
+				      usecs);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -793,6 +806,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 	mb_params.data_src_size = sizeof(load_req);
 	mb_params.p_data_dst = &load_rsp;
 	mb_params.data_dst_size = sizeof(load_rsp);
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -1014,7 +1028,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 wol_param, mcp_resp, mcp_param;
+	struct qed_mcp_mb_params mb_params;
+	u32 wol_param;
 
 	switch (p_hwfn->cdev->wol_config) {
 	case QED_OV_WOL_DISABLED:
@@ -1032,8 +1047,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
 	}
 
-	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
-			   &mcp_resp, &mcp_param);
+	memset(&mb_params, 0, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+	mb_params.param = wol_param;
+	mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
+
+	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
 
 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
...
drivers/net/ethernet/qlogic/qed/qed_mcp.h

@@ -660,14 +660,19 @@ struct qed_mcp_info {
 };
 
 struct qed_mcp_mb_params {
 	u32 cmd;
 	u32 param;
 	void *p_data_src;
-	u8 data_src_size;
-	void *p_data_dst;
+	void *p_data_dst;
+	u8 data_src_size;
 	u8 data_dst_size;
 	u32 mcp_resp;
 	u32 mcp_param;
+	u32 flags;
+#define QED_MB_FLAG_CAN_SLEEP	(0x1 << 0)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+	({ typeof(params) __params = (params); \
+	   (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
 };
 
 struct qed_drv_tlv_hdr {
...
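
For reference, the QED_MB_FLAGS_IS_SET() macro added to qed_mcp.h is a GNU statement expression: it evaluates its params argument exactly once and yields false for a NULL pointer, so callers that pass no parameter struct safely fall back to the non-sleeping udelay() path. A small userspace sketch of the same construct follows, with names shortened (mb_params and MB_FLAG_CAN_SLEEP are illustrative, not the driver's identifiers):

#include <stdio.h>
#include <string.h>

struct mb_params {
        unsigned int cmd;
        unsigned int flags;
#define MB_FLAG_CAN_SLEEP       (0x1 << 0)
#define MB_FLAGS_IS_SET(params, flag) \
        ({ typeof(params) __params = (params); \
           (__params && (__params->flags & MB_FLAG_ ## flag)); })
};

int main(void)
{
        struct mb_params mb;
        struct mb_params *none = NULL;

        memset(&mb, 0, sizeof(mb));
        mb.flags = MB_FLAG_CAN_SLEEP;   /* opt in, as the (un)load requests do */

        printf("%d\n", MB_FLAGS_IS_SET(&mb, CAN_SLEEP));        /* prints 1 */
        printf("%d\n", MB_FLAGS_IS_SET(none, CAN_SLEEP));       /* prints 0 */
        return 0;
}

Placing the flag #defines inside the struct body is a common kernel idiom: the preprocessor ignores the surrounding scope, and the flag values stay next to the field they describe.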