Commit c552a50e authored by David S. Miller

Merge branch 'qed-load-unload-mfw'

Yuval Mintz says:

====================
qed: load/unload mfw series

This series corrects the unload flow and greatly enhances the
initialization flow with regard to interactions between the driver
and the management firmware.

Patch #1 makes sure unloading is done under the management firmware's
'critical section' protection.

Patches #2 - #4 move the driver to a newer load scheme with regard
to the MFW; this newer scheme helps clean the device in case a
previous instance has dirtied it [preboot, PDA, etc.].

Patches #5 - #6 let the driver inform the management firmware of the
number of resources, which depends on the non-management firmware used.
Patch #7 then uses a new resource [BDQ] instead of a hard-coded value.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 30b38236 d0d40a73
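For orientation, here is a minimal sketch of the new calling convention this series introduces, mirroring the qed_slowpath_start() changes in the diff below. The wrapper name example_start_slowpath is hypothetical, and tunnelling parameters and error handling are omitted for brevity:

static int example_start_slowpath(struct qed_dev *cdev, const u8 *fw_data)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;

	/* Driver-load parameters handed to the MFW as part of LOAD_REQ */
	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;

	/* qed_hw_init()'s former arguments are now carried in one struct */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = fw_data;
	hw_init_params.p_drv_load_params = &drv_load_params;

	return qed_hw_init(cdev, &hw_init_params);
}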
...@@ -51,7 +51,19 @@ ...@@ -51,7 +51,19 @@
#include "qed_hsi.h" #include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass; extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.10.10.21"
#define QED_MAJOR_VERSION 8
#define QED_MINOR_VERSION 10
#define QED_REVISION_VERSION 10
#define QED_ENGINEERING_VERSION 21
#define QED_VERSION \
((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
(QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
#define STORM_FW_VERSION \
((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
(FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
#define MAX_HWFNS_PER_DEVICE (4) #define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16 #define NAME_SIZE 16
...@@ -59,8 +71,6 @@ extern const struct qed_common_ops qed_common_ops_pass; ...@@ -59,8 +71,6 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100 #define QED_WFQ_UNIT 100
#define ISCSI_BDQ_ID(_port_id) (_port_id)
#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2)
#define QED_WID_SIZE (1024) #define QED_WID_SIZE (1024)
#define QED_PF_DEMS_SIZE (4) #define QED_PF_DEMS_SIZE (4)
...@@ -76,6 +86,15 @@ union qed_mcp_protocol_stats; ...@@ -76,6 +86,15 @@ union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type; enum qed_mcp_protocol_type;
/* helpers */ /* helpers */
#define QED_MFW_GET_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _SHIFT))
#define QED_MFW_SET_FIELD(name, field, value) \
do { \
(name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
} while (0)
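A small usage sketch of the field helpers above; the LOAD_REQ_ROLE / LOAD_REQ_LOCK_TO masks and DRV_ROLE_OS come from the HSI additions later in this series, and the local variables are illustrative only:

	u32 misc0 = 0;
	u8 role;

	/* Pack the driver role and MFW lock timeout into the mailbox word */
	QED_MFW_SET_FIELD(misc0, LOAD_REQ_ROLE, DRV_ROLE_OS);
	QED_MFW_SET_FIELD(misc0, LOAD_REQ_LOCK_TO, LOAD_REQ_LOCK_TO_DEFAULT);

	/* Extract a field back out of the word */
	role = QED_MFW_GET_FIELD(misc0, LOAD_REQ_ROLE);	/* == DRV_ROLE_OS */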
static inline u32 qed_db_addr(u32 cid, u32 DEMS) static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{ {
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
...@@ -198,6 +217,7 @@ enum qed_resources { ...@@ -198,6 +217,7 @@ enum qed_resources {
QED_LL2_QUEUE, QED_LL2_QUEUE,
QED_CMDQS_CQS, QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE, QED_RDMA_STATS_QUEUE,
QED_BDQ,
QED_MAX_RESC, QED_MAX_RESC,
}; };
...@@ -355,6 +375,12 @@ struct qed_fw_data { ...@@ -355,6 +375,12 @@ struct qed_fw_data {
u32 init_ops_size; u32 init_ops_size;
}; };
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
__stringify(QED_REVISION_VERSION) "." \
__stringify(QED_ENGINEERING_VERSION)
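For reference, a worked expansion of the version macros above (not code from the patch): QED_VERSION packs to (8 << 24) | (10 << 16) | (10 << 8) | 21 = 0x080a0a15, while DRV_MODULE_VERSION stringifies to "8" "." "10" "." "10" "." "21", i.e. the same "8.10.10.21" literal the old #define carried. STORM_FW_VERSION packs the FW_* version components in the same byte order.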
struct qed_simd_fp_handler { struct qed_simd_fp_handler {
void *token; void *token;
void (*func)(void *); void (*func)(void *);
...@@ -732,5 +758,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev, ...@@ -732,5 +758,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type, enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats); union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn); int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
#endif /* _QED_H */ #endif /* _QED_H */
...@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata { ...@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
enum qed_pci_personality personality; enum qed_pci_personality personality;
}; };
#define QED_MFW_GET_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _SHIFT))
struct qed_dcbx_info { struct qed_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS]; struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS]; struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
......
...@@ -1106,25 +1106,34 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, ...@@ -1106,25 +1106,34 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
} }
int qed_hw_init(struct qed_dev *cdev, static void
struct qed_tunn_start_params *p_tunn, qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
bool b_hw_start, struct qed_drv_load_params *p_drv_load)
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
const u8 *bin_fw_data)
{ {
memset(p_load_req, 0, sizeof(*p_load_req));
p_load_req->drv_role = p_drv_load->is_crash_kernel ?
QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
p_load_req->override_force_load = p_drv_load->override_force_load;
}
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
struct qed_load_req_params load_req_params;
u32 load_code, param, drv_mb_param; u32 load_code, param, drv_mb_param;
bool b_default_mtu = true; bool b_default_mtu = true;
struct qed_hwfn *p_hwfn; struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i; int rc = 0, mfw_rc, i;
if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
return -EINVAL; return -EINVAL;
} }
if (IS_PF(cdev)) { if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data); rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
if (rc) if (rc)
return rc; return rc;
} }
...@@ -1150,17 +1159,21 @@ int qed_hw_init(struct qed_dev *cdev, ...@@ -1150,17 +1159,21 @@ int qed_hw_init(struct qed_dev *cdev,
if (rc) if (rc)
return rc; return rc;
rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code); qed_fill_load_req_params(&load_req_params,
p_params->p_drv_load_params);
rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
&load_req_params);
if (rc) { if (rc) {
DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n"); DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
return rc; return rc;
} }
qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); load_code = load_req_params.load_code;
DP_VERBOSE(p_hwfn, QED_MSG_SP, DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Load request was sent. Resp:0x%x, Load code: 0x%x\n", "Load request was sent. Load code: 0x%x\n",
rc, load_code); load_code);
qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
p_hwfn->first_on_engine = (load_code == p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE); FW_MSG_CODE_DRV_LOAD_ENGINE);
...@@ -1181,11 +1194,15 @@ int qed_hw_init(struct qed_dev *cdev, ...@@ -1181,11 +1194,15 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall into */ /* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION: case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
p_tunn, p_hwfn->hw_info.hw_mode, p_params->p_tunn,
b_hw_start, int_mode, p_hwfn->hw_info.hw_mode,
allow_npar_tx_switch); p_params->b_hw_start,
p_params->int_mode,
p_params->allow_npar_tx_switch);
break; break;
default: default:
DP_NOTICE(p_hwfn,
"Unexpected load code [0x%08x]", load_code);
rc = -EINVAL; rc = -EINVAL;
break; break;
} }
...@@ -1225,10 +1242,7 @@ int qed_hw_init(struct qed_dev *cdev, ...@@ -1225,10 +1242,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) { if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev); p_hwfn = QED_LEADING_HWFN(cdev);
drv_mb_param = (FW_MAJOR_VERSION << 24) | drv_mb_param = STORM_FW_VERSION;
(FW_MINOR_VERSION << 16) |
(FW_REVISION_VERSION << 8) |
(FW_ENGINEERING_VERSION);
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
drv_mb_param, &load_code, &param); drv_mb_param, &load_code, &param);
...@@ -1303,27 +1317,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev) ...@@ -1303,27 +1317,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev)
int qed_hw_stop(struct qed_dev *cdev) int qed_hw_stop(struct qed_dev *cdev)
{ {
int rc = 0, t_rc; struct qed_hwfn *p_hwfn;
struct qed_ptt *p_ptt;
int rc, rc2 = 0;
int j; int j;
for_each_hwfn(cdev, j) { for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; p_hwfn = &cdev->hwfns[j];
struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; p_ptt = p_hwfn->p_main_ptt;
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(cdev)) { if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn); qed_vf_pf_int_cleanup(p_hwfn);
rc = qed_vf_pf_reset(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn,
"qed_vf_pf_reset failed. rc = %d.\n",
rc);
rc2 = -EINVAL;
}
continue; continue;
} }
/* mark the hw as uninitialized... */ /* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false; p_hwfn->hw_init_done = false;
/* Send unload command to MCP */
rc = qed_mcp_unload_req(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed sending a UNLOAD_REQ command. rc = %d.\n",
rc);
rc2 = -EINVAL;
}
qed_slowpath_irq_sync(p_hwfn);
/* After this point no MFW attentions are expected, e.g. prevent
* race between pf stop and dcbx pf update.
*/
rc = qed_sp_pf_stop(p_hwfn); rc = qed_sp_pf_stop(p_hwfn);
if (rc) if (rc) {
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n"); "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
rc);
rc2 = -EINVAL;
}
qed_wr(p_hwfn, p_ptt, qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
...@@ -1346,20 +1386,37 @@ int qed_hw_stop(struct qed_dev *cdev) ...@@ -1346,20 +1386,37 @@ int qed_hw_stop(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */ /* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000); usleep_range(1000, 2000);
/* Disable PF in HW blocks */
qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
qed_mcp_unload_done(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed sending a UNLOAD_DONE command. rc = %d.\n",
rc);
rc2 = -EINVAL;
}
} }
if (IS_PF(cdev)) { if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
/* Disable DMAE in PXP - in CMT, this should only be done for /* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have * first hw-function, and only after all transactions have
* stopped for all active hw-functions. * stopped for all active hw-functions.
*/ */
t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
cdev->hwfns[0].p_main_ptt, false); if (rc) {
if (t_rc != 0) DP_NOTICE(p_hwfn,
rc = t_rc; "qed_change_pci_hwfn failed. rc = %d.\n", rc);
rc2 = -EINVAL;
}
} }
return rc; return rc2;
} }
void qed_hw_stop_fastpath(struct qed_dev *cdev) void qed_hw_stop_fastpath(struct qed_dev *cdev)
...@@ -1404,89 +1461,6 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) ...@@ -1404,89 +1461,6 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
} }
static int qed_reg_assert(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 reg, bool expected)
{
u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
if (assert_val != expected) {
DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
reg, expected);
return -EINVAL;
}
return 0;
}
int qed_hw_reset(struct qed_dev *cdev)
{
int rc = 0;
u32 unload_resp, unload_param;
u32 wol_param;
int i;
switch (cdev->wol_config) {
case QED_OV_WOL_DISABLED:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
break;
case QED_OV_WOL_ENABLED:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
break;
default:
DP_NOTICE(cdev,
"Unknown WoL configuration %02x\n", cdev->wol_config);
/* Fallthrough */
case QED_OV_WOL_DEFAULT:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
rc = qed_vf_pf_reset(p_hwfn);
if (rc)
return rc;
continue;
}
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */
qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
QM_REG_USG_CNT_PF_TX, 0);
qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
QM_REG_USG_CNT_PF_OTHER, 0);
/* Disable PF in HW blocks */
qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
TCFC_REG_STRONG_ENABLE_PF, 0);
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
CCFC_REG_STRONG_ENABLE_PF, 0);
/* Send unload command to MCP */
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_UNLOAD_REQ, wol_param,
&unload_resp, &unload_param);
if (rc) {
DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
}
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_UNLOAD_DONE,
0, &unload_resp, &unload_param);
if (rc) {
DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
return rc;
}
}
return rc;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */ /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{ {
...@@ -1591,187 +1565,222 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -1591,187 +1565,222 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
RESC_NUM(p_hwfn, QED_SB)); RESC_NUM(p_hwfn, QED_SB));
} }
static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id) const char *qed_hw_get_resc_name(enum qed_resources res_id)
{ {
enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
switch (res_id) { switch (res_id) {
case QED_SB:
mfw_res_id = RESOURCE_NUM_SB_E;
break;
case QED_L2_QUEUE: case QED_L2_QUEUE:
mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; return "L2_QUEUE";
break;
case QED_VPORT: case QED_VPORT:
mfw_res_id = RESOURCE_NUM_VPORT_E; return "VPORT";
break;
case QED_RSS_ENG: case QED_RSS_ENG:
mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; return "RSS_ENG";
break;
case QED_PQ: case QED_PQ:
mfw_res_id = RESOURCE_NUM_PQ_E; return "PQ";
break;
case QED_RL: case QED_RL:
mfw_res_id = RESOURCE_NUM_RL_E; return "RL";
break;
case QED_MAC: case QED_MAC:
return "MAC";
case QED_VLAN: case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */ return "VLAN";
mfw_res_id = RESOURCE_VFC_FILTER_E; case QED_RDMA_CNQ_RAM:
break; return "RDMA_CNQ_RAM";
case QED_ILT: case QED_ILT:
mfw_res_id = RESOURCE_ILT_E; return "ILT";
break; case QED_LL2_QUEUE:
return "LL2_QUEUE";
case QED_CMDQS_CQS:
return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
return "RDMA_STATS_QUEUE";
case QED_BDQ:
return "BDQ";
case QED_SB:
return "SB";
default:
return "UNKNOWN_RESOURCE";
}
}
static int
__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_resources res_id,
u32 resc_max_val, u32 *p_mcp_resp)
{
int rc;
rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc) {
DP_NOTICE(p_hwfn,
"MFW response failure for a max value setting of resource %d [%s]\n",
res_id, qed_hw_get_resc_name(res_id));
return rc;
}
if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
DP_INFO(p_hwfn,
"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
return 0;
}
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
bool b_ah = QED_IS_AH(p_hwfn->cdev);
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
case QED_LL2_QUEUE: case QED_LL2_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E; resc_max_val = MAX_NUM_LL2_RX_QUEUES;
break; break;
case QED_RDMA_CNQ_RAM: case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS: /* No need for a case for QED_CMDQS_CQS since
/* CNQ/CMDQS are the same resource */ * CNQ/CMDQS are the same resource.
mfw_res_id = RESOURCE_CQS_E; */
resc_max_val = NUM_OF_CMDQS_CQS;
break; break;
case QED_RDMA_STATS_QUEUE: case QED_RDMA_STATS_QUEUE:
mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
: RDMA_NUM_STATISTIC_COUNTERS_BB;
break; break;
default: case QED_BDQ:
resc_max_val = BDQ_NUM_RESOURCES;
break; break;
default:
continue;
}
rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
resc_max_val, &mcp_resp);
if (rc)
return rc;
/* There's no point to continue to the next resource if the
* command is not supported by the MFW.
* We do continue if the command is supported but the resource
* is unknown to the MFW. Such a resource will be later
* configured with the default allocation values.
*/
if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
return -EINVAL;
} }
return mfw_res_id; return 0;
} }
static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn, static
enum qed_resources res_id) int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
enum qed_resources res_id,
u32 *p_resc_num, u32 *p_resc_start)
{ {
u8 num_funcs = p_hwfn->num_funcs_on_engine; u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = QED_IS_AH(p_hwfn->cdev); bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_sb_cnt_info sb_cnt_info; struct qed_sb_cnt_info sb_cnt_info;
u32 dflt_resc_num = 0;
switch (res_id) { switch (res_id) {
case QED_SB:
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
dflt_resc_num = sb_cnt_info.sb_cnt;
break;
case QED_L2_QUEUE: case QED_L2_QUEUE:
dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
: MAX_NUM_L2_QUEUES_BB) / num_funcs; MAX_NUM_L2_QUEUES_BB) / num_funcs;
break; break;
case QED_VPORT: case QED_VPORT:
dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs; *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 MAX_NUM_VPORTS_BB) / num_funcs;
: MAX_NUM_VPORTS_BB) / num_funcs;
break; break;
case QED_RSS_ENG: case QED_RSS_ENG:
dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
: ETH_RSS_ENGINE_NUM_BB) / num_funcs; ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break; break;
case QED_PQ: case QED_PQ:
/* The granularity of the PQs is 8 */ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 MAX_QM_TX_QUEUES_BB) / num_funcs;
: MAX_QM_TX_QUEUES_BB) / num_funcs; *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
dflt_resc_num &= ~0x7;
break; break;
case QED_RL: case QED_RL:
dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break; break;
case QED_MAC: case QED_MAC:
case QED_VLAN: case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */ /* Each VFC resource can accommodate both a MAC and a VLAN */
dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break; break;
case QED_ILT: case QED_ILT:
dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
: PXP_NUM_ILT_RECORDS_BB) / num_funcs; PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break; break;
case QED_LL2_QUEUE: case QED_LL2_QUEUE:
dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break; break;
case QED_RDMA_CNQ_RAM: case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS: case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */ /* CNQ/CMDQS are the same resource */
dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs; *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
break; break;
case QED_RDMA_STATS_QUEUE: case QED_RDMA_STATS_QUEUE:
dflt_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
: RDMA_NUM_STATISTIC_COUNTERS_BB) / RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
num_funcs;
break; break;
default: case QED_BDQ:
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
p_hwfn->hw_info.personality != QED_PCI_FCOE)
*p_resc_num = 0;
else
*p_resc_num = 1;
break; break;
case QED_SB:
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
*p_resc_num = sb_cnt_info.sb_cnt;
break;
default:
return -EINVAL;
} }
return dflt_resc_num;
}
static const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
switch (res_id) { switch (res_id) {
case QED_SB: case QED_BDQ:
return "SB"; if (!*p_resc_num)
case QED_L2_QUEUE: *p_resc_start = 0;
return "L2_QUEUE"; else if (p_hwfn->cdev->num_ports_in_engines == 4)
case QED_VPORT: *p_resc_start = p_hwfn->port_id;
return "VPORT"; else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
case QED_RSS_ENG: *p_resc_start = p_hwfn->port_id;
return "RSS_ENG"; else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
case QED_PQ: *p_resc_start = p_hwfn->port_id + 2;
return "PQ"; break;
case QED_RL:
return "RL";
case QED_MAC:
return "MAC";
case QED_VLAN:
return "VLAN";
case QED_RDMA_CNQ_RAM:
return "RDMA_CNQ_RAM";
case QED_ILT:
return "ILT";
case QED_LL2_QUEUE:
return "LL2_QUEUE";
case QED_CMDQS_CQS:
return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
return "RDMA_STATS_QUEUE";
default: default:
return "UNKNOWN_RESOURCE"; *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
break;
} }
return 0;
} }
static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
enum qed_resources res_id) enum qed_resources res_id)
{ {
u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param; u32 dflt_resc_num = 0, dflt_resc_start = 0;
u32 *p_resc_num, *p_resc_start; u32 mcp_resp, *p_resc_num, *p_resc_start;
struct resource_info resc_info;
int rc; int rc;
p_resc_num = &RESC_NUM(p_hwfn, res_id); p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id); p_resc_start = &RESC_START(p_hwfn, res_id);
/* Default values assumes that each function received equal share */ rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id); &dflt_resc_start);
if (!dflt_resc_num) { if (rc) {
DP_ERR(p_hwfn, DP_ERR(p_hwfn,
"Failed to get default amount for resource %d [%s]\n", "Failed to get default amount for resource %d [%s]\n",
res_id, qed_hw_get_resc_name(res_id)); res_id, qed_hw_get_resc_name(res_id));
return -EINVAL; return rc;
}
dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
memset(&resc_info, 0, sizeof(resc_info));
resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
if (resc_info.res_id == RESOURCE_NUM_INVALID) {
DP_ERR(p_hwfn,
"Failed to match resource %d [%s] with the MFW resources\n",
res_id, qed_hw_get_resc_name(res_id));
return -EINVAL;
} }
rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info, rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
&mcp_resp, &mcp_param); &mcp_resp, p_resc_num, p_resc_start);
if (rc) { if (rc) {
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"MFW response failure for an allocation request for resource %d [%s]\n", "MFW response failure for an allocation request for resource %d [%s]\n",
...@@ -1784,10 +1793,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, ...@@ -1784,10 +1793,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
* - There is an internal error in the MFW while processing the request * - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW * - The resource ID is unknown to the MFW
*/ */
if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK && if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) { DP_INFO(p_hwfn,
DP_NOTICE(p_hwfn, "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
"Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
res_id, res_id,
qed_hw_get_resc_name(res_id), qed_hw_get_resc_name(res_id),
mcp_resp, dflt_resc_num, dflt_resc_start); mcp_resp, dflt_resc_num, dflt_resc_start);
...@@ -1798,13 +1806,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, ...@@ -1798,13 +1806,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
/* Special handling for status blocks; Would be revised in future */ /* Special handling for status blocks; Would be revised in future */
if (res_id == QED_SB) { if (res_id == QED_SB) {
resc_info.size -= 1; *p_resc_num -= 1;
resc_info.offset -= p_hwfn->enabled_func_idx; *p_resc_start -= p_hwfn->enabled_func_idx;
} }
*p_resc_num = resc_info.size;
*p_resc_start = resc_info.offset;
out: out:
/* PQs have to divide by 8 [that's the HW granularity]. /* PQs have to divide by 8 [that's the HW granularity].
* Reduce number so it would fit. * Reduce number so it would fit.
...@@ -1822,18 +1826,85 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, ...@@ -1822,18 +1826,85 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{ {
bool b_ah = QED_IS_AH(p_hwfn->cdev);
u8 res_id;
int rc; int rc;
u8 res_id;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
rc = qed_hw_set_resc_info(p_hwfn, res_id); rc = __qed_hw_set_resc_info(p_hwfn, res_id);
if (rc) if (rc)
return rc; return rc;
} }
return 0;
}
#define QED_RESC_ALLOC_LOCK_RETRY_CNT 10
#define QED_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
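With these defaults, the resource-lock acquisition below is retried up to 10 times with a roughly 10 msec sleep between attempts (sleep_b4_retry is set), i.e. on the order of 10 * 10 ms = 100 ms in the worst case before qed_hw_get_resc() fails with -EBUSY.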
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_resc_unlock_params resc_unlock_params;
struct qed_resc_lock_params resc_lock_params;
bool b_ah = QED_IS_AH(p_hwfn->cdev);
u8 res_id;
int rc;
/* Setting the max values of the soft resources and the following
* resources allocation queries should be atomic. Since several PFs can
* run in parallel - a resource lock is needed.
* If either the resource lock or resource set value commands are not
* supported - skip the max values setting, release the lock if
* needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail.
*/
memset(&resc_lock_params, 0, sizeof(resc_lock_params));
resc_lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
resc_lock_params.retry_num = QED_RESC_ALLOC_LOCK_RETRY_CNT;
resc_lock_params.retry_interval = QED_RESC_ALLOC_LOCK_RETRY_INTVL_US;
resc_lock_params.sleep_b4_retry = true;
memset(&resc_unlock_params, 0, sizeof(resc_unlock_params));
resc_unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
if (rc && rc != -EINVAL) {
return rc;
} else if (rc == -EINVAL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
} else if (!rc && !resc_lock_params.b_granted) {
DP_NOTICE(p_hwfn,
"Failed to acquire the resource lock for the resource allocation commands\n");
return -EBUSY;
} else {
rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
if (rc && rc != -EINVAL) {
DP_NOTICE(p_hwfn,
"Failed to set the max values of the soft resources\n");
goto unlock_and_exit;
} else if (rc == -EINVAL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc)
DP_INFO(p_hwfn,
"Failed to release the resource lock for the resource allocation commands\n");
}
}
rc = qed_hw_set_resc_info(p_hwfn);
if (rc)
goto unlock_and_exit;
if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
if (rc)
DP_INFO(p_hwfn,
"Failed to release the resource lock for the resource allocation commands\n");
}
/* Sanity for ILT */ /* Sanity for ILT */
if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) || if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
(!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) { (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
...@@ -1845,8 +1916,6 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) ...@@ -1845,8 +1916,6 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
qed_hw_set_feat(p_hwfn); qed_hw_set_feat(p_hwfn);
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
"The numbers for each resource are:\n");
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
qed_hw_get_resc_name(res_id), qed_hw_get_resc_name(res_id),
...@@ -1854,6 +1923,11 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) ...@@ -1854,6 +1923,11 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
RESC_START(p_hwfn, res_id)); RESC_START(p_hwfn, res_id));
return 0; return 0;
unlock_and_exit:
if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
return rc;
} }
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
...@@ -2184,7 +2258,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, ...@@ -2184,7 +2258,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
if (qed_mcp_is_init(p_hwfn)) if (qed_mcp_is_init(p_hwfn))
p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
return qed_hw_get_resc(p_hwfn); return qed_hw_get_resc(p_hwfn, p_ptt);
} }
static int qed_get_dev_info(struct qed_dev *cdev) static int qed_get_dev_info(struct qed_dev *cdev)
...@@ -2306,6 +2380,15 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, ...@@ -2306,6 +2380,15 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
goto err2; goto err2;
} }
/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
* is called as it sets the ports number in an engine.
*/
if (IS_LEAD_HWFN(p_hwfn)) {
rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
}
/* Allocate the init RT array and initialize the init-ops engine */ /* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn); rc = qed_init_alloc(p_hwfn);
if (rc) if (rc)
......
...@@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev); ...@@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev);
*/ */
void qed_resc_setup(struct qed_dev *cdev); void qed_resc_setup(struct qed_dev *cdev);
enum qed_override_force_load {
QED_OVERRIDE_FORCE_LOAD_NONE,
QED_OVERRIDE_FORCE_LOAD_ALWAYS,
QED_OVERRIDE_FORCE_LOAD_NEVER,
};
struct qed_drv_load_params {
/* Indicates whether the driver is running over a crash kernel.
* As part of the load request, this will be used for providing the
* driver role to the MFW.
* In case of a crash kernel over PDA - this should be set to false.
*/
bool is_crash_kernel;
/* The timeout value that the MFW should use when locking the engine for
* the driver load process.
* A value of '0' means the default value, and '255' means no timeout.
*/
u8 mfw_timeout_val;
#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
#define QED_LOAD_REQ_LOCK_TO_NONE 255
/* Avoid engine reset when first PF loads on it */
bool avoid_eng_reset;
/* Allow overriding the default force load behavior */
enum qed_override_force_load override_force_load;
};
struct qed_hw_init_params {
/* Tunneling parameters */
struct qed_tunn_start_params *p_tunn;
bool b_hw_start;
/* Interrupt mode [msix, inta, etc.] to use */
enum qed_int_mode int_mode;
/* NPAR tx switching to be used for vports for tx-switching */
bool allow_npar_tx_switch;
/* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data;
/* Driver load parameters */
struct qed_drv_load_params *p_drv_load_params;
};
/** /**
* @brief qed_hw_init - * @brief qed_hw_init -
* *
* @param cdev * @param cdev
* @param p_tunn * @param p_params
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
* @param bin_fw_data - binary fw data pointer in binary fw file.
* Pass NULL if not using binary fw file.
* *
* @return int * @return int
*/ */
int qed_hw_init(struct qed_dev *cdev, int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
const u8 *bin_fw_data);
/** /**
* @brief qed_hw_timers_stop_all - stop the timers HW block * @brief qed_hw_timers_stop_all - stop the timers HW block
...@@ -140,14 +177,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev); ...@@ -140,14 +177,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev);
*/ */
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
/**
* @brief qed_hw_reset -
*
* @param cdev
*
* @return int
*/
int qed_hw_reset(struct qed_dev *cdev);
/** /**
* @brief qed_hw_prepare - * @brief qed_hw_prepare -
...@@ -441,4 +470,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, ...@@ -441,4 +470,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
*/ */
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 coalesce, u8 qid, u16 sb_id); u16 coalesce, u8 qid, u16 sb_id);
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif #endif
...@@ -191,7 +191,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, ...@@ -191,7 +191,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi; p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi; p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id); p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ], DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
...@@ -512,19 +512,31 @@ static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) ...@@ -512,19 +512,31 @@ static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id) u8 bdq_id)
{ {
u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
QED_BDQ),
bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
}
} }
static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id) u8 bdq_id)
{ {
u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
QED_BDQ),
bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
}
} }
struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn) struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
......
...@@ -9887,9 +9887,11 @@ struct public_func { ...@@ -9887,9 +9887,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff #define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0 #define DRV_ID_PDA_COMP_VER_SHIFT 0
#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 #define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16 #define DRV_ID_MCP_HSI_VER_SHIFT 16
#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT) #define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000 #define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24 #define DRV_ID_DRV_TYPE_SHIFT 24
...@@ -9984,6 +9986,7 @@ enum resource_id_enum { ...@@ -9984,6 +9986,7 @@ enum resource_id_enum {
RESOURCE_NUM_RSS_ENGINES_E = 14, RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15, RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16, RESOURCE_RDMA_STATS_QUEUE_E = 16,
RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM, RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF RESOURCE_NUM_INVALID = 0xFFFFFFFF
}; };
...@@ -10001,6 +10004,46 @@ struct resource_info { ...@@ -10001,6 +10004,46 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0) #define RESOURCE_ELEMENT_STRICT (1 << 0)
}; };
#define DRV_ROLE_NONE 0
#define DRV_ROLE_PREBOOT 1
#define DRV_ROLE_OS 2
#define DRV_ROLE_KDUMP 3
struct load_req_stc {
u32 drv_ver_0;
u32 drv_ver_1;
u32 fw_ver;
u32 misc0;
#define LOAD_REQ_ROLE_MASK 0x000000FF
#define LOAD_REQ_ROLE_SHIFT 0
#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
#define LOAD_REQ_LOCK_TO_SHIFT 8
#define LOAD_REQ_LOCK_TO_DEFAULT 0
#define LOAD_REQ_LOCK_TO_NONE 255
#define LOAD_REQ_FORCE_MASK 0x000F0000
#define LOAD_REQ_FORCE_SHIFT 16
#define LOAD_REQ_FORCE_NONE 0
#define LOAD_REQ_FORCE_PF 1
#define LOAD_REQ_FORCE_ALL 2
#define LOAD_REQ_FLAGS0_MASK 0x00F00000
#define LOAD_REQ_FLAGS0_SHIFT 20
#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
};
struct load_rsp_stc {
u32 drv_ver_0;
u32 drv_ver_1;
u32 fw_ver;
u32 misc0;
#define LOAD_RSP_ROLE_MASK 0x000000FF
#define LOAD_RSP_ROLE_SHIFT 0
#define LOAD_RSP_HSI_MASK 0x0000FF00
#define LOAD_RSP_HSI_SHIFT 8
#define LOAD_RSP_FLAGS0_MASK 0x000F0000
#define LOAD_RSP_FLAGS0_SHIFT 16
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
union drv_union_data { union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac; struct mcp_mac wol_mac;
...@@ -10032,6 +10075,7 @@ struct public_drv_mb { ...@@ -10032,6 +10075,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000 #define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000 #define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000 #define DRV_MSG_CODE_INIT_HW 0x12000000
#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 #define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000 #define DRV_MSG_CODE_INIT_PHY 0x22000000
...@@ -10044,12 +10088,14 @@ struct public_drv_mb { ...@@ -10044,12 +10088,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 #define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000 #define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 #define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 #define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
...@@ -10075,6 +10121,33 @@ struct public_drv_mb { ...@@ -10075,6 +10121,33 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
#define RESOURCE_OPCODE_REQ 1
#define RESOURCE_OPCODE_REQ_WO_AGING 2
#define RESOURCE_OPCODE_REQ_W_AGING 3
#define RESOURCE_OPCODE_RELEASE 4
#define RESOURCE_OPCODE_FORCE_RELEASE 5
#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
#define RESOURCE_CMD_REQ_AGE_SHIFT 8
#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
#define RESOURCE_OPCODE_GNT 1
#define RESOURCE_OPCODE_BUSY 2
#define RESOURCE_OPCODE_RELEASED 3
#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
#define RESOURCE_OPCODE_WRONG_OWNER 5
#define RESOURCE_OPCODE_UNKNOWN_CMD 255
#define RESOURCE_DUMP 0
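The qed_mcp_resc_lock()/qed_mcp_resc_unlock() implementations are not part of this excerpt, but as an illustrative sketch (not the patch's actual code), a RESOURCE_CMD request parameter could be composed from these fields with the helpers added to qed.h; resource_num and timeout_sec are hypothetical locals:

	u32 param = 0;

	/* resource id, opcode and aging period for a RESOURCE_CMD request */
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, resource_num);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, RESOURCE_OPCODE_REQ_W_AGING);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, timeout_sec);

	/* ... sent via DRV_MSG_CODE_RESOURCE_CMD; the response's OWNER and
	 * OPCODE fields are then decoded with QED_MFW_GET_FIELD() the same way.
	 */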
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 #define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000 #define DRV_MSG_CODE_OS_WOL 0x002e0000
...@@ -10163,12 +10236,16 @@ struct public_drv_mb { ...@@ -10163,12 +10236,16 @@ struct public_drv_mb {
u32 fw_mb_header; u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_UNSUPPORTED 0x00000000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 #define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 #define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000 #define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 #define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 #define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
...@@ -10188,6 +10265,10 @@ struct public_drv_mb { ...@@ -10188,6 +10265,10 @@ struct public_drv_mb {
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param; u32 fw_mb_param;
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
/* get pf rdma protocol command responce */ /* get pf rdma protocol command responce */
#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 #define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
......
...@@ -216,7 +216,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, ...@@ -216,7 +216,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
} }
p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id); p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ], DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
p_params->bdq_pbl_base_addr[BDQ_ID_RQ]); p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
...@@ -593,21 +593,31 @@ static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) ...@@ -593,21 +593,31 @@ static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id) u8 bdq_id)
{ {
u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id); if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
QED_BDQ),
bdq_id); bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
}
} }
static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id) u8 bdq_id)
{ {
u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id); if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
QED_BDQ),
bdq_id); bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
}
} }
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn, static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h> #include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h> #include <linux/qed/qed_ll2_if.h>
...@@ -589,6 +590,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn) ...@@ -589,6 +590,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc; return rc;
} }
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
struct qed_dev *cdev = p_hwfn->cdev;
u8 id = p_hwfn->my_id;
u32 int_mode;
int_mode = cdev->int_params.out.int_mode;
if (int_mode == QED_INT_MODE_MSIX)
synchronize_irq(cdev->int_params.msix_table[id].vector);
else
synchronize_irq(cdev->pdev->irq);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev) static void qed_slowpath_irq_free(struct qed_dev *cdev)
{ {
int i; int i;
...@@ -631,19 +645,6 @@ static int qed_nic_stop(struct qed_dev *cdev) ...@@ -631,19 +645,6 @@ static int qed_nic_stop(struct qed_dev *cdev)
return rc; return rc;
} }
static int qed_nic_reset(struct qed_dev *cdev)
{
int rc;
rc = qed_hw_reset(cdev);
if (rc)
return rc;
qed_resc_free(cdev);
return 0;
}
static int qed_nic_setup(struct qed_dev *cdev) static int qed_nic_setup(struct qed_dev *cdev)
{ {
int rc, i; int rc, i;
...@@ -901,6 +902,8 @@ static void qed_update_pf_params(struct qed_dev *cdev, ...@@ -901,6 +902,8 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev, static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params) struct qed_slowpath_params *params)
{ {
struct qed_drv_load_params drv_load_params;
struct qed_hw_init_params hw_init_params;
struct qed_tunn_start_params tunn_info; struct qed_tunn_start_params tunn_info;
struct qed_mcp_drv_version drv_version; struct qed_mcp_drv_version drv_version;
const u8 *data = NULL; const u8 *data = NULL;
...@@ -966,9 +969,21 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -966,9 +969,21 @@ static int qed_slowpath_start(struct qed_dev *cdev,
tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
/* Start the slowpath */ /* Start the slowpath */
rc = qed_hw_init(cdev, &tunn_info, true, memset(&hw_init_params, 0, sizeof(hw_init_params));
cdev->int_params.out.int_mode, hw_init_params.p_tunn = &tunn_info;
true, data); hw_init_params.b_hw_start = true;
hw_init_params.int_mode = cdev->int_params.out.int_mode;
hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
memset(&drv_load_params, 0, sizeof(drv_load_params));
drv_load_params.is_crash_kernel = is_kdump_kernel();
drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
drv_load_params.avoid_eng_reset = false;
drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
hw_init_params.p_drv_load_params = &drv_load_params;
rc = qed_hw_init(cdev, &hw_init_params);
if (rc) if (rc)
goto err2; goto err2;
...@@ -1043,7 +1058,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) ...@@ -1043,7 +1058,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
} }
qed_disable_msix(cdev); qed_disable_msix(cdev);
qed_nic_reset(cdev);
qed_resc_free(cdev);
qed_iov_wq_stop(cdev, true); qed_iov_wq_stop(cdev, true);
......
...@@ -550,32 +550,12 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, ...@@ -550,32 +550,12 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param) u32 *o_mcp_param)
{ {
struct qed_mcp_mb_params mb_params; struct qed_mcp_mb_params mb_params;
struct mcp_mac wol_mac;
int rc; int rc;
memset(&mb_params, 0, sizeof(mb_params)); memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd; mb_params.cmd = cmd;
mb_params.param = param; mb_params.param = param;
/* In case of UNLOAD_DONE, set the primary MAC */
if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
(p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
u8 *p_mac = p_hwfn->cdev->wol_mac;
memset(&wol_mac, 0, sizeof(wol_mac));
wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
p_mac[4] << 8 | p_mac[5];
DP_VERBOSE(p_hwfn,
(QED_MSG_SP | NETIF_MSG_IFDOWN),
"Setting WoL MAC: %pM --> [%08x,%08x]\n",
p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
mb_params.p_data_src = &wol_mac;
mb_params.data_src_size = sizeof(wol_mac);
}
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) if (rc)
return rc; return rc;
...@@ -618,51 +598,408 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, ...@@ -618,51 +598,408 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
int qed_mcp_load_req(struct qed_hwfn *p_hwfn, static bool
struct qed_ptt *p_ptt, u32 *p_load_code) qed_mcp_can_force_load(u8 drv_role,
u8 exist_drv_role,
enum qed_override_force_load override_force_load)
{
bool can_force_load = false;
switch (override_force_load) {
case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
can_force_load = true;
break;
case QED_OVERRIDE_FORCE_LOAD_NEVER:
can_force_load = false;
break;
default:
can_force_load = (drv_role == DRV_ROLE_OS &&
exist_drv_role == DRV_ROLE_PREBOOT) ||
(drv_role == DRV_ROLE_KDUMP &&
exist_drv_role == DRV_ROLE_OS);
break;
}
return can_force_load;
}
static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 resp = 0, param = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
&resp, &param);
if (rc)
DP_NOTICE(p_hwfn,
"Failed to send cancel load request, rc = %d\n", rc);
return rc;
}
#define CONFIG_QEDE_BITMAP_IDX BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1)
#define CONFIG_QEDR_BITMAP_IDX BIT(2)
#define CONFIG_QEDF_BITMAP_IDX BIT(4)
#define CONFIG_QEDI_BITMAP_IDX BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX BIT(6)
static u32 qed_get_config_bitmap(void)
{
u32 config_bitmap = 0x0;
if (IS_ENABLED(CONFIG_QEDE))
config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
if (IS_ENABLED(CONFIG_QED_SRIOV))
config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
if (IS_ENABLED(CONFIG_QED_RDMA))
config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
if (IS_ENABLED(CONFIG_QED_FCOE))
config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
if (IS_ENABLED(CONFIG_QED_ISCSI))
config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
if (IS_ENABLED(CONFIG_QED_LL2))
config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
return config_bitmap;
}
struct qed_load_req_in_params {
u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT 0
#define QED_LOAD_REQ_HSI_VER_1 1
u32 drv_ver_0;
u32 drv_ver_1;
u32 fw_ver;
u8 drv_role;
u8 timeout_val;
u8 force_cmd;
bool avoid_eng_reset;
};
struct qed_load_req_out_params {
u32 load_code;
u32 exist_drv_ver_0;
u32 exist_drv_ver_1;
u32 exist_fw_ver;
u8 exist_drv_role;
u8 mfw_hsi_ver;
bool drv_exists;
};
static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_load_req_in_params *p_in_params,
struct qed_load_req_out_params *p_out_params)
{ {
struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params; struct qed_mcp_mb_params mb_params;
union drv_union_data union_data; struct load_req_stc load_req;
struct load_rsp_stc load_rsp;
u32 hsi_ver;
int rc; int rc;
memset(&load_req, 0, sizeof(load_req));
load_req.drv_ver_0 = p_in_params->drv_ver_0;
load_req.drv_ver_1 = p_in_params->drv_ver_1;
load_req.fw_ver = p_in_params->fw_ver;
QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
p_in_params->timeout_val);
QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
p_in_params->force_cmd);
QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
p_in_params->avoid_eng_reset);
hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
DRV_ID_MCP_HSI_VER_CURRENT :
(p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
memset(&mb_params, 0, sizeof(mb_params)); memset(&mb_params, 0, sizeof(mb_params));
/* Load Request */
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
cdev->drv_type; mb_params.p_data_src = &load_req;
memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE); mb_params.data_src_size = sizeof(load_req);
mb_params.p_data_src = &union_data; mb_params.p_data_dst = &load_rsp;
mb_params.data_src_size = sizeof(union_data.ver_str); mb_params.data_dst_size = sizeof(load_rsp);
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */ DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
mb_params.param,
QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
load_req.drv_ver_0,
load_req.drv_ver_1,
load_req.fw_ver,
load_req.misc0,
QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
QED_MFW_GET_FIELD(load_req.misc0,
LOAD_REQ_LOCK_TO),
QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
}
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) { if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n"); DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
return rc; return rc;
} }
*p_load_code = mb_params.mcp_resp; DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Load Response: resp 0x%08x\n", mb_params.mcp_resp);
p_out_params->load_code = mb_params.mcp_resp;
if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
load_rsp.drv_ver_0,
load_rsp.drv_ver_1,
load_rsp.fw_ver,
load_rsp.misc0,
QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
p_out_params->exist_fw_ver = load_rsp.fw_ver;
p_out_params->exist_drv_role =
QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
p_out_params->mfw_hsi_ver =
QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
p_out_params->drv_exists =
QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
LOAD_RSP_FLAGS0_DRV_EXISTS;
}
return 0;
}
static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
enum qed_drv_role drv_role,
u8 *p_mfw_drv_role)
{
switch (drv_role) {
case QED_DRV_ROLE_OS:
*p_mfw_drv_role = DRV_ROLE_OS;
break;
case QED_DRV_ROLE_KDUMP:
*p_mfw_drv_role = DRV_ROLE_KDUMP;
break;
default:
DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
return -EINVAL;
}
return 0;
}
enum qed_load_req_force {
QED_LOAD_REQ_FORCE_NONE,
QED_LOAD_REQ_FORCE_PF,
QED_LOAD_REQ_FORCE_ALL,
};
static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
enum qed_load_req_force force_cmd,
u8 *p_mfw_force_cmd)
{
switch (force_cmd) {
case QED_LOAD_REQ_FORCE_NONE:
*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
break;
case QED_LOAD_REQ_FORCE_PF:
*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
break;
case QED_LOAD_REQ_FORCE_ALL:
*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
break;
}
}
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_load_req_params *p_params)
{
struct qed_load_req_out_params out_params;
struct qed_load_req_in_params in_params;
u8 mfw_drv_role, mfw_force_cmd;
int rc;
memset(&in_params, 0, sizeof(in_params));
in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
in_params.drv_ver_0 = QED_VERSION;
in_params.drv_ver_1 = qed_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
if (rc)
return rc;
in_params.drv_role = mfw_drv_role;
in_params.timeout_val = p_params->timeout_val;
qed_get_mfw_force_cmd(p_hwfn,
QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
in_params.avoid_eng_reset = p_params->avoid_eng_reset;
memset(&out_params, 0, sizeof(out_params));
rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
if (rc)
return rc;
/* First handle cases where another load request should/might be sent:
* - MFW expects the old interface [HSI version = 1]
* - MFW responds that a force load request is required
*/
if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
DP_INFO(p_hwfn,
"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
memset(&out_params, 0, sizeof(out_params));
rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
if (rc)
return rc;
} else if (out_params.load_code ==
FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
if (qed_mcp_can_force_load(in_params.drv_role,
out_params.exist_drv_role,
p_params->override_force_load)) {
DP_INFO(p_hwfn,
"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
in_params.drv_role, in_params.fw_ver,
in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
qed_get_mfw_force_cmd(p_hwfn,
QED_LOAD_REQ_FORCE_ALL,
&mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
memset(&out_params, 0, sizeof(out_params));
rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
&out_params);
if (rc)
return rc;
} else {
DP_NOTICE(p_hwfn,
"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
in_params.drv_role, in_params.fw_ver,
in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
DP_NOTICE(p_hwfn,
"Avoid sending a force load request to prevent disruption of active PFs\n");
qed_mcp_cancel_load_req(p_hwfn, p_ptt);
return -EBUSY;
}
}
/* Now handle the other types of responses.
* The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
* expected here after the additional revised load requests were sent.
*/
switch (out_params.load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
case FW_MSG_CODE_DRV_LOAD_PORT:
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
out_params.drv_exists) {
/* The role and fw/driver version match, but the PF is
* already loaded and has not been unloaded gracefully.
*/
DP_NOTICE(p_hwfn,
"PF is already loaded\n");
return -EINVAL;
}
break;
default:
DP_NOTICE(p_hwfn,
"Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
out_params.load_code);
return -EBUSY;
}
p_params->load_code = out_params.load_code;
return 0;
}
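For orientation, a minimal sketch of how a caller (e.g. the hardware-init flow) might drive the new load-request interface; the exact call site, the timeout value and the error handling are assumptions for illustration, not part of this hunk:

/* Hedged usage sketch - assumes p_hwfn and its p_main_ptt from the init flow */
struct qed_load_req_params load_req_params;
int rc;

memset(&load_req_params, 0, sizeof(load_req_params));
load_req_params.drv_role = QED_DRV_ROLE_OS;
load_req_params.timeout_val = 0;	/* assumed: default MFW lock timeout */
load_req_params.avoid_eng_reset = false;

rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_req_params);
if (rc)
	return rc;

/* The returned load_code states whether this PF is first on the engine,
 * first on the port, or just another function.
 */
switch (load_req_params.load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
case FW_MSG_CODE_DRV_LOAD_PORT:
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
	break;
default:
	return -EINVAL;
}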
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 wol_param, mcp_resp, mcp_param;
switch (p_hwfn->cdev->wol_config) {
case QED_OV_WOL_DISABLED:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
break;
case QED_OV_WOL_ENABLED:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
break;
default:
DP_NOTICE(p_hwfn,
"Unknown WoL configuration %02x\n",
p_hwfn->cdev->wol_config);
/* Fallthrough */
case QED_OV_WOL_DEFAULT:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
&mcp_resp, &mcp_param);
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_mb_params mb_params;
struct mcp_mac wol_mac;
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
/* Set the primary MAC if WoL is enabled */
if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
u8 *p_mac = p_hwfn->cdev->wol_mac;
memset(&wol_mac, 0, sizeof(wol_mac));
wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
p_mac[4] << 8 | p_mac[5];
DP_VERBOSE(p_hwfn,
(QED_MSG_SP | NETIF_MSG_IFDOWN),
"Setting WoL MAC: %pM --> [%08x,%08x]\n",
p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
mb_params.p_data_src = &wol_mac;
mb_params.data_src_size = sizeof(wol_mac);
}
return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
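As a worked example of the byte packing above (hypothetical MAC, not taken from the patch):

/* p_mac = 00:11:22:33:44:55 (hypothetical)
 * mac_upper = (0x00 << 8) | 0x11                               = 0x00000011
 * mac_lower = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 = 0x22334455
 * i.e. the trace above would print "[00000011,22334455]".
 */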
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
...@@ -1883,46 +2220,396 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
return rc;
}
static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
switch (res_id) {
case QED_SB:
mfw_res_id = RESOURCE_NUM_SB_E;
break;
case QED_L2_QUEUE:
mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
break;
case QED_VPORT:
mfw_res_id = RESOURCE_NUM_VPORT_E;
break;
case QED_RSS_ENG:
mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
break;
case QED_PQ:
mfw_res_id = RESOURCE_NUM_PQ_E;
break;
case QED_RL:
mfw_res_id = RESOURCE_NUM_RL_E;
break;
case QED_MAC:
case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
mfw_res_id = RESOURCE_VFC_FILTER_E;
break;
case QED_ILT:
mfw_res_id = RESOURCE_ILT_E;
break;
case QED_LL2_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
mfw_res_id = RESOURCE_CQS_E;
break;
case QED_RDMA_STATS_QUEUE:
mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
break;
case QED_BDQ:
mfw_res_id = RESOURCE_BDQ_E;
break;
default:
break;
}
return mfw_res_id;
}
#define QED_RESC_ALLOC_VERSION_MAJOR 2
#define QED_RESC_ALLOC_VERSION_MINOR 0
#define QED_RESC_ALLOC_VERSION \
((QED_RESC_ALLOC_VERSION_MAJOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
(QED_RESC_ALLOC_VERSION_MINOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
struct qed_resc_alloc_in_params {
u32 cmd;
enum qed_resources res_id;
u32 resc_max_val;
};
struct qed_resc_alloc_out_params {
u32 mcp_resp;
u32 mcp_param;
u32 resc_num;
u32 resc_start;
u32 vf_resc_num;
u32 vf_resc_start;
u32 flags;
};
static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_alloc_in_params *p_in_params,
struct qed_resc_alloc_out_params *p_out_params)
{
struct qed_mcp_mb_params mb_params;
struct resource_info mfw_resc_info;
int rc;
memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
DP_ERR(p_hwfn,
"Failed to match resource %d [%s] with the MFW resources\n",
p_in_params->res_id,
qed_hw_get_resc_name(p_in_params->res_id));
return -EINVAL;
}
switch (p_in_params->cmd) {
case DRV_MSG_SET_RESOURCE_VALUE_MSG:
mfw_resc_info.size = p_in_params->resc_max_val;
/* Fallthrough */
case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
break;
default:
DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
p_in_params->cmd);
return -EINVAL;
}
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = p_in_params->cmd;
mb_params.param = QED_RESC_ALLOC_VERSION;
mb_params.p_data_src = &mfw_resc_info;
mb_params.data_src_size = sizeof(mfw_resc_info);
mb_params.p_data_dst = mb_params.p_data_src;
mb_params.data_dst_size = mb_params.data_src_size;
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
p_in_params->cmd,
p_in_params->res_id,
qed_hw_get_resc_name(p_in_params->res_id),
QED_MFW_GET_FIELD(mb_params.param,
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
QED_MFW_GET_FIELD(mb_params.param,
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_in_params->resc_max_val);
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
p_out_params->mcp_resp = mb_params.mcp_resp;
p_out_params->mcp_param = mb_params.mcp_param;
p_out_params->resc_num = mfw_resc_info.size;
p_out_params->resc_start = mfw_resc_info.offset;
p_out_params->vf_resc_num = mfw_resc_info.vf_size;
p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
p_out_params->flags = mfw_resc_info.flags;
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
QED_MFW_GET_FIELD(p_out_params->mcp_param,
FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
QED_MFW_GET_FIELD(p_out_params->mcp_param,
FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_out_params->resc_num,
p_out_params->resc_start,
p_out_params->vf_resc_num,
p_out_params->vf_resc_start, p_out_params->flags);
return 0;
}
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_resources res_id,
u32 resc_max_val, u32 *p_mcp_resp)
{
struct qed_resc_alloc_out_params out_params;
struct qed_resc_alloc_in_params in_params;
int rc;
memset(&in_params, 0, sizeof(in_params));
in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
in_params.res_id = res_id;
in_params.resc_max_val = resc_max_val;
memset(&out_params, 0, sizeof(out_params));
rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
&out_params);
if (rc)
return rc;
*p_mcp_resp = out_params.mcp_resp;
return 0;
}
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_resources res_id,
u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
struct qed_resc_alloc_out_params out_params;
struct qed_resc_alloc_in_params in_params;
int rc;
memset(&in_params, 0, sizeof(in_params));
in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
in_params.res_id = res_id;
memset(&out_params, 0, sizeof(out_params));
rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
&out_params);
if (rc)
return rc;
*p_mcp_resp = out_params.mcp_resp;
if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
*p_resc_num = out_params.resc_num;
*p_resc_start = out_params.resc_start;
}
return 0;
}
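A minimal sketch of how the two helpers above might be combined during resource negotiation; the QED_BDQ example value and the fallback policy are assumptions for illustration:

u32 mcp_resp, resc_num = 0, resc_start = 0;
int rc;

/* Advertise the highest number of BDQs this PF could make use of... */
rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, QED_BDQ, 3, &mcp_resp);
if (rc)
	return rc;

/* ...then query what the MFW actually allocated for this PF */
rc = qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_BDQ, &mcp_resp,
			   &resc_num, &resc_start);
if (rc)
	return rc;

if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
	resc_num = 0;	/* assumed fallback: use driver defaults instead */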
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 mcp_resp, mcp_param;
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
&mcp_resp, &mcp_param);
}
static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
p_mcp_resp, p_mcp_param);
if (rc)
return rc;
if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
DP_INFO(p_hwfn,
"The resource command is unsupported by the MFW\n");
return -EINVAL;
}
if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
DP_NOTICE(p_hwfn,
"The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
param, opcode);
return -EINVAL;
}
return rc;
}
int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_lock_params *p_params)
{
u32 param = 0, mcp_resp, mcp_param;
u8 opcode;
int rc;
switch (p_params->timeout) {
case QED_MCP_RESC_LOCK_TO_DEFAULT:
opcode = RESOURCE_OPCODE_REQ;
p_params->timeout = 0;
break;
case QED_MCP_RESC_LOCK_TO_NONE:
opcode = RESOURCE_OPCODE_REQ_WO_AGING;
p_params->timeout = 0;
break;
default:
opcode = RESOURCE_OPCODE_REQ_W_AGING;
break;
}
QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
param, p_params->timeout, opcode, p_params->resource);
/* Attempt to acquire the resource */
rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
if (rc)
return rc;
/* Analyze the response */
p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
mcp_param, opcode, p_params->owner);
switch (opcode) {
case RESOURCE_OPCODE_GNT:
p_params->b_granted = true;
break;
case RESOURCE_OPCODE_BUSY:
p_params->b_granted = false;
break;
default:
DP_NOTICE(p_hwfn,
"Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
mcp_param, opcode);
return -EINVAL;
}
return 0;
}
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
u32 retry_cnt = 0;
int rc;
do {
/* No need for an interval before the first iteration */
if (retry_cnt) {
if (p_params->sleep_b4_retry) {
u16 retry_interval_in_ms =
DIV_ROUND_UP(p_params->retry_interval,
1000);
msleep(retry_interval_in_ms);
} else {
udelay(p_params->retry_interval);
}
}
rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
if (rc)
return rc;
if (p_params->b_granted)
break;
} while (retry_cnt++ < p_params->retry_num);
return 0;
}
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_unlock_params *p_params)
{
u32 param = 0, mcp_resp, mcp_param;
u8 opcode;
int rc;
opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
: RESOURCE_OPCODE_RELEASE;
QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
param, opcode, p_params->resource);
/* Attempt to release the resource */
rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
if (rc)
return rc;
/* Analyze the response */
opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
mcp_param, opcode);
switch (opcode) {
case RESOURCE_OPCODE_RELEASED_PREVIOUS:
DP_INFO(p_hwfn,
"Resource unlock request for an already released resource [%d]\n",
p_params->resource);
/* Fallthrough */
case RESOURCE_OPCODE_RELEASED:
p_params->b_released = true;
break;
case RESOURCE_OPCODE_WRONG_OWNER:
p_params->b_released = false;
break;
default:
DP_NOTICE(p_hwfn,
"Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
mcp_param, opcode);
return -EINVAL;
}
return 0;
}
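A hedged sketch of pairing the lock/unlock helpers around an MFW critical section; the retry values are illustrative assumptions:

struct qed_resc_unlock_params unlock_params;
struct qed_resc_lock_params lock_params;
int rc;

memset(&lock_params, 0, sizeof(lock_params));
lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
lock_params.timeout = QED_MCP_RESC_LOCK_TO_DEFAULT;
lock_params.retry_num = 10;		/* assumed */
lock_params.retry_interval = 10000;	/* usec, assumed */
lock_params.sleep_b4_retry = true;

rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
if (rc || !lock_params.b_granted)
	return rc ? rc : -EBUSY;

/* ... MFW interaction protected by the resource lock ... */

memset(&unlock_params, 0, sizeof(unlock_params));
unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
unlock_params.b_force = false;
rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);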
...@@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h"
#include "qed_dev_api.h"
struct qed_mcp_link_speed_params {
bool autoneg;
...@@ -570,27 +571,55 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn);
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
enum qed_drv_role {
QED_DRV_ROLE_OS,
QED_DRV_ROLE_KDUMP,
};
struct qed_load_req_params {
/* Input params */
enum qed_drv_role drv_role;
u8 timeout_val;
bool avoid_eng_reset;
enum qed_override_force_load override_force_load;
/* Output params */
u32 load_code;
};
/**
* @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
* returns whether this PF is the first on the engine/port or function.
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_load_req_params *p_params);
/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
* @param p_hwfn
* @param p_ptt
*
* @return int - 0 - Operation was successful.
*/
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Sends a UNLOAD_DONE message to the MFW
*
* @param p_hwfn
* @param p_ptt
*
* @return int - 0 - Operation was successful.
*/
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
...@@ -713,6 +742,41 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
/**
* @brief - Sets the MFW's max value for the given resource
*
* @param p_hwfn
* @param p_ptt
* @param res_id
* @param resc_max_val
* @param p_mcp_resp
*
* @return int - 0 - operation was successful.
*/
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_resources res_id,
u32 resc_max_val, u32 *p_mcp_resp);
/**
* @brief - Gets the MFW allocation info for the given resource
*
* @param p_hwfn
* @param p_ptt
* @param res_id
* @param p_mcp_resp
* @param p_resc_num
* @param p_resc_start
*
* @return int - 0 - operation was successful.
*/
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_resources res_id,
u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
/**
* @brief Send eswitch mode to MFW
*
...@@ -726,19 +790,86 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_eswitch eswitch);
#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP
#define QED_MCP_RESC_LOCK_MAX_VAL 31
enum qed_resc_lock {
QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL
};
/**
* @brief - Initiates PF FLR
*
* @param p_hwfn
* @param p_ptt
*
* @return int - 0 - operation was successful.
*/
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
struct qed_resc_lock_params {
/* Resource number [valid values are 0..31] */
u8 resource;
/* Lock timeout value in seconds [default, none or 1..254] */
u8 timeout;
#define QED_MCP_RESC_LOCK_TO_DEFAULT 0
#define QED_MCP_RESC_LOCK_TO_NONE 255
/* Number of times to retry locking */
u8 retry_num;
/* The interval in usec between retries */
u16 retry_interval;
/* Use sleep or delay between retries */
bool sleep_b4_retry;
/* Will be set as true if the resource is free and granted */
bool b_granted;
/* Will be filled with the resource owner.
* [0..15 = PF0-15, 16 = MFW]
*/
u8 owner;
};
/**
* @brief Acquires MFW generic resource lock
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return int - 0 - operation was successful.
*/
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
struct qed_resc_unlock_params {
/* Resource number [valid values are 0..31] */
u8 resource;
/* Allow releasing a resource even if it belongs to another PF */
bool b_force;
/* Will be set as true if the resource is released */
bool b_released;
};
/**
* @brief Releases MFW generic resource lock
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return int - 0 - operation was successful.
*/
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_resc_unlock_params *p_params);
#endif