Commit f31e4b6f authored by Anirudh Venkataramanan, committed by Jeff Kirsher

ice: Start hardware initialization

This patch implements multiple pieces of the initialization flow
as follows:

1) A reset is issued to ensure a clean device state, followed
   by initialization of the admin queue interface.

2) Once the admin queue interface is up, clear the PF config
   and transition the device to non-PXE mode.

3) Read the configuration stored in the device's non-volatile
   memory (NVM) using ice_init_nvm.

CC: Shannon Nelson <shannon.nelson@oracle.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@oracle.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 7ec59eea
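The numbered steps in the commit message map onto the new ice_init_hw()/ice_deinit_hw() pair added to ice_common.c in the diff below. As a reading aid, here is an editor's sketch of the resulting bring-up flow; the helper name ice_bringup_sketch is hypothetical, error handling is trimmed, and the authoritative version is ice_init_hw() in the patch itself.

/* Editor's sketch only -- not part of the patch. Mirrors ice_init_hw();
 * assumes "ice_common.h" is included for the prototypes used here.
 */
static enum ice_status ice_bringup_sketch(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_reset(hw, ICE_RESET_PFR);	/* 1) start from a clean PF state */
	if (status)
		return status;

	status = ice_init_all_ctrlq(hw);	/* 1) bring up the admin queue */
	if (status)
		return status;

	status = ice_clear_pf_cfg(hw);		/* 2) clear the PF configuration */
	if (status)
		goto err_unroll;

	ice_clear_pxe_mode(hw);			/* 2) transition out of PXE mode */

	status = ice_init_nvm(hw);		/* 3) read NVM/Shadow RAM settings */
	if (status)
		goto err_unroll;

	return 0;

err_unroll:
	ice_shutdown_all_ctrlq(hw);
	return status;
}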
@@ -9,4 +9,5 @@ obj-$(CONFIG_ICE) += ice.o
ice-y := ice_main.o \
ice_controlq.o \
ice_common.o \
ice_nvm.o
@@ -16,8 +16,10 @@
#include <linux/bitmap.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_common.h"
#define ICE_BAR0 0
#define ICE_AQ_LEN 64
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
...
@@ -36,6 +36,67 @@ struct ice_aqc_q_shutdown {
u8 reserved[12];
};
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
struct ice_aqc_req_res {
__le16 res_id;
#define ICE_AQC_RES_ID_NVM 1
#define ICE_AQC_RES_ID_SDP 2
#define ICE_AQC_RES_ID_CHNG_LOCK 3
#define ICE_AQC_RES_ID_GLBL_LOCK 4
__le16 access_type;
#define ICE_AQC_RES_ACCESS_READ 1
#define ICE_AQC_RES_ACCESS_WRITE 2
/* Upon successful completion, FW writes this value and driver is
* expected to release resource before timeout. This value is provided
* in milliseconds.
*/
__le32 timeout;
#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
/* For SDP: pin id of the SDP */
__le32 res_number;
/* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
__le16 status;
#define ICE_AQ_RES_GLBL_SUCCESS 0
#define ICE_AQ_RES_GLBL_IN_PROG 1
#define ICE_AQ_RES_GLBL_DONE 2
u8 reserved[2];
};
/* Clear PXE Command and response (direct 0x0110) */
struct ice_aqc_clear_pxe {
u8 rx_cnt;
#define ICE_AQC_CLEAR_PXE_RX_CNT 0x2
u8 reserved[15];
};
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
*/
struct ice_aqc_nvm {
u8 cmd_flags;
#define ICE_AQC_NVM_LAST_CMD BIT(0)
#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
#define ICE_AQC_NVM_PRESERVATION_S 1
#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
u8 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
__le32 offset;
__le32 addr_high;
__le32 addr_low;
};
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -65,6 +126,9 @@ struct ice_aq_desc {
struct ice_aqc_generic generic;
struct ice_aqc_get_ver get_ver;
struct ice_aqc_q_shutdown q_shutdown;
struct ice_aqc_req_res res_owner;
struct ice_aqc_clear_pxe clear_pxe;
struct ice_aqc_nvm nvm;
} params;
};
@@ -82,6 +146,8 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
ICE_AQ_RC_OK = 0, /* success */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* object already exists */
};
/* Admin Queue command opcodes */
@@ -89,6 +155,19 @@ enum ice_adminq_opc {
/* AQ commands */
ice_aqc_opc_get_ver = 0x0001,
ice_aqc_opc_q_shutdown = 0x0003,
/* resource ownership */
ice_aqc_opc_req_res = 0x0008,
ice_aqc_opc_release_res = 0x0009,
/* PXE */
ice_aqc_opc_clear_pxe_mode = 0x0110,
ice_aqc_opc_clear_pf_cfg = 0x02A4,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
@@ -4,6 +4,224 @@
#include "ice_common.h"
#include "ice_adminq_cmd.h"
#define ICE_PF_RESET_WAIT_COUNT 200
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
*
* This function sets the MAC type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
*/
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
return ICE_ERR_DEVICE_NOT_SUPPORTED;
hw->mac_type = ICE_MAC_GENERIC;
return 0;
}
/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*/
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
enum ice_status ice_init_hw(struct ice_hw *hw)
{
enum ice_status status;
/* Set MAC type based on DeviceID */
status = ice_set_mac_type(hw);
if (status)
return status;
hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
PF_FUNC_RID_FUNC_NUM_M) >>
PF_FUNC_RID_FUNC_NUM_S;
status = ice_reset(hw, ICE_RESET_PFR);
if (status)
return status;
status = ice_init_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
status = ice_clear_pf_cfg(hw);
if (status)
goto err_unroll_cqinit;
ice_clear_pxe_mode(hw);
status = ice_init_nvm(hw);
if (status)
goto err_unroll_cqinit;
return 0;
err_unroll_cqinit:
ice_shutdown_all_ctrlq(hw);
return status;
}
/**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure
*/
void ice_deinit_hw(struct ice_hw *hw)
{
ice_shutdown_all_ctrlq(hw);
}
/**
* ice_check_reset - Check to see if a global reset is complete
* @hw: pointer to the hardware structure
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_delay;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
GLGEN_RSTCTL_GRSTDEL_S) + 10;
for (cnt = 0; cnt < grst_delay; cnt++) {
mdelay(100);
reg = rd32(hw, GLGEN_RSTAT);
if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
break;
}
if (cnt == grst_delay) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
}
#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
GLNVM_ULD_GLOBR_DONE_M)
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
if (reg == ICE_RESET_DONE_MASK) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset processes done. %d\n", cnt);
break;
}
mdelay(10);
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT,
"Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg);
return ICE_ERR_RESET_FAILED;
}
return 0;
}
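/* Editor's worked example -- illustrative register value, not part of the
 * patch. If GLGEN_RSTCTL.GRSTDEL reads 20 (20 * 100 ms = 2 s of reset delay),
 * grst_delay becomes 20 + 10 = 30 iterations of mdelay(100), so the loop
 * above waits up to ~3 s: the 2 s hardware delay plus 1 s of headroom for
 * outstanding AQ commands.
 */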
/**
* ice_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
*
* If a global reset has been triggered, this function checks
* for its completion and then issues the PF reset
*/
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
u32 cnt, reg;
/* If at function entry a global reset was already in progress, i.e.
* state is not 'device active' or any of the reset done bits are not
* set in GLNVM_ULD, there is no need for a PF Reset; poll until the
* global reset is done.
*/
if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
(rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
/* poll on global reset currently in progress until done */
if (ice_check_reset(hw))
return ICE_ERR_RESET_FAILED;
return 0;
}
/* Reset the PF */
reg = rd32(hw, PFGEN_CTRL);
wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
mdelay(1);
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT,
"PF reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
}
return 0;
}
/**
* ice_reset - Perform different types of reset
* @hw: pointer to the hardware structure
* @req: reset request
*
* This function triggers a reset as specified by the req parameter.
*
* Note:
* If anything other than a PF reset is triggered, PXE mode is restored.
* This has to be cleared using ice_clear_pxe_mode again, once the AQ
* interface has been restored in the rebuild flow.
*/
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
u32 val = 0;
switch (req) {
case ICE_RESET_PFR:
return ice_pf_reset(hw);
case ICE_RESET_CORER:
ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
val = GLGEN_RTRIG_CORER_M;
break;
case ICE_RESET_GLOBR:
ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
val = GLGEN_RTRIG_GLOBR_M;
break;
}
val |= rd32(hw, GLGEN_RTRIG);
wr32(hw, GLGEN_RTRIG, val);
ice_flush(hw);
/* wait for the FW to be ready */
return ice_check_reset(hw);
}
/**
* ice_debug_cq
* @hw: pointer to the hardware structure
@@ -128,3 +346,190 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_aq_req_res
* @hw: pointer to the hw struct
* @res: resource id
* @access: access type
* @sdp_number: resource number
* @timeout: the maximum time in ms that the driver may hold the resource
* @cd: pointer to command details structure or NULL
*
* requests common resource using the admin queue commands (0x0008)
*/
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd_resp;
struct ice_aq_desc desc;
enum ice_status status;
cmd_resp = &desc.params.res_owner;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
cmd_resp->res_id = cpu_to_le16(res);
cmd_resp->access_type = cpu_to_le16(access);
cmd_resp->res_number = cpu_to_le32(sdp_number);
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* The completion specifies the maximum time in ms that the driver
* may hold the resource in the Timeout field.
* If the resource is held by someone else, the command completes with
* busy return value and the timeout field indicates the maximum time
* the current owner of the resource has to free it.
*/
if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
}
/**
* ice_aq_release_res
* @hw: pointer to the hw struct
* @res: resource id
* @sdp_number: resource number
* @cd: pointer to command details structure or NULL
*
* release common resource using the admin queue commands (0x0009)
*/
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.res_owner;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
cmd->res_id = cpu_to_le16(res);
cmd->res_number = cpu_to_le32(sdp_number);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* ice_acquire_res
* @hw: pointer to the HW structure
* @res: resource id
* @access: access type (read or write)
*
* This function will attempt to acquire the ownership of a resource.
*/
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
enum ice_status status;
u32 time_left = 0;
u32 timeout;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
/* An admin queue return code of ICE_AQ_RC_EEXIST means that another
* driver has previously acquired the resource and performed any
* necessary updates; in this case the caller does not obtain the
* resource and has no further work to do.
*/
if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
status = ICE_ERR_AQ_NO_WORK;
goto ice_acquire_res_exit;
}
if (status)
ice_debug(hw, ICE_DBG_RES,
"resource %d acquire type %d failed.\n", res, access);
/* If necessary, poll until the current lock owner times out */
timeout = time_left;
while (status && timeout && time_left) {
mdelay(delay);
timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
/* lock free, but no work to do */
status = ICE_ERR_AQ_NO_WORK;
break;
}
if (!status)
/* lock acquired */
break;
}
if (status && status != ICE_ERR_AQ_NO_WORK)
ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
ice_acquire_res_exit:
if (status == ICE_ERR_AQ_NO_WORK) {
if (access == ICE_RES_WRITE)
ice_debug(hw, ICE_DBG_RES,
"resource indicates no work to do.\n");
else
ice_debug(hw, ICE_DBG_RES,
"Warning: ICE_ERR_AQ_NO_WORK not expected\n");
}
return status;
}
/**
* ice_release_res
* @hw: pointer to the HW structure
* @res: resource id
*
* This function will release a resource using the proper Admin Command.
*/
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
enum ice_status status;
u32 total_delay = 0;
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
* results in an admin Q timeout, so handle them correctly
*/
while ((status == ICE_ERR_AQ_TIMEOUT) &&
(total_delay < hw->adminq.sq_cmd_timeout)) {
mdelay(1);
status = ice_aq_release_res(hw, res, 0, NULL);
total_delay++;
}
}
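Taken together, ice_aq_req_res()/ice_aq_release_res() and the ice_acquire_res()/ice_release_res() wrappers give callers a simple ownership protocol for shared firmware resources. An editor's usage sketch follows; the caller name ice_nvm_work_sketch is hypothetical, and ice_read_sr_word() in the new ice_nvm.c below follows this same pattern for the NVM resource.

/* Editor's sketch only -- not part of the patch. */
static enum ice_status ice_nvm_work_sketch(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ);
	if (status)
		return status;	/* busy past the owner's timeout, AQ error, or no work to do */

	/* ... issue NVM admin queue commands (e.g. 0x0701 reads) here ... */

	ice_release_res(hw, ICE_NVM_RES_ID);
	return 0;
}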
/**
* ice_aq_clear_pxe_mode
* @hw: pointer to the hw struct
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_clear_pxe_mode - clear pxe operations mode
* @hw: pointer to the hw struct
*
* Make sure all PXE mode settings are cleared, including things
* like descriptor fetch/write-back mode.
*/
void ice_clear_pxe_mode(struct ice_hw *hw)
{
if (ice_check_sq_alive(hw, &hw->adminq))
ice_aq_clear_pxe_mode(hw);
}
@@ -9,12 +9,22 @@
void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
@@ -22,4 +32,5 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
@@ -6,6 +6,9 @@
#include "ice_adminq_cmd.h"
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
...
@@ -28,5 +28,35 @@
#define PF_FW_ATQLEN_ATQENABLE_S 31
#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
#define PF_FW_ATQT 0x00080400
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_S 0
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S)
#define GLGEN_RSTCTL 0x000B8180
#define GLGEN_RSTCTL_GRSTDEL_S 0
#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
#define GLGEN_RTRIG 0x000B8190
#define GLGEN_RTRIG_CORER_S 0
#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S)
#define GLGEN_RTRIG_GLOBR_S 1
#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S)
#define GLGEN_STAT 0x000B612C
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_S 0
#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S)
#define GLLAN_RCTL_0 0x002941F8
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_S 6
#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S)
#define GLNVM_GENS 0x000B6100
#define GLNVM_GENS_SR_SIZE_S 5
#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S)
#define GLNVM_ULD 0x000B6008
#define GLNVM_ULD_CORER_DONE_S 3
#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S)
#define GLNVM_ULD_GLOBR_DONE_S 4
#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S)
#define PF_FUNC_RID 0x0009E880
#define PF_FUNC_RID_FUNC_NUM_S 0
#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S)
#endif /* _ICE_HW_AUTOGEN_H_ */
@@ -26,6 +26,18 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
/**
* ice_set_ctrlq_len - helper function to set controlq length
* @hw: pointer to the hw instance
*/
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
hw->adminq.num_rq_entries = ICE_AQ_LEN;
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
}
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -81,6 +93,8 @@ static int ice_probe(struct pci_dev *pdev,
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
ice_set_ctrlq_len(hw);
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
#ifndef CONFIG_DYNAMIC_DEBUG
@@ -88,7 +102,22 @@ static int ice_probe(struct pci_dev *pdev,
hw->debug_mask = debug;
#endif
err = ice_init_hw(hw);
if (err) {
dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
err = -EIO;
goto err_exit_unroll;
}
dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
return 0;
err_exit_unroll:
pci_disable_pcie_error_reporting(pdev);
return err;
}
/**
@@ -103,6 +132,8 @@ static void ice_remove(struct pci_dev *pdev)
return;
set_bit(__ICE_DOWN, pf->state);
ice_deinit_hw(&pf->hw);
pci_disable_pcie_error_reporting(pdev);
}
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
/**
* ice_aq_read_nvm
* @hw: pointer to the hw struct
* @module_typeid: module pointer location in words from the NVM beginning
* @offset: byte offset from the module beginning
* @length: length of the section to be read (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
* @cd: pointer to command details structure or NULL
*
* Read the NVM using the admin queue commands (0x0701)
*/
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
void *data, bool last_command, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
cmd = &desc.params.nvm;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
cmd->module_typeid = module_typeid;
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
/**
* ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to access
*/
static enum ice_status
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
if ((offset + words) > hw->nvm.sr_words) {
ice_debug(hw, ICE_DBG_NVM,
"NVM error: offset beyond SR lmt.\n");
return ICE_ERR_PARAM;
}
if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
/* We can access only up to 4KB (one sector), in one AQ write */
ice_debug(hw, ICE_DBG_NVM,
"NVM error: tried to access %d words, limit is %d.\n",
words, ICE_SR_SECTOR_SIZE_IN_WORDS);
return ICE_ERR_PARAM;
}
if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
(offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
/* A single access cannot spread over two sectors */
ice_debug(hw, ICE_DBG_NVM,
"NVM error: cannot spread over two sectors.\n");
return ICE_ERR_PARAM;
}
return 0;
}
/**
* ice_read_sr_aq - Read Shadow RAM.
* @hw: pointer to the HW structure
* @offset: offset in words from module start
* @words: number of words to read
* @data: buffer for words reads from Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
* Reads 16-bit word buffers from the Shadow RAM using the admin command.
*/
static enum ice_status
ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
bool last_command)
{
enum ice_status status;
status = ice_check_sr_access_params(hw, offset, words);
/* values in "offset" and "words" parameters are sized as words
* (16 bits) but ice_aq_read_nvm expects these values in bytes.
* So do this conversion while calling ice_aq_read_nvm.
*/
if (!status)
status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data,
last_command, NULL);
return status;
}
/**
* ice_read_sr_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method.
*/
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
status = ice_read_sr_aq(hw, offset, 1, data, true);
if (!status)
*data = le16_to_cpu(*(__le16 *)data);
return status;
}
/**
* ice_acquire_nvm - Generic request for acquiring the NVM ownership
* @hw: pointer to the HW structure
* @access: NVM access type (read or write)
*
* This function will request NVM ownership.
*/
static enum ice_status
ice_acquire_nvm(struct ice_hw *hw,
enum ice_aq_res_access_type access)
{
if (hw->nvm.blank_nvm_mode)
return 0;
return ice_acquire_res(hw, ICE_NVM_RES_ID, access);
}
/**
* ice_release_nvm - Generic request for releasing the NVM ownership
* @hw: pointer to the HW structure
*
* This function will release NVM ownership.
*/
static void ice_release_nvm(struct ice_hw *hw)
{
if (hw->nvm.blank_nvm_mode)
return;
ice_release_res(hw, ICE_NVM_RES_ID);
}
/**
* ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
static enum ice_status
ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
enum ice_status status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
status = ice_read_sr_word_aq(hw, offset, data);
ice_release_nvm(hw);
}
return status;
}
/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the hw struct
*
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
struct ice_nvm_info *nvm = &hw->nvm;
u16 eetrack_lo, eetrack_hi;
enum ice_status status = 0;
u32 fla, gens_stat;
u8 sr_size;
/* The SR size is stored regardless of the nvm programming mode
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);
sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
/* Switching to words (sr_size contains power of 2) */
nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, GLNVM_FLA);
if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
nvm->blank_nvm_mode = false;
} else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
status = ICE_ERR_NVM_BLANK_MODE;
ice_debug(hw, ICE_DBG_NVM,
"NVM init error: unsupported blank mode.\n");
return status;
}
status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
return status;
}
status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
return status;
}
status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
return status;
}
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
return status;
}
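Two unit conversions in ice_nvm.c above are easy to trip over: the GLNVM_GENS SR_SIZE field holds the Shadow RAM size as a power-of-two exponent in KB, and ice_read_sr_aq() turns word offsets into the byte offsets the 0x0701 command expects. An editor's worked example with illustrative values:

/* Editor's worked example -- illustrative values, not part of the patch.
 * If the GLNVM_GENS SR_SIZE field reads 3, the Shadow RAM is 2^3 KB = 8 KB,
 * i.e. BIT(3) * ICE_SR_WORDS_IN_1KB = 8 * 512 = 4096 sixteen-bit words.
 * Reading word ICE_SR_NVM_EETRACK_LO (0x2D) is then issued as an NVM read
 * (opcode 0x0701) at byte offset 2 * 0x2D = 0x5A with a 2-byte length.
 */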
@@ -15,6 +15,7 @@
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define ice_flush(a) rd32((a), GLGEN_STAT)
#define ICE_M(m, s) ((m) << (s))
struct ice_dma_mem {
...
@@ -9,12 +9,17 @@ enum ice_status {
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_READY = -3,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
ICE_ERR_RESET_FAILED = -9,
ICE_ERR_FW_API_VER = -10,
ICE_ERR_NO_MEMORY = -11,
ICE_ERR_CFG = -12,
ICE_ERR_OUT_OF_RANGE = -13,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
ICE_ERR_AQ_TIMEOUT = -101,
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
};
...
@@ -10,20 +10,58 @@
#include "ice_controlq.h"
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
enum ice_aq_res_ids {
ICE_NVM_RES_ID = 1,
ICE_SPD_RES_ID,
ICE_GLOBAL_CFG_LOCK_RES_ID,
ICE_CHANGE_LOCK_RES_ID
};
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
ICE_RES_WRITE
};
/* Various MAC types */
enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
ICE_MAC_GENERIC,
};
/* Various RESET requests; these are not tied to HW reset types */
enum ice_reset_req {
ICE_RESET_PFR = 0,
ICE_RESET_CORER = 1,
ICE_RESET_GLOBR = 2,
};
/* Bus parameters */
struct ice_bus_info {
u16 device;
u8 func;
};
/* NVM Information */
struct ice_nvm_info {
u32 eetrack; /* NVM data version */
u32 oem_ver; /* OEM version info */
u16 sr_words; /* Shadow RAM size in words */
u16 ver; /* NVM package version */
bool blank_nvm_mode; /* is NVM empty (no FW present) */
};
/* Port hardware description */
struct ice_hw {
u8 __iomem *hw_addr;
void *back;
u64 debug_mask; /* bitmap for debug mask */
enum ice_mac_type mac_type;
/* pci info */
u16 device_id;
@@ -32,7 +70,11 @@ struct ice_hw {
u16 subsystem_vendor_id;
u8 revision_id;
u8 pf_id; /* device profile info */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
/* Control Queue info */
struct ice_ctl_q_info adminq;
@@ -47,4 +89,11 @@ struct ice_hw {
u32 fw_build; /* firmware build number */
};
/* Checksum and Shadow RAM pointers */
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
#endif /* _ICE_TYPE_H_ */