Commit 4565e370 authored by Wayne Boyer, committed by James Bottomley

[SCSI] ipr: add error handling updates for the next generation chip

Add support for the new log data notification and overlay IDs.
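
For reference, the new overlay IDs are routed to sis64-specific log
routines in ipr_handle_log_data(). A condensed sketch of the dispatch
(the enclosing switch on the hcam overlay ID is paraphrased here; the
case bodies match the hunk further below):

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_23:	/* sis64 configuration error */
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:	/* sis64 array errors */
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:	/* sis64 fabric error */
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	}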
Signed-off-by: Wayne Boyer <wayneb@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent 3e7ebdfa
@@ -1079,7 +1079,7 @@ static char *ipr_format_resource_path(u8 *res_path, char *buffer)
sprintf(buffer, "%02X", res_path[0]);
for (i=1; res_path[i] != 0xff; i++)
sprintf(buffer, "%s:%02X", buffer, res_path[i]);
sprintf(buffer, "%s-%02X", buffer, res_path[i]);
return buffer;
}
@@ -1385,8 +1385,12 @@ static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
struct ipr_hostrcb_type_12_error *error =
&hostrcb->hcam.u.error.u.type_12_error;
struct ipr_hostrcb_type_12_error *error;
if (ioa_cfg->sis64)
error = &hostrcb->hcam.u.error64.u.type_12_error;
else
error = &hostrcb->hcam.u.error.u.type_12_error;
ipr_err("-----Current Configuration-----\n");
ipr_err("Cache Directory Card Information:\n");
@@ -1478,6 +1482,48 @@ static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
}
}
/**
* ipr_log_sis64_config_error - Log a device error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
*
* Return value:
* none
**/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
int errors_logged, i;
struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
struct ipr_hostrcb_type_23_error *error;
char buffer[IPR_MAX_RES_PATH_LENGTH];
error = &hostrcb->hcam.u.error64.u.type_23_error;
errors_logged = be32_to_cpu(error->errors_logged);
ipr_err("Device Errors Detected/Logged: %d/%d\n",
be32_to_cpu(error->errors_detected), errors_logged);
dev_entry = error->dev;
for (i = 0; i < errors_logged; i++, dev_entry++) {
ipr_err_separator;
ipr_err("Device %d : %s", i + 1,
ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
ipr_log_ext_vpd(&dev_entry->vpd);
ipr_err("-----New Device Information-----\n");
ipr_log_ext_vpd(&dev_entry->new_vpd);
ipr_err("Cache Directory Card Information:\n");
ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
ipr_err("Adapter Card Information:\n");
ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
}
}
/**
* ipr_log_config_error - Log a configuration error.
* @ioa_cfg: ioa config struct
@@ -1672,7 +1718,11 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
{
struct ipr_hostrcb_type_17_error *error;
if (ioa_cfg->sis64)
error = &hostrcb->hcam.u.error64.u.type_17_error;
else
error = &hostrcb->hcam.u.error.u.type_17_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
strim(error->failure_reason);
@@ -1779,6 +1829,42 @@ static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
/**
* ipr_log64_fabric_path - Log a fabric path error
* @hostrcb: hostrcb struct
* @fabric: fabric descriptor
*
* Return value:
* none
**/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
struct ipr_hostrcb64_fabric_desc *fabric)
{
int i, j;
u8 path_state = fabric->path_state;
u8 active = path_state & IPR_PATH_ACTIVE_MASK;
u8 state = path_state & IPR_PATH_STATE_MASK;
char buffer[IPR_MAX_RES_PATH_LENGTH];
for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
if (path_active_desc[i].active != active)
continue;
for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
if (path_state_desc[j].state != state)
continue;
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
path_active_desc[i].desc, path_state_desc[j].desc,
ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
return;
}
}
ipr_err("Path state=%02X Resource Path=%s\n", path_state,
ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
}
static const struct {
u8 type;
char *desc;
@@ -1887,6 +1973,49 @@ static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
* ipr_log64_path_elem - Log a fabric path element.
* @hostrcb: hostrcb struct
* @cfg: fabric path element struct
*
* Return value:
* none
**/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
struct ipr_hostrcb64_config_element *cfg)
{
int i, j;
u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
char buffer[IPR_MAX_RES_PATH_LENGTH];
if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
return;
for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
if (path_type_desc[i].type != type)
continue;
for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
if (path_status_desc[j].status != status)
continue;
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
return;
}
}
ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
"WWN=%08X%08X\n", cfg->type_status,
ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
* ipr_log_fabric_error - Log a fabric error.
* @ioa_cfg: ioa config struct
@@ -1924,6 +2053,96 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
* ipr_log_sis64_array_error - Log a sis64 array error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
*
* Return value:
* none
**/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
int i, num_entries;
struct ipr_hostrcb_type_24_error *error;
struct ipr_hostrcb64_array_data_entry *array_entry;
char buffer[IPR_MAX_RES_PATH_LENGTH];
const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
error = &hostrcb->hcam.u.error64.u.type_24_error;
ipr_err_separator;
ipr_err("RAID %s Array Configuration: %s\n",
error->protection_level,
ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
ipr_err_separator;
array_entry = error->array_member;
num_entries = min_t(u32, be32_to_cpu(error->num_entries),
sizeof(error->array_member));
for (i = 0; i < num_entries; i++, array_entry++) {
if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
continue;
if (error->exposed_mode_adn == i)
ipr_err("Exposed Array Member %d:\n", i);
else
ipr_err("Array Member %d:\n", i);
ipr_err("Array Member %d:\n", i);
ipr_log_ext_vpd(&array_entry->vpd);
ipr_err("Current Location: %s",
ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
ipr_err("Expected Location: %s",
ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
ipr_err_separator;
}
}
/**
* ipr_log_sis64_fabric_error - Log a sis64 fabric error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
*
* Return value:
* none
**/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
struct ipr_hostrcb_type_30_error *error;
struct ipr_hostrcb64_fabric_desc *fabric;
struct ipr_hostrcb64_config_element *cfg;
int i, add_len;
error = &hostrcb->hcam.u.error64.u.type_30_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
add_len = be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb64_error, u) +
offsetof(struct ipr_hostrcb_type_30_error, desc));
for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
ipr_log64_fabric_path(hostrcb, fabric);
for_each_fabric_cfg(fabric, cfg)
ipr_log64_path_elem(hostrcb, cfg);
add_len -= be16_to_cpu(fabric->length);
fabric = (struct ipr_hostrcb64_fabric_desc *)
((unsigned long)fabric + be16_to_cpu(fabric->length));
}
ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
* ipr_log_generic_error - Log an adapter error.
* @ioa_cfg: ioa config struct
@@ -1983,13 +2202,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
if (ioa_cfg->sis64)
ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
else
ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
/* Tell the midlayer we had a bus reset so it will handle the UA properly */
scsi_report_bus_reset(ioa_cfg->host,
hostrcb->hcam.u.error.failing_dev_res_addr.bus);
hostrcb->hcam.u.error.fd_res_addr.bus);
}
error_index = ipr_get_error(ioasc);
@@ -2037,6 +2259,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_20:
ipr_log_fabric_error(ioa_cfg, hostrcb);
break;
case IPR_HOST_RCB_OVERLAY_ID_23:
ipr_log_sis64_config_error(ioa_cfg, hostrcb);
break;
case IPR_HOST_RCB_OVERLAY_ID_24:
case IPR_HOST_RCB_OVERLAY_ID_26:
ipr_log_sis64_array_error(ioa_cfg, hostrcb);
break;
case IPR_HOST_RCB_OVERLAY_ID_30:
ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
break;
case IPR_HOST_RCB_OVERLAY_ID_1:
case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
default:
@@ -2061,7 +2293,12 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
u32 fd_ioasc;
if (ioa_cfg->sis64)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
else
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
list_del(&hostrcb->queue);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -6996,7 +7233,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
if (!rc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
ioa_cfg->sdt_state == GET_DUMP)
ioa_cfg->sdt_state = WAIT_FOR_DUMP;
......
@@ -754,12 +754,29 @@ struct ipr_hostrcb_device_data_entry_enhanced {
struct ipr_ext_vpd cfc_last_with_dev_vpd;
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb64_device_data_entry_enhanced {
struct ipr_ext_vpd vpd;
u8 ccin[4];
u8 res_path[8];
struct ipr_ext_vpd new_vpd;
u8 new_ccin[4];
struct ipr_ext_vpd ioa_last_with_dev_vpd;
struct ipr_ext_vpd cfc_last_with_dev_vpd;
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_array_data_entry {
struct ipr_vpd vpd;
struct ipr_res_addr expected_dev_res_addr;
struct ipr_res_addr dev_res_addr;
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb64_array_data_entry {
struct ipr_ext_vpd vpd;
u8 ccin[4];
u8 expected_res_path[8];
u8 res_path[8];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_array_data_entry_enhanced {
struct ipr_ext_vpd vpd;
u8 ccin[4];
@@ -811,6 +828,14 @@ struct ipr_hostrcb_type_13_error {
struct ipr_hostrcb_device_data_entry_enhanced dev[3];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_type_23_error {
struct ipr_ext_vpd ioa_vpd;
struct ipr_ext_vpd cfc_vpd;
__be32 errors_detected;
__be32 errors_logged;
struct ipr_hostrcb64_device_data_entry_enhanced dev[3];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_type_04_error {
struct ipr_vpd ioa_vpd;
struct ipr_vpd cfc_vpd;
@@ -838,6 +863,22 @@ struct ipr_hostrcb_type_14_error {
struct ipr_hostrcb_array_data_entry_enhanced array_member[18];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_type_24_error {
struct ipr_ext_vpd ioa_vpd;
struct ipr_ext_vpd cfc_vpd;
u8 reserved[2];
u8 exposed_mode_adn;
#define IPR_INVALID_ARRAY_DEV_NUM 0xff
u8 array_id;
u8 last_res_path[8];
u8 protection_level[8];
struct ipr_ext_vpd array_vpd;
u8 description[16];
u8 reserved2[3];
u8 num_entries;
struct ipr_hostrcb64_array_data_entry array_member[32];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_type_07_error {
u8 failure_reason[64];
struct ipr_vpd vpd;
@@ -875,6 +916,22 @@ struct ipr_hostrcb_config_element {
__be32 wwid[2];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb64_config_element {
__be16 length;
u8 descriptor_id;
#define IPR_DESCRIPTOR_MASK 0xC0
#define IPR_DESCRIPTOR_SIS64 0x00
u8 reserved;
u8 type_status;
u8 reserved2[2];
u8 link_rate;
u8 res_path[8];
__be32 wwid[2];
}__attribute__((packed, aligned (8)));
struct ipr_hostrcb_fabric_desc {
__be16 length;
u8 ioa_port;
@@ -896,6 +953,20 @@ struct ipr_hostrcb_fabric_desc {
struct ipr_hostrcb_config_element elem[1];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb64_fabric_desc {
__be16 length;
u8 descriptor_id;
u8 reserved;
u8 path_state;
u8 reserved2[2];
u8 res_path[8];
u8 reserved3[6];
__be16 num_entries;
struct ipr_hostrcb64_config_element elem[1];
}__attribute__((packed, aligned (8)));
#define for_each_fabric_cfg(fabric, cfg) \
for (cfg = (fabric)->elem; \
cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
@@ -908,10 +979,17 @@ struct ipr_hostrcb_type_20_error {
struct ipr_hostrcb_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_type_30_error {
u8 failure_reason[64];
u8 reserved[3];
u8 num_entries;
struct ipr_hostrcb64_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_error {
__be32 failing_dev_ioasc;
struct ipr_res_addr failing_dev_res_addr;
__be32 failing_dev_res_handle;
__be32 fd_ioasc;
struct ipr_res_addr fd_res_addr;
__be32 fd_res_handle;
__be32 prc;
union {
struct ipr_hostrcb_type_ff_error type_ff_error;
@@ -928,6 +1006,26 @@ struct ipr_hostrcb_error {
} u;
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb64_error {
__be32 fd_ioasc;
__be32 ioa_fw_level;
__be32 fd_res_handle;
__be32 prc;
__be64 fd_dev_id;
__be64 fd_lun;
u8 fd_res_path[8];
__be64 time_stamp;
u8 reserved[2];
union {
struct ipr_hostrcb_type_ff_error type_ff_error;
struct ipr_hostrcb_type_12_error type_12_error;
struct ipr_hostrcb_type_17_error type_17_error;
struct ipr_hostrcb_type_23_error type_23_error;
struct ipr_hostrcb_type_24_error type_24_error;
struct ipr_hostrcb_type_30_error type_30_error;
} u;
}__attribute__((packed, aligned (8)));
struct ipr_hostrcb_raw {
__be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)];
}__attribute__((packed, aligned (4)));
@@ -965,6 +1063,10 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
#define IPR_HOST_RCB_OVERLAY_ID_23 0x23
#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
u8 reserved1[3];
@@ -975,6 +1077,7 @@ struct ipr_hcam {
union {
struct ipr_hostrcb_error error;
struct ipr_hostrcb64_error error64;
struct ipr_hostrcb_cfg_ch_not ccn;
struct ipr_hostrcb_raw raw;
} u;
@@ -985,6 +1088,7 @@ struct ipr_hostrcb {
dma_addr_t hostrcb_dma;
struct list_head queue;
struct ipr_ioa_cfg *ioa_cfg;
char rp_buffer[IPR_MAX_RES_PATH_LENGTH];
};
/* IPR smart dump table structures */
@@ -1522,12 +1626,19 @@ struct ipr_ucode_image_header {
#define ipr_hcam_err(hostrcb, fmt, ...) \
{ \
if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \
if (ipr_is_device(hostrcb)) { \
if ((hostrcb)->ioa_cfg->sis64) { \
printk(KERN_ERR IPR_NAME ": %s: " fmt, \
ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \
&hostrcb->rp_buffer[0]), \
__VA_ARGS__); \
} else { \
ipr_ra_err((hostrcb)->ioa_cfg, \
(hostrcb)->hcam.u.error.failing_dev_res_addr, \
fmt, ##__VA_ARGS__); \
(hostrcb)->hcam.u.error.fd_res_addr, \
fmt, __VA_ARGS__); \
} \
} else { \
dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \
dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, __VA_ARGS__); \
} \
}
@@ -1637,18 +1748,29 @@ static inline int ipr_is_naca_model(struct ipr_resource_entry *res)
}
/**
* ipr_is_device - Determine if resource address is that of a device
* @res_addr: resource address struct
* ipr_is_device - Determine if the hostrcb structure is related to a device
* @hostrcb: host resource control blocks struct
*
* Return value:
* 1 if AF / 0 if not AF
**/
static inline int ipr_is_device(struct ipr_res_addr *res_addr)
static inline int ipr_is_device(struct ipr_hostrcb *hostrcb)
{
struct ipr_res_addr *res_addr;
u8 *res_path;
if (hostrcb->ioa_cfg->sis64) {
res_path = &hostrcb->hcam.u.error64.fd_res_path[0];
if ((res_path[0] == 0x00 || res_path[0] == 0x80 ||
res_path[0] == 0x81) && res_path[2] != 0xFF)
return 1;
} else {
res_addr = &hostrcb->hcam.u.error.fd_res_addr;
if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
(res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1)))
return 1;
}
return 0;
}
......