Commit 2e90f4b5 authored by James Smart, committed by James Bottomley

[SCSI] lpfc 8.3.28: Critical Miscellaneous fixes

- Make lpfc_sli4_pci_mem_unset interface type aware (CR 124390)
- Convert byte count to word count when calling __iowrite32_copy (CR 122550);
  see the sketch after this list
- Check the ERR1 and ERR2 registers for error attention when the SLI
  Port state is affected by a forced debug dump (CR 122986, 122426, 124859)
- Use the lpfc_readl routine instead of readl for the port status
  register read in lpfc_handle_eratt_s4 (CR 125403)
- Call lpfc_sli4_queue_destroy inside of lpfc_sli4_brdreset before doing
  a pci function reset (CR 125124, 125168, 125572, 125622)
- Zero out the HBQ when it is allocated (CR 125663)
- Alter port reset log messages to indicate error type (CR 125989)
- Add proper NULL pointer checks to all the places that access
  the queue memory (CR 125832)
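
Note on the __iowrite32_copy change above: the kernel helper takes its third
argument as a count of 32-bit words, not bytes, so a caller holding a byte
length must divide by sizeof(uint32_t) before copying. A minimal sketch of
that conversion follows; the wrapper name is hypothetical and only
illustrates the fix applied inside lpfc_memcpy_to_slim.

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical wrapper showing the byte-to-word conversion. */
    static inline void copy_bytes_to_slim(void __iomem *dest, void *src,
                                          unsigned int bytes)
    {
            /* __iowrite32_copy() expects a 32-bit word count, not bytes */
            __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
    }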
Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent df9e1b59
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
 static inline void
 lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
 {
-        __iowrite32_copy(dest, src, bytes);
+        /* convert bytes in argument list to word count for copy function */
+        __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
 }
 
 static inline void
This diff is collapsed.
@@ -1417,7 +1417,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
         uint32_t event_data;
         struct Scsi_Host *shost;
         uint32_t if_type;
-        struct lpfc_register portstat_reg;
+        struct lpfc_register portstat_reg = {0};
+        uint32_t reg_err1, reg_err2;
+        uint32_t uerrlo_reg, uemasklo_reg;
+        uint32_t pci_rd_rc1, pci_rd_rc2;
         int rc;
 
         /* If the pci channel is offline, ignore possible errors, since
@@ -1429,27 +1432,29 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
         if (!phba->cfg_enable_hba_reset)
                 return;
 
-        /* Send an internal error event to mgmt application */
-        lpfc_board_errevt_to_mgmt(phba);
-
-        /* For now, the actual action for SLI4 device handling is not
-         * specified yet, just treated it as adaptor hardware failure
-         */
-        event_data = FC_REG_DUMP_EVENT;
-        shost = lpfc_shost_from_vport(vport);
-        fc_host_post_vendor_event(shost, fc_get_event_number(),
-                                  sizeof(event_data), (char *) &event_data,
-                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
         switch (if_type) {
         case LPFC_SLI_INTF_IF_TYPE_0:
+                pci_rd_rc1 = lpfc_readl(
+                                phba->sli4_hba.u.if_type0.UERRLOregaddr,
+                                &uerrlo_reg);
+                pci_rd_rc2 = lpfc_readl(
+                                phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+                                &uemasklo_reg);
+                /* consider PCI bus read error as pci_channel_offline */
+                if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+                        return;
                 lpfc_sli4_offline_eratt(phba);
                 break;
         case LPFC_SLI_INTF_IF_TYPE_2:
-                portstat_reg.word0 =
-                        readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+                pci_rd_rc1 = lpfc_readl(
+                                phba->sli4_hba.u.if_type2.STATUSregaddr,
+                                &portstat_reg.word0);
+                /* consider PCI bus read error as pci_channel_offline */
+                if (pci_rd_rc1 == -EIO)
+                        return;
+                reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+                reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
                 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
                         /* TODO: Register for Overtemp async events. */
                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1459,8 +1464,20 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
                         phba->over_temp_state = HBA_OVER_TEMP;
                         spin_unlock_irq(&phba->hbalock);
                         lpfc_sli4_offline_eratt(phba);
-                        return;
+                        break;
                 }
+                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                        "3143 Port Down: Firmware Restarted\n");
+                else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                         reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                        "3144 Port Down: Debug Dump\n");
+                else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                         reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                        "3145 Port Down: Provisioning\n");
                 /*
                  * On error status condition, driver need to wait for port
                  * ready before performing reset.
@@ -1469,14 +1486,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
                 if (!rc) {
                         /* need reset: attempt for port recovery */
                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                        "2887 Port Error: Attempting "
-                                        "Port Recovery\n");
+                                        "2887 Reset Needed: Attempting Port "
+                                        "Recovery...\n");
                         lpfc_offline_prep(phba);
                         lpfc_offline(phba);
                         lpfc_sli_brdrestart(phba);
                         if (lpfc_online(phba) == 0) {
                                 lpfc_unblock_mgmt_io(phba);
+                                /* don't report event on forced debug dump */
+                                if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+                                    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
                                         return;
+                                else
+                                        break;
                         }
                         /* fall through for not able to recover */
                 }
@@ -1486,6 +1508,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
         default:
                 break;
         }
+
+        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                        "3123 Report dump event to upper layer\n");
+        /* Send an internal error event to mgmt application */
+        lpfc_board_errevt_to_mgmt(phba);
+        event_data = FC_REG_DUMP_EVENT;
+        shost = lpfc_shost_from_vport(vport);
+        fc_host_post_vendor_event(shost, fc_get_event_number(),
+                                  sizeof(event_data), (char *) &event_data,
+                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 }
 
 /**
@@ -6475,6 +6507,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
         }
         kfree(phba->sli4_hba.fcp_wq);
+        phba->sli4_hba.fcp_wq = NULL;
 out_free_els_wq:
         lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
         phba->sli4_hba.els_wq = NULL;
@@ -6487,6 +6520,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
         }
         kfree(phba->sli4_hba.fcp_cq);
+        phba->sli4_hba.fcp_cq = NULL;
 out_free_els_cq:
         lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
         phba->sli4_hba.els_cq = NULL;
@@ -6499,6 +6533,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
         }
         kfree(phba->sli4_hba.fp_eq);
+        phba->sli4_hba.fp_eq = NULL;
 out_free_sp_eq:
         lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
         phba->sli4_hba.sp_eq = NULL;
@@ -6532,7 +6567,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
         phba->sli4_hba.els_wq = NULL;
 
         /* Release FCP work queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+        if (phba->sli4_hba.fcp_wq != NULL)
+                for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+                     fcp_qidx++)
                         lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
         kfree(phba->sli4_hba.fcp_wq);
         phba->sli4_hba.fcp_wq = NULL;
@@ -6553,6 +6590,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 
         /* Release FCP response complete queue */
         fcp_qidx = 0;
+        if (phba->sli4_hba.fcp_cq != NULL)
                 do
                         lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
                 while (++fcp_qidx < phba->cfg_fcp_eq_count);
@@ -6560,7 +6598,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
         phba->sli4_hba.fcp_cq = NULL;
 
         /* Release fast-path event queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+        if (phba->sli4_hba.fp_eq != NULL)
+                for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+                     fcp_qidx++)
                         lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
         kfree(phba->sli4_hba.fp_eq);
         phba->sli4_hba.fp_eq = NULL;
@@ -6614,6 +6654,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                         phba->sli4_hba.sp_eq->queue_id);
 
         /* Set up fast-path event queue */
+        if (!phba->sli4_hba.fp_eq) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                "3147 Fast-path EQs not allocated\n");
+                goto out_destroy_sp_eq;
+        }
         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
                 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6678,6 +6723,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                         phba->sli4_hba.sp_eq->queue_id);
 
         /* Set up fast-path FCP Response Complete Queue */
+        if (!phba->sli4_hba.fcp_cq) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                "3148 Fast-path FCP CQ array not "
+                                "allocated\n");
+                goto out_destroy_els_cq;
+        }
         fcp_cqidx = 0;
         do {
                 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -6757,6 +6808,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                         phba->sli4_hba.els_cq->queue_id);
 
         /* Set up fast-path FCP Work Queue */
+        if (!phba->sli4_hba.fcp_wq) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                "3149 Fast-path FCP WQ array not "
+                                "allocated\n");
+                goto out_destroy_els_wq;
+        }
         for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
                 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6818,18 +6875,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 out_destroy_fcp_wq:
         for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
                 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
         lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
         lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 out_destroy_fcp_cq:
         for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
                 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
         lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
         lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
 out_destroy_fp_eq:
         for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
                 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
         lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 out_error:
         return rc;
@@ -6866,13 +6926,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
         /* Unset ELS complete queue */
         lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
         /* Unset FCP response complete queue */
+        if (phba->sli4_hba.fcp_cq) {
                 fcp_qidx = 0;
                 do {
                         lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
                 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+        }
         /* Unset fast-path event queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+        if (phba->sli4_hba.fp_eq) {
+                for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+                     fcp_qidx++)
                         lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+        }
         /* Unset slow-path event queue */
         lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
@@ -7411,22 +7476,25 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 static void
 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 {
-        struct pci_dev *pdev;
-
-        /* Obtain PCI device reference */
-        if (!phba->pcidev)
-                return;
-        else
-                pdev = phba->pcidev;
-
-        /* Free coherent DMA memory allocated */
-
-        /* Unmap I/O memory space */
+        uint32_t if_type;
+        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+
+        switch (if_type) {
+        case LPFC_SLI_INTF_IF_TYPE_0:
                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
                 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
-
-        return;
+                break;
+        case LPFC_SLI_INTF_IF_TYPE_2:
+                iounmap(phba->sli4_hba.conf_regs_memmap_p);
+                break;
+        case LPFC_SLI_INTF_IF_TYPE_1:
+        default:
+                dev_printk(KERN_ERR, &phba->pcidev->dev,
+                           "FATAL - unsupported SLI4 interface type - %d\n",
+                           if_type);
+                break;
+        }
 }
 
 /**
@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 {
         struct hbq_dmabuf *hbqbp;
 
-        hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+        hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
         if (!hbqbp)
                 return NULL;
 
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 {
         struct hbq_dmabuf *dma_buf;
 
-        dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+        dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
         if (!dma_buf)
                 return NULL;
 
This diff is collapsed.
@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
                         void __iomem *STATUSregaddr;
                         void __iomem *CTRLregaddr;
                         void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1     0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2     0x2
                         void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART     0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON  0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP    0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ     0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ     0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS    0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ     0x6
                 } if_type2;
         } u;
 