Commit 8a891429 authored by Rasesh Mody, committed by David S. Miller

bna: Fixed build break for allyesconfig

This patch fixes the build break caused by multiple definitions of the
same symbols in Brocade's FC/FCoE driver (BFA) and its 10G networking
driver (BNA).

Changes are:

1. Locally used functions are made static.

2. Unused functions are removed.

3. Functions that must remain globally visible are given a unique
   namespace prefix (bfa_nw_), as sketched below.
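
For illustration, the renaming pattern is sketched here (a minimal
before/after example built from declarations that appear in the diff
below; the identifiers are the driver's own, the comments are editorial):

	/* Before: BFA and BNA both exported a symbol with this name, so an
	 * allyesconfig build linking both drivers failed with a multiple
	 * definition error. */
	u32 bfa_cee_meminfo(void);

	/* After: the networking copy moves into the bfa_nw_ namespace ... */
	u32 bfa_nw_cee_meminfo(void);

	/* ... and helpers that are only called from within one file lose the
	 * export entirely and become static instead. */
	static void bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m);
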
Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ced1de4c
@@ -152,7 +152,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
 	cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
 }
 /**
- * bfa_cee_meminfo()
+ * bfa_nw_cee_meminfo()
  *
  * @brief Returns the size of the DMA memory needed by CEE module
  *
@@ -161,13 +161,13 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
  * @return Size of DMA region
  */
 u32
-bfa_cee_meminfo(void)
+bfa_nw_cee_meminfo(void)
 {
 	return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
 }
 /**
- * bfa_cee_mem_claim()
+ * bfa_nw_cee_mem_claim()
  *
  * @brief Initialized CEE DMA Memory
  *
@@ -178,7 +178,7 @@ bfa_cee_meminfo(void)
  * @return void
  */
 void
-bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
+bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
 {
 	cee->attr_dma.kva = dma_kva;
 	cee->attr_dma.pa = dma_pa;
@@ -189,108 +189,6 @@ bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
 		(dma_kva + bfa_cee_attr_meminfo());
 }
-/**
- * bfa_cee_get_attr()
- *
- * @brief
- * Send the request to the f/w to fetch CEE attributes.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-enum bfa_status
-bfa_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
-	bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
-{
-	struct bfi_cee_get_req *cmd;
-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
-	if (!bfa_ioc_is_operational(cee->ioc))
-		return BFA_STATUS_IOC_FAILURE;
-	if (cee->get_attr_pending == true)
-		return BFA_STATUS_DEVBUSY;
-	cee->get_attr_pending = true;
-	cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
-	cee->attr = attr;
-	cee->cbfn.get_attr_cbfn = cbfn;
-	cee->cbfn.get_attr_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
-		bfa_ioc_portid(cee->ioc));
-	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
-	bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
-	return BFA_STATUS_OK;
-}
-/**
- * bfa_cee_get_stats()
- *
- * @brief
- * Send the request to the f/w to fetch CEE statistics.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-enum bfa_status
-bfa_cee_get_stats(struct bfa_cee *cee, struct bfa_cee_stats *stats,
-	bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
-{
-	struct bfi_cee_get_req *cmd;
-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
-	if (!bfa_ioc_is_operational(cee->ioc))
-		return BFA_STATUS_IOC_FAILURE;
-	if (cee->get_stats_pending == true)
-		return BFA_STATUS_DEVBUSY;
-	cee->get_stats_pending = true;
-	cmd = (struct bfi_cee_get_req *) cee->get_stats_mb.msg;
-	cee->stats = stats;
-	cee->cbfn.get_stats_cbfn = cbfn;
-	cee->cbfn.get_stats_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
-		bfa_ioc_portid(cee->ioc));
-	bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
-	bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
-	return BFA_STATUS_OK;
-}
-/**
- * bfa_cee_reset_stats()
- *
- * @brief Clears CEE Stats in the f/w.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-enum bfa_status
-bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn,
-	void *cbarg)
-{
-	struct bfi_cee_reset_stats *cmd;
-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
-	if (!bfa_ioc_is_operational(cee->ioc))
-		return BFA_STATUS_IOC_FAILURE;
-	if (cee->reset_stats_pending == true)
-		return BFA_STATUS_DEVBUSY;
-	cee->reset_stats_pending = true;
-	cmd = (struct bfi_cee_reset_stats *) cee->reset_stats_mb.msg;
-	cee->cbfn.reset_stats_cbfn = cbfn;
-	cee->cbfn.reset_stats_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
-		bfa_ioc_portid(cee->ioc));
-	bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
-	return BFA_STATUS_OK;
-}
 /**
  * bfa_cee_isrs()
  *
@@ -301,7 +199,7 @@ bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn,
  * @return void
  */
-void
+static void
 bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
 {
 	union bfi_cee_i2h_msg_u *msg;
@@ -334,7 +232,7 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
  * @return void
  */
-void
+static void
 bfa_cee_hbfail(void *arg)
 {
 	struct bfa_cee *cee;
@@ -367,7 +265,7 @@ bfa_cee_hbfail(void *arg)
 }
 /**
- * bfa_cee_attach()
+ * bfa_nw_cee_attach()
  *
  * @brief CEE module-attach API
  *
@@ -380,28 +278,14 @@ bfa_cee_hbfail(void *arg)
  * @return void
  */
 void
-bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
+bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
 		void *dev)
 {
 	BUG_ON(!(cee != NULL));
 	cee->dev = dev;
 	cee->ioc = ioc;
-	bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+	bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
 	bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
-	bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+	bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
-}
-/**
- * bfa_cee_detach()
- *
- * @brief CEE module-detach API
- *
- * @param[in] cee - Pointer to the CEE module data structure
- *
- * @return void
- */
-void
-bfa_cee_detach(struct bfa_cee *cee)
-{
 }
@@ -56,17 +56,9 @@ struct bfa_cee {
 	struct bfa_mbox_cmd reset_stats_mb;
 };
-u32 bfa_cee_meminfo(void);
+u32 bfa_nw_cee_meminfo(void);
-void bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
+void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
 	u64 dma_pa);
-void bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
+void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-void bfa_cee_detach(struct bfa_cee *cee);
-enum bfa_status bfa_cee_get_attr(struct bfa_cee *cee,
-	struct bfa_cee_attr *attr, bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
-enum bfa_status bfa_cee_get_stats(struct bfa_cee *cee,
-	struct bfa_cee_stats *stats, bfa_cee_get_stats_cbfn_t cbfn,
-	void *cbarg);
-enum bfa_status bfa_cee_reset_stats(struct bfa_cee *cee,
-	bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
 #endif /* __BFA_CEE_H__ */
This diff is collapsed.
@@ -239,13 +239,9 @@ struct bfa_ioc_hwif {
 /**
  * IOC mailbox interface
  */
-void bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
+void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
-void bfa_ioc_mbox_register(struct bfa_ioc *ioc,
-	bfa_ioc_mbox_mcfunc_t *mcfuncs);
-void bfa_ioc_mbox_isr(struct bfa_ioc *ioc);
-void bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len);
-void bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg);
-void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
+void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
 	bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
 /**
@@ -256,83 +252,45 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
 	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
 	(__ioc)->fcmode))
-enum bfa_status bfa_ioc_pll_init(struct bfa_ioc *ioc);
-enum bfa_status bfa_ioc_cb_pll_init(void __iomem *rb, bool fcmode);
-enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
 #define bfa_ioc_isr_mode_set(__ioc, __msix) \
 	((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
 #define bfa_ioc_ownership_reset(__ioc) \
 	((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
-void bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc);
+void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
-void bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa,
+void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
 	struct bfa_ioc_cbfn *cbfn);
-void bfa_ioc_auto_recover(bool auto_recover);
+void bfa_nw_ioc_auto_recover(bool auto_recover);
-void bfa_ioc_detach(struct bfa_ioc *ioc);
+void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
-void bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
 	enum bfi_mclass mc);
-u32 bfa_ioc_meminfo(void);
+u32 bfa_nw_ioc_meminfo(void);
-void bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
+void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
-void bfa_ioc_enable(struct bfa_ioc *ioc);
+void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
-void bfa_ioc_disable(struct bfa_ioc *ioc);
+void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
-bool bfa_ioc_intx_claim(struct bfa_ioc *ioc);
+void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
-void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
-	u32 boot_param);
-void bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *msg);
+void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
-void bfa_ioc_error_isr(struct bfa_ioc *ioc);
+void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
-bool bfa_ioc_is_operational(struct bfa_ioc *ioc);
-bool bfa_ioc_is_initialized(struct bfa_ioc *ioc);
-bool bfa_ioc_is_disabled(struct bfa_ioc *ioc);
-bool bfa_ioc_fw_mismatch(struct bfa_ioc *ioc);
-bool bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc);
-void bfa_ioc_cfg_complete(struct bfa_ioc *ioc);
-enum bfa_ioc_type bfa_ioc_get_type(struct bfa_ioc *ioc);
-void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num);
-void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver);
-void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver);
-void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
-void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
-	char *manufacturer);
-void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev);
-enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc *ioc);
-void bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
-void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
-	struct bfa_adapter_attr *ad_attr);
-u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
-u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
-void bfa_ioc_set_fcmode(struct bfa_ioc *ioc);
-bool bfa_ioc_get_fcmode(struct bfa_ioc *ioc);
-void bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
 	struct bfa_ioc_hbfail_notify *notify);
-bool bfa_ioc_sem_get(void __iomem *sem_reg);
+bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
-void bfa_ioc_sem_release(void __iomem *sem_reg);
+void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
-void bfa_ioc_hw_sem_release(struct bfa_ioc *ioc);
+void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
-void bfa_ioc_fwver_get(struct bfa_ioc *ioc,
+void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
 	struct bfi_ioc_image_hdr *fwhdr);
-bool bfa_ioc_fwver_cmp(struct bfa_ioc *ioc,
+bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
 	struct bfi_ioc_image_hdr *fwhdr);
+mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
 /*
  * Timeout APIs
  */
-void bfa_ioc_timeout(void *ioc);
+void bfa_nw_ioc_timeout(void *ioc);
-void bfa_ioc_hb_check(void *ioc);
+void bfa_nw_ioc_hb_check(void *ioc);
-void bfa_ioc_sem_timeout(void *ioc);
+void bfa_nw_ioc_sem_timeout(void *ioc);
-/*
- * bfa mfg wwn API functions
- */
-u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
-u64 bfa_ioc_get_nwwn(struct bfa_ioc *ioc);
-mac_t bfa_ioc_get_mac(struct bfa_ioc *ioc);
-u64 bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc);
-u64 bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc);
-mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
-u64 bfa_ioc_get_adid(struct bfa_ioc *ioc);
 /*
  * F/W Image Size & Chunk
...
@@ -32,25 +32,26 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
 static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
-struct bfa_ioc_hwif hwif_ct;
+struct bfa_ioc_hwif nw_hwif_ct;
 /**
  * Called from bfa_ioc_attach() to map asic specific calls.
  */
 void
-bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
 {
-	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+	nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
-	hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+	nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
-	hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+	nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
-	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+	nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
-	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+	nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
-	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+	nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+	nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
-	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
-	ioc->ioc_hwif = &hwif_ct;
+	ioc->ioc_hwif = &nw_hwif_ct;
 }
 /**
@@ -76,7 +77,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 		BFA_IOC_FWIMG_MINSZ)
 		return true;
-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 	/**
@@ -84,7 +85,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 	 */
 	if (usecnt == 0) {
 		writel(1, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 		return true;
 	}
@@ -98,9 +99,9 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 	/**
 	 * Check if another driver with a different firmware is active
 	 */
-	bfa_ioc_fwver_get(ioc, &fwhdr);
+	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
-	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 		return false;
 	}
@@ -109,7 +110,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 	 */
 	usecnt++;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 	return true;
 }
@@ -134,14 +135,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
 	/**
 	 * decrement usage count
 	 */
-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 	BUG_ON(!(usecnt > 0));
 	usecnt--;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 }
 /**
@@ -302,9 +303,9 @@ static void
 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 {
 	if (ioc->cna) {
-		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 		writel(0, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 	}
 	/*
@@ -313,10 +314,10 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 	 * will lock it instead of clearing it.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
+	bfa_nw_ioc_hw_sem_release(ioc);
 }
-enum bfa_status
+static enum bfa_status
 bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
 {
 	u32 pll_sclk, pll_fclk, r32;
...
@@ -81,7 +81,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
 		/* Post the next entry, if needed */
 		if (to_post) {
 			mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
-			bfa_ioc_mbox_queue(&bna->device.ioc,
+			bfa_nw_ioc_mbox_queue(&bna->device.ioc,
 					&mb_qe->cmd);
 		}
 	} else {
@@ -107,7 +107,7 @@ bna_err_handler(struct bna *bna, u32 intr_status)
 		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
 	}
-	bfa_ioc_error_isr(&bna->device.ioc);
+	bfa_nw_ioc_error_isr(&bna->device.ioc);
 }
 void
@@ -118,7 +118,7 @@ bna_mbox_handler(struct bna *bna, u32 intr_status)
 		return;
 	}
 	if (BNA_IS_MBOX_INTR(intr_status))
-		bfa_ioc_mbox_isr(&bna->device.ioc);
+		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
 }
 void
@@ -133,7 +133,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
 	bna->mbox_mod.msg_pending++;
 	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
 		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
-		bfa_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
+		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
 		bna->mbox_mod.state = BNA_MBOX_POSTED;
 	} else {
 		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
@@ -180,7 +180,7 @@ bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
 void
 bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
 {
-	bfa_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
+	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
 	mbox_mod->state = BNA_MBOX_FREE;
 	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
 	INIT_LIST_HEAD(&mbox_mod->posted_q);
@@ -1289,7 +1289,7 @@ bna_port_mtu_set(struct bna_port *port, int mtu,
 void
 bna_port_mac_get(struct bna_port *port, mac_t *mac)
 {
-	*mac = bfa_ioc_get_mac(&port->bna->device.ioc);
+	*mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
 }
 /**
@@ -1427,7 +1427,7 @@ bna_device_sm_stopped(struct bna_device *device,
 	case DEVICE_E_ENABLE:
 		if (device->intr_type == BNA_INTR_T_MSIX)
 			bna_mbox_msix_idx_set(device);
-		bfa_ioc_enable(&device->ioc);
+		bfa_nw_ioc_enable(&device->ioc);
 		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
 		break;
@@ -1547,7 +1547,7 @@ bna_device_sm_port_stop_wait(struct bna_device *device,
 static void
 bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
 {
-	bfa_ioc_disable(&device->ioc);
+	bfa_nw_ioc_disable(&device->ioc);
 }
 static void
@@ -1655,12 +1655,12 @@ bna_device_init(struct bna_device *device, struct bna *bna,
 	 * 1. DMA memory for IOC attributes
 	 * 2. Kernel memory for FW trace
 	 */
-	bfa_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
+	bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
-	bfa_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
+	bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
 	BNA_GET_DMA_ADDR(
 		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
-	bfa_ioc_mem_claim(&device->ioc,
+	bfa_nw_ioc_mem_claim(&device->ioc,
 		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
 		dma);
@@ -1686,9 +1686,7 @@ bna_device_uninit(struct bna_device *device)
 {
 	bna_mbox_mod_uninit(&device->bna->mbox_mod);
-	bfa_cee_detach(&device->bna->cee);
+	bfa_nw_ioc_detach(&device->ioc);
-	bfa_ioc_detach(&device->ioc);
 	device->bna = NULL;
 }
@@ -1783,10 +1781,10 @@ bna_adv_device_init(struct bna_device *device, struct bna *bna,
 		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
 	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
-	bfa_cee_attach(&bna->cee, &device->ioc, bna);
+	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
-	bfa_cee_mem_claim(&bna->cee, kva, dma);
+	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
-	kva += bfa_cee_meminfo();
+	kva += bfa_nw_cee_meminfo();
-	dma += bfa_cee_meminfo();
+	dma += bfa_nw_cee_meminfo();
 	}
@@ -1800,7 +1798,7 @@ bna_adv_res_req(struct bna_res_info *res_info)
 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
-		bfa_cee_meminfo(), PAGE_SIZE);
+		bfa_nw_cee_meminfo(), PAGE_SIZE);
 	/* Virtual memory for retreiving fw_trc */
 	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
@@ -3333,7 +3331,7 @@ bna_res_req(struct bna_res_info *res_info)
 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
-		ALIGN(bfa_ioc_meminfo(), PAGE_SIZE);
+		ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
 	/* DMA memory for index segment of an IB */
 	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
...
@@ -1365,7 +1365,7 @@ bnad_ioc_timeout(unsigned long data)
 	unsigned long flags;
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_ioc_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
@@ -1376,7 +1376,7 @@ bnad_ioc_hb_check(unsigned long data)
 	unsigned long flags;
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_ioc_hb_check((void *) &bnad->bna.device.ioc);
+	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
@@ -1387,7 +1387,7 @@ bnad_ioc_sem_timeout(unsigned long data)
 	unsigned long flags;
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
@@ -3067,7 +3067,6 @@ bnad_pci_probe(struct pci_dev *pdev,
 	}
 	bnad = netdev_priv(netdev);
-
 	/*
 	 * PCI initialization
 	 * Output : using_dac = 1 for 64 bit DMA
@@ -3239,7 +3238,7 @@ bnad_module_init(void)
 	pr_info("Brocade 10G Ethernet driver\n");
-	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
 	err = pci_register_driver(&bnad_pci_driver);
 	if (err < 0) {
...
@@ -276,7 +276,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 	if (ioc_attr) {
 		memset(ioc_attr, 0, sizeof(*ioc_attr));
 		spin_lock_irqsave(&bnad->bna_lock, flags);
-		bfa_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
...