Commit 5b0ec2ef authored by Ralf Baechle

Merge branch 'fixes-for-linus' into mips-for-linux-next

parents 5611cc45 3d18c983
@@ -536,7 +536,7 @@ static int __init ar7_register_uarts(void)
 	bus_clk = clk_get(NULL, "bus");
 	if (IS_ERR(bus_clk))
-		panic("unable to get bus clk\n");
+		panic("unable to get bus clk");
 	uart_port.type = PORT_AR7;
 	uart_port.uartclk = clk_get_rate(bus_clk) / 2;
...
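
Note: every panic() hunk in this merge makes the same cosmetic change, dropping the trailing "\n" from the format string; panic() prints the message as a complete "Kernel panic - not syncing: ..." line itself, so the extra newline only produced a blank line. A minimal sketch of the resulting convention, using hypothetical foo_* names rather than code from this commit:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>

/* Illustrative only: panic() messages carry no trailing "\n" (panic()
 * terminates the line itself); printk()-style messages keep theirs.
 */
static int __init foo_clock_setup(void)
{
	struct clk *bus_clk = clk_get(NULL, "bus");

	if (IS_ERR(bus_clk))
		panic("unable to get bus clk");
	pr_info("bus clock at %lu Hz\n", clk_get_rate(bus_clk));
	return 0;
}
arch_initcall(foo_clock_setup);
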
@@ -96,7 +96,7 @@ void __init plat_mem_setup(void)
 	io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000);
 	if (!io_base)
-		panic("Can't remap IO base!\n");
+		panic("Can't remap IO base!");
 	set_io_port_base(io_base);
 	prom_meminit();
...
@@ -134,7 +134,7 @@ static void __init ath79_detect_sys_type(void)
 		break;
 	default:
-		panic("ath79: unknown SoC, id:0x%08x\n", id);
+		panic("ath79: unknown SoC, id:0x%08x", id);
 	}
 	sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
...
@@ -289,7 +289,7 @@ static void __init bcm47xx_register_ssb(void)
 	err = ssb_bus_ssbbus_register(&(bcm47xx_bus.ssb), SSB_ENUM_BASE,
				      bcm47xx_get_invariants);
 	if (err)
-		panic("Failed to initialize SSB bus (err %d)\n", err);
+		panic("Failed to initialize SSB bus (err %d)", err);
 	mcore = &bcm47xx_bus.ssb.mipscore;
 	if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
@@ -314,7 +314,7 @@ static void __init bcm47xx_register_bcma(void)
 	err = bcma_host_soc_register(&bcm47xx_bus.bcma);
 	if (err)
-		panic("Failed to initialize BCMA bus (err %d)\n", err);
+		panic("Failed to initialize BCMA bus (err %d)", err);
 }
 #endif
...
@@ -767,11 +767,11 @@ void prom_free_prom_memory(void)
			     : "=r" (insn) : : "$31", "memory");
 		if ((insn >> 26) != 0x33)
-			panic("No PREF instruction at Core-14449 probe point.\n");
+			panic("No PREF instruction at Core-14449 probe point.");
 		if (((insn >> 16) & 0x1f) != 28)
 			panic("Core-14449 WAR not in place (%04x).\n"
-			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).\n", insn);
+			      "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn);
 	}
 #ifdef CONFIG_CAVIUM_DECODE_RSL
 	cvmx_interrupt_rsl_enable();
@@ -779,7 +779,7 @@ void prom_free_prom_memory(void)
 	/* Add an interrupt handler for general failures. */
 	if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
			"RML/RSL", octeon_rlm_interrupt)) {
-		panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
+		panic("Unable to request_irq(OCTEON_IRQ_RML)");
 	}
 #endif
 }
@@ -210,7 +210,7 @@ void octeon_prepare_cpus(unsigned int max_cpus)
 	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
-		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
+		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
 	}
 }
...
@@ -18,12 +18,6 @@
 #include <asm-generic/pgtable-nopmd.h>
-/*
- * - add_wired_entry() add a fixed TLB entry, and move wired register
- */
-extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
-			    unsigned long entryhi, unsigned long pagemask);
 /*
  * - add_temporary_entry() add a temporary TLB entry. We use TLB entries
  *   starting at the top and working down. This is for populating the
...
+#ifndef __ASM_TLBMISC_H
+#define __ASM_TLBMISC_H
+/*
+ * - add_wired_entry() add a fixed TLB entry, and move wired register
+ */
+extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
+			    unsigned long entryhi, unsigned long pagemask);
+#endif /* __ASM_TLBMISC_H */
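
The new <asm/tlbmisc.h> header gives add_wired_entry() a dedicated home, so the users patched below (jazz, the TLB handlers, the Alchemy PCI driver) include it explicitly instead of relying on <asm/pgtable.h>. A hedged usage sketch follows; the EntryLo encoding and the constants are assumptions for illustration, not code from this commit:

#include <linux/types.h>
#include <asm/tlbmisc.h>

/* Illustrative only: wire a 32MB region (an even/odd pair of 16MB pages)
 * at a fixed kernel virtual address.  EntryLo takes the PFN shifted into
 * bits 6 and up plus the C/D/V/G flag bits; 0x17 here means uncached,
 * dirty, valid, global.  0x01ffe000 is the 16MB page mask.
 */
static void example_wire_region(unsigned long vaddr, phys_addr_t paddr)
{
	unsigned long lo0 = (paddr >> 6) | 0x17;
	unsigned long lo1 = ((paddr + 0x1000000) >> 6) | 0x17;

	add_wired_entry(lo0, lo1, vaddr & ~0x01ffffffUL, 0x01ffe000);
}
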
@@ -20,6 +20,7 @@
 #include <asm/io.h>
 #include <asm/jazz.h>
 #include <asm/pgtable.h>
+#include <asm/tlbmisc.h>
 static DEFINE_RAW_SPINLOCK(r4030_lock);
...
@@ -21,6 +21,7 @@
 #include <asm/jazzdma.h>
 #include <asm/reboot.h>
 #include <asm/pgtable.h>
+#include <asm/tlbmisc.h>
 extern asmlinkage void jazz_handle_int(void);
...
@@ -488,7 +488,7 @@ static int __init qi_lb60_board_setup(void)
 	board_gpio_setup();
 	if (qi_lb60_init_platform_devices())
-		panic("Failed to initialize platform devices\n");
+		panic("Failed to initialize platform devices");
 	return 0;
 }
...
@@ -559,7 +559,7 @@ void smtc_prepare_cpus(int cpus)
 	pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
 	if (pipi == NULL)
-		panic("kmalloc of IPI message buffers failed\n");
+		panic("kmalloc of IPI message buffers failed");
 	else
 		printk("IPI buffer pool of %d buffers\n", nipi);
 	for (i = 0; i < nipi; i++) {
@@ -813,7 +813,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 	if (pipi == NULL) {
 		bust_spinlocks(1);
 		mips_mt_regdump(dvpe());
-		panic("IPI Msg. Buffers Depleted\n");
+		panic("IPI Msg. Buffers Depleted");
 	}
 	pipi->type = type;
 	pipi->arg = (void *)action;
...
@@ -400,7 +400,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops) {
-		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
 		ssleep(5);
 		panic("Fatal exception");
 	}
@@ -1150,7 +1150,7 @@ asmlinkage void do_mt(struct pt_regs *regs)
 asmlinkage void do_dsp(struct pt_regs *regs)
 {
 	if (cpu_has_dsp)
-		panic("Unexpected DSP exception\n");
+		panic("Unexpected DSP exception");
 	force_sig(SIGILL, current);
 }
...
@@ -134,11 +134,11 @@ void __init plat_time_init(void)
 	struct clk *clk;
 	if (insert_resource(&iomem_resource, &ltq_cgu_resource) < 0)
-		panic("Failed to insert cgu memory\n");
+		panic("Failed to insert cgu memory");
 	if (request_mem_region(ltq_cgu_resource.start,
			resource_size(&ltq_cgu_resource), "cgu") < 0)
-		panic("Failed to request cgu memory\n");
+		panic("Failed to request cgu memory");
 	ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start,
				resource_size(&ltq_cgu_resource));
...
@@ -249,28 +249,28 @@ void __init arch_init_irq(void)
 	int i;
 	if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
-		panic("Failed to insert icu memory\n");
+		panic("Failed to insert icu memory");
 	if (request_mem_region(ltq_icu_resource.start,
			resource_size(&ltq_icu_resource), "icu") < 0)
-		panic("Failed to request icu memory\n");
+		panic("Failed to request icu memory");
 	ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
				resource_size(&ltq_icu_resource));
 	if (!ltq_icu_membase)
-		panic("Failed to remap icu memory\n");
+		panic("Failed to remap icu memory");
 	if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
-		panic("Failed to insert eiu memory\n");
+		panic("Failed to insert eiu memory");
 	if (request_mem_region(ltq_eiu_resource.start,
			resource_size(&ltq_eiu_resource), "eiu") < 0)
-		panic("Failed to request eiu memory\n");
+		panic("Failed to request eiu memory");
 	ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
				resource_size(&ltq_eiu_resource));
 	if (!ltq_eiu_membase)
-		panic("Failed to remap eiu memory\n");
+		panic("Failed to remap eiu memory");
 	/* make sure all irqs are turned off by default */
 	for (i = 0; i < 5; i++)
...
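
The lantiq hunks above and below all repeat the same claim-and-map sequence before touching a block of SoC registers; only the panic() strings change in this merge. A hedged sketch of that pattern as a hypothetical helper (ltq_remap_resource() is not a function from this commit):

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/kernel.h>

/* Hypothetical helper showing the claim-and-map pattern used by the
 * lantiq early-init code: publish the range in the iomem resource tree,
 * claim it, then map it uncached.  It panics on failure, as the callers
 * above do, because these register blocks are needed very early in boot.
 */
static void __iomem *__init ltq_remap_resource(struct resource *res,
					       const char *name)
{
	void __iomem *base;

	if (insert_resource(&iomem_resource, res) < 0)
		panic("Failed to insert %s memory", name);

	if (request_mem_region(res->start, resource_size(res), name) == NULL)
		panic("Failed to request %s memory", name);

	base = ioremap_nocache(res->start, resource_size(res));
	if (!base)
		panic("Failed to remap %s memory", name);

	return base;
}
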
@@ -222,17 +222,17 @@ ltq_dma_init(void)
 	/* insert and request the memory region */
 	if (insert_resource(&iomem_resource, &ltq_dma_resource) < 0)
-		panic("Failed to insert dma memory\n");
+		panic("Failed to insert dma memory");
 	if (request_mem_region(ltq_dma_resource.start,
			resource_size(&ltq_dma_resource), "dma") < 0)
-		panic("Failed to request dma memory\n");
+		panic("Failed to request dma memory");
 	/* remap dma register range */
 	ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start,
				resource_size(&ltq_dma_resource));
 	if (!ltq_dma_membase)
-		panic("Failed to remap dma memory\n");
+		panic("Failed to remap dma memory");
 	/* power up and reset the dma engine */
 	ltq_pmu_enable(PMU_DMA);
...
@@ -32,17 +32,17 @@ static int __init lantiq_ebu_init(void)
 {
 	/* insert and request the memory region */
 	if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
-		panic("Failed to insert ebu memory\n");
+		panic("Failed to insert ebu memory");
 	if (request_mem_region(ltq_ebu_resource.start,
			resource_size(&ltq_ebu_resource), "ebu") < 0)
-		panic("Failed to request ebu memory\n");
+		panic("Failed to request ebu memory");
 	/* remap ebu register range */
 	ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
				resource_size(&ltq_ebu_resource));
 	if (!ltq_ebu_membase)
-		panic("Failed to remap ebu memory\n");
+		panic("Failed to remap ebu memory");
 	/* make sure to unprotect the memory region where flash is located */
 	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
...
@@ -40,7 +40,7 @@ void ltq_pmu_enable(unsigned int module)
 	do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module));
 	if (!err)
-		panic("activating PMU module failed!\n");
+		panic("activating PMU module failed!");
 }
 EXPORT_SYMBOL(ltq_pmu_enable);
@@ -53,16 +53,16 @@ EXPORT_SYMBOL(ltq_pmu_disable);
 int __init ltq_pmu_init(void)
 {
 	if (insert_resource(&iomem_resource, &ltq_pmu_resource) < 0)
-		panic("Failed to insert pmu memory\n");
+		panic("Failed to insert pmu memory");
 	if (request_mem_region(ltq_pmu_resource.start,
			resource_size(&ltq_pmu_resource), "pmu") < 0)
-		panic("Failed to request pmu memory\n");
+		panic("Failed to request pmu memory");
 	ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start,
				resource_size(&ltq_pmu_resource));
 	if (!ltq_pmu_membase)
-		panic("Failed to remap pmu memory\n");
+		panic("Failed to remap pmu memory");
 	return 0;
 }
...
@@ -69,17 +69,17 @@ static int __init mips_reboot_setup(void)
 {
 	/* insert and request the memory region */
 	if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0)
-		panic("Failed to insert rcu memory\n");
+		panic("Failed to insert rcu memory");
 	if (request_mem_region(ltq_rcu_resource.start,
			resource_size(&ltq_rcu_resource), "rcu") < 0)
-		panic("Failed to request rcu memory\n");
+		panic("Failed to request rcu memory");
 	/* remap rcu register range */
 	ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start,
				resource_size(&ltq_rcu_resource));
 	if (!ltq_rcu_membase)
-		panic("Failed to remap rcu memory\n");
+		panic("Failed to remap rcu memory");
 	_machine_restart = ltq_machine_restart;
 	_machine_halt = ltq_machine_halt;
...
@@ -223,7 +223,7 @@ static void __cpuinit probe_octeon(void)
 		break;
 	default:
-		panic("Unsupported Cavium Networks CPU type\n");
+		panic("Unsupported Cavium Networks CPU type");
 		break;
 	}
...
@@ -20,6 +20,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/system.h>
+#include <asm/tlbmisc.h>
 #include <asm/isadep.h>
 #include <asm/io.h>
 #include <asm/bootinfo.h>
...
@@ -19,6 +19,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#include <asm/tlbmisc.h>
 extern void build_tlb_refill_handler(void);
...
@@ -162,7 +162,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 		msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32;
 		break;
 	default:
-		panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type\n");
+		panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type");
 	}
 	msg.data = irq - OCTEON_IRQ_MSI_BIT0;
...
@@ -13,9 +13,11 @@
 #include <linux/platform_device.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/syscore_ops.h>
 #include <linux/vmalloc.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/tlbmisc.h>
 #ifdef CONFIG_DEBUG_PCI
 #define DBG(x...) printk(KERN_DEBUG x)
@@ -41,6 +43,12 @@ struct alchemy_pci_context {
 	int (*board_pci_idsel)(unsigned int devsel, int assert);
 };
+/* for syscore_ops. There's only one PCI controller on Alchemy chips, so this
+ * should suffice for now.
+ */
+static struct alchemy_pci_context *__alchemy_pci_ctx;
 /* IO/MEM resources for PCI. Keep the memres in sync with __fixup_bigphys_addr
  * in arch/mips/alchemy/common/setup.c
  */
@@ -99,18 +107,6 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
 		return -1;
 	}
-	/* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
-	 * on resume, clearing our wired entry. Unfortunately the ->resume()
-	 * callback is called way way way too late (and ->suspend() too early)
-	 * to have them destroy and recreate it. Instead just test if c0_wired
-	 * is now lower than the index we retrieved before suspending and then
-	 * recreate the entry if necessary. Of course this is totally bonkers
-	 * and breaks as soon as someone else adds another wired entry somewhere
-	 * else. Anyone have any ideas how to handle this better?
-	 */
-	if (unlikely(read_c0_wired() < ctx->wired_entry))
-		alchemy_pci_wired_entry(ctx);
 	local_irq_save(flags);
 	r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff;
 	r |= PCI_STATCMD_STATUS(0x2000);
@@ -304,6 +300,62 @@ static int alchemy_pci_def_idsel(unsigned int devsel, int assert)
 	return 1; /* success */
 }
+/* save PCI controller register contents. */
+static int alchemy_pci_suspend(void)
+{
+	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
+	if (!ctx)
+		return 0;
+	ctx->pm[0] = __raw_readl(ctx->regs + PCI_REG_CMEM);
+	ctx->pm[1] = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
+	ctx->pm[2] = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
+	ctx->pm[3] = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
+	ctx->pm[4] = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
+	ctx->pm[5] = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
+	ctx->pm[6] = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
+	ctx->pm[7] = __raw_readl(ctx->regs + PCI_REG_ID);
+	ctx->pm[8] = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
+	ctx->pm[9] = __raw_readl(ctx->regs + PCI_REG_PARAM);
+	ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
+	ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);
+	return 0;
+}
+static void alchemy_pci_resume(void)
+{
+	struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
+	if (!ctx)
+		return;
+	__raw_writel(ctx->pm[0], ctx->regs + PCI_REG_CMEM);
+	__raw_writel(ctx->pm[2], ctx->regs + PCI_REG_B2BMASK_CCH);
+	__raw_writel(ctx->pm[3], ctx->regs + PCI_REG_B2BBASE0_VID);
+	__raw_writel(ctx->pm[4], ctx->regs + PCI_REG_B2BBASE1_SID);
+	__raw_writel(ctx->pm[5], ctx->regs + PCI_REG_MWMASK_DEV);
+	__raw_writel(ctx->pm[6], ctx->regs + PCI_REG_MWBASE_REV_CCL);
+	__raw_writel(ctx->pm[7], ctx->regs + PCI_REG_ID);
+	__raw_writel(ctx->pm[8], ctx->regs + PCI_REG_CLASSREV);
+	__raw_writel(ctx->pm[9], ctx->regs + PCI_REG_PARAM);
+	__raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
+	__raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
+	wmb();
+	__raw_writel(ctx->pm[1], ctx->regs + PCI_REG_CONFIG);
+	wmb();
+	/* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
+	 * on resume, making it necessary to recreate it as soon as possible.
+	 */
+	ctx->wired_entry = 8191; /* impossibly high value */
+	alchemy_pci_wired_entry(ctx); /* install it */
+}
+static struct syscore_ops alchemy_pci_pmops = {
+	.suspend = alchemy_pci_suspend,
+	.resume = alchemy_pci_resume,
+};
 static int __devinit alchemy_pci_probe(struct platform_device *pdev)
 {
 	struct alchemy_pci_platdata *pd = pdev->dev.platform_data;
@@ -396,7 +448,8 @@ static int __devinit alchemy_pci_probe(struct platform_device *pdev)
 		ret = -ENOMEM;
 		goto out4;
 	}
-	ctx->wired_entry = 8192; /* impossibly high value */
+	ctx->wired_entry = 8191; /* impossibly high value */
+	alchemy_pci_wired_entry(ctx); /* install it */
 	set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base);
@@ -408,7 +461,9 @@ static int __devinit alchemy_pci_probe(struct platform_device *pdev)
 	__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
 	wmb();
+	__alchemy_pci_ctx = ctx;
 	platform_set_drvdata(pdev, ctx);
+	register_syscore_ops(&alchemy_pci_pmops);
 	register_pci_controller(&ctx->alchemy_pci_ctrl);
 	return 0;
@@ -425,68 +480,11 @@ static int __devinit alchemy_pci_probe(struct platform_device *pdev)
 	return ret;
 }
-#ifdef CONFIG_PM
-/* save PCI controller register contents. */
-static int alchemy_pci_suspend(struct device *dev)
-{
-	struct alchemy_pci_context *ctx = dev_get_drvdata(dev);
-	ctx->pm[0] = __raw_readl(ctx->regs + PCI_REG_CMEM);
-	ctx->pm[1] = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
-	ctx->pm[2] = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
-	ctx->pm[3] = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
-	ctx->pm[4] = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
-	ctx->pm[5] = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
-	ctx->pm[6] = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
-	ctx->pm[7] = __raw_readl(ctx->regs + PCI_REG_ID);
-	ctx->pm[8] = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
-	ctx->pm[9] = __raw_readl(ctx->regs + PCI_REG_PARAM);
-	ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
-	ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);
-	return 0;
-}
-static int alchemy_pci_resume(struct device *dev)
-{
-	struct alchemy_pci_context *ctx = dev_get_drvdata(dev);
-	__raw_writel(ctx->pm[0], ctx->regs + PCI_REG_CMEM);
-	__raw_writel(ctx->pm[2], ctx->regs + PCI_REG_B2BMASK_CCH);
-	__raw_writel(ctx->pm[3], ctx->regs + PCI_REG_B2BBASE0_VID);
-	__raw_writel(ctx->pm[4], ctx->regs + PCI_REG_B2BBASE1_SID);
-	__raw_writel(ctx->pm[5], ctx->regs + PCI_REG_MWMASK_DEV);
-	__raw_writel(ctx->pm[6], ctx->regs + PCI_REG_MWBASE_REV_CCL);
-	__raw_writel(ctx->pm[7], ctx->regs + PCI_REG_ID);
-	__raw_writel(ctx->pm[8], ctx->regs + PCI_REG_CLASSREV);
-	__raw_writel(ctx->pm[9], ctx->regs + PCI_REG_PARAM);
-	__raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
-	__raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
-	wmb();
-	__raw_writel(ctx->pm[1], ctx->regs + PCI_REG_CONFIG);
-	wmb();
-	return 0;
-}
-static const struct dev_pm_ops alchemy_pci_pmops = {
-	.suspend = alchemy_pci_suspend,
-	.resume = alchemy_pci_resume,
-};
-#define ALCHEMY_PCICTL_PM (&alchemy_pci_pmops)
-#else
-#define ALCHEMY_PCICTL_PM NULL
-#endif
 static struct platform_driver alchemy_pcictl_driver = {
 	.probe = alchemy_pci_probe,
 	.driver = {
 		.name = "alchemy-pci",
 		.owner = THIS_MODULE,
-		.pm = ALCHEMY_PCICTL_PM,
 	},
 };
...
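
The Alchemy PCI change above replaces the driver's dev_pm_ops with syscore_ops. The presumable motivation, consistent with the comment deleted from config_access(), is ordering: syscore callbacks run once per system suspend/resume, with interrupts disabled, after all device suspend callbacks and before any device resume callbacks, so the controller registers and the wired TLB entry can be restored before anything touches PCI config space. A minimal registration sketch of the mechanism, with hypothetical example_* names:

#include <linux/init.h>
#include <linux/syscore_ops.h>

/* Hypothetical example of the syscore mechanism used above: the callbacks
 * take no device argument and cover a singleton, non-device resource.
 */
static int example_syscore_suspend(void)
{
	/* save global state here; runs after all device suspend callbacks */
	return 0;
}

static void example_syscore_resume(void)
{
	/* restore it here; runs before any device resume callback */
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_syscore_suspend,
	.resume = example_syscore_resume,
};

static int __init example_syscore_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}
device_initcall(example_syscore_init);
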
@@ -209,7 +209,7 @@ void __init prom_init(void)
 	default:
 		/* we don't recognize the machine */
 		mips_machtype = MACH_UNKNOWN;
-		panic("***Bogosity factor five***, exiting\n");
+		panic("***Bogosity factor five***, exiting");
 		break;
 	}
...
@@ -73,7 +73,7 @@ static inline int alloc_level(int cpu, int irq)
 	level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
 	if (level >= LEVELS_PER_SLICE)
-		panic("Cpu %d flooded with devices\n", cpu);
+		panic("Cpu %d flooded with devices", cpu);
 	__set_bit(level, hub->irq_alloc_mask);
 	si->level_to_irq[level] = irq;
@@ -96,7 +96,7 @@ static inline int find_level(cpuid_t *cpunum, int irq)
 		}
 	}
-	panic("Could not identify cpu/level for irq %d\n", irq);
+	panic("Could not identify cpu/level for irq %d", irq);
 }
 /*
...