Commit b7c4a9f8 authored by Adrian Bunk, committed by Adrian Bunk
parents 65163fd7 0b154bb7
...@@ -8,10 +8,9 @@ Compilation of kernel ...@@ -8,10 +8,9 @@ Compilation of kernel
--------------------- ---------------------
In order to compile ARM Linux, you will need a compiler capable of In order to compile ARM Linux, you will need a compiler capable of
generating ARM ELF code with GNU extensions. GCC 2.95.1, EGCS generating ARM ELF code with GNU extensions. GCC 3.3 is known to be
1.1.2, and GCC 3.3 are known to be good compilers. Fortunately, you a good compiler. Fortunately, you needn't guess. The kernel will report
needn't guess. The kernel will report an error if your compiler is an error if your compiler is a recognized offender.
a recognized offender.
To build ARM Linux natively, you shouldn't have to alter the ARCH = line To build ARM Linux natively, you shouldn't have to alter the ARCH = line
in the top level Makefile. However, if you don't have the ARM Linux ELF in the top level Makefile. However, if you don't have the ARM Linux ELF
......
...@@ -297,6 +297,11 @@ P: Richard Purdie ...@@ -297,6 +297,11 @@ P: Richard Purdie
M: rpurdie@rpsys.net M: rpurdie@rpsys.net
S: Maintained S: Maintained
ARM/TOSA MACHINE SUPPORT
P: Dirk Opfer
M: dirk@opfer-online.de
S: Maintained
ARM/PLEB SUPPORT ARM/PLEB SUPPORT
P: Peter Chubb P: Peter Chubb
M: pleb@gelato.unsw.edu.au M: pleb@gelato.unsw.edu.au
......
...@@ -346,7 +346,8 @@ AFLAGS_KERNEL = ...@@ -346,7 +346,8 @@ AFLAGS_KERNEL =
# Use LINUXINCLUDE when you must reference the include/ directory. # Use LINUXINCLUDE when you must reference the include/ directory.
# Needed to be compatible with the O= option # Needed to be compatible with the O= option
LINUXINCLUDE := -Iinclude \ LINUXINCLUDE := -Iinclude \
$(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) \
-imacros include/linux/autoconf.h
CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE) CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE)
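The new -imacros flag makes GCC read include/linux/autoconf.h for its macro definitions only, before each translation unit is compiled, so every source file sees the CONFIG_* symbols without an explicit #include. A minimal sketch of the effect (not part of the patch; the variable name is made up):

/*
 * Compiled as: gcc -imacros include/linux/autoconf.h -c example.c
 * The CONFIG_SMP test below works even though example.c never
 * includes autoconf.h, because -imacros injects its macros up front.
 */
#ifdef CONFIG_SMP
static const int example_nr_cpus_hint = 4;	/* hypothetical use of a CONFIG_ macro */
#else
static const int example_nr_cpus_hint = 1;
#endif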
...@@ -1249,11 +1250,6 @@ tags: FORCE ...@@ -1249,11 +1250,6 @@ tags: FORCE
# Scripts to check various things for consistency # Scripts to check various things for consistency
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
configcheck:
find * $(RCS_FIND_IGNORE) \
-name '*.[hcS]' -type f -print | sort \
| xargs $(PERL) -w scripts/checkconfig.pl
includecheck: includecheck:
find * $(RCS_FIND_IGNORE) \ find * $(RCS_FIND_IGNORE) \
-name '*.[hcS]' -type f -print | sort \ -name '*.[hcS]' -type f -print | sort \
......
...@@ -785,7 +785,7 @@ __kuser_helper_end: ...@@ -785,7 +785,7 @@ __kuser_helper_end:
* SP points to a minimal amount of processor-private memory, the address * SP points to a minimal amount of processor-private memory, the address
* of which is copied into r0 for the mode specific abort handler. * of which is copied into r0 for the mode specific abort handler.
*/ */
.macro vector_stub, name, correction=0 .macro vector_stub, name, mode, correction=0
.align 5 .align 5
vector_\name: vector_\name:
...@@ -805,15 +805,14 @@ vector_\name: ...@@ -805,15 +805,14 @@ vector_\name:
@ Prepare for SVC32 mode. IRQs remain disabled. @ Prepare for SVC32 mode. IRQs remain disabled.
@ @
mrs r0, cpsr mrs r0, cpsr
bic r0, r0, #MODE_MASK eor r0, r0, #(\mode ^ SVC_MODE)
orr r0, r0, #SVC_MODE
msr spsr_cxsf, r0 msr spsr_cxsf, r0
@ @
@ the branch table must immediately follow this code @ the branch table must immediately follow this code
@ @
mov r0, sp
and lr, lr, #0x0f and lr, lr, #0x0f
mov r0, sp
ldr lr, [pc, lr, lsl #2] ldr lr, [pc, lr, lsl #2]
movs pc, lr @ branch to handler in SVC mode movs pc, lr @ branch to handler in SVC mode
.endm .endm
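The single EOR above relies on the entry mode being known statically: because the CPSR mode field equals \mode when the stub runs, XOR-ing it with the constant (\mode ^ SVC_MODE) flips exactly the bits that differ, producing SVC_MODE while leaving every other CPSR bit untouched, which is why it can replace the old BIC/ORR pair. A small host-side check of that identity, using the standard ARM mode values (the program itself is illustrative, not part of the patch):

#include <assert.h>

#define MODE_MASK 0x1f
#define SVC_MODE  0x13
#define IRQ_MODE  0x12

int main(void)
{
	unsigned int cpsr = 0x80000000u | IRQ_MODE;		/* example: N flag set, IRQ mode */
	unsigned int svc  = cpsr ^ (IRQ_MODE ^ SVC_MODE);	/* the stub's single EOR */

	assert((svc & MODE_MASK) == SVC_MODE);			/* mode bits now say SVC */
	assert((svc & ~MODE_MASK) == (cpsr & ~MODE_MASK));	/* all other bits untouched */
	return 0;
}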
...@@ -823,7 +822,7 @@ __stubs_start: ...@@ -823,7 +822,7 @@ __stubs_start:
/* /*
* Interrupt dispatcher * Interrupt dispatcher
*/ */
vector_stub irq, 4 vector_stub irq, IRQ_MODE, 4
.long __irq_usr @ 0 (USR_26 / USR_32) .long __irq_usr @ 0 (USR_26 / USR_32)
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32) .long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
...@@ -846,7 +845,7 @@ __stubs_start: ...@@ -846,7 +845,7 @@ __stubs_start:
* Data abort dispatcher * Data abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/ */
vector_stub dabt, 8 vector_stub dabt, ABT_MODE, 8
.long __dabt_usr @ 0 (USR_26 / USR_32) .long __dabt_usr @ 0 (USR_26 / USR_32)
.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32) .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
...@@ -869,7 +868,7 @@ __stubs_start: ...@@ -869,7 +868,7 @@ __stubs_start:
* Prefetch abort dispatcher * Prefetch abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/ */
vector_stub pabt, 4 vector_stub pabt, ABT_MODE, 4
.long __pabt_usr @ 0 (USR_26 / USR_32) .long __pabt_usr @ 0 (USR_26 / USR_32)
.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32) .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
...@@ -892,7 +891,7 @@ __stubs_start: ...@@ -892,7 +891,7 @@ __stubs_start:
* Undef instr entry dispatcher * Undef instr entry dispatcher
* Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
*/ */
vector_stub und vector_stub und, UND_MODE
.long __und_usr @ 0 (USR_26 / USR_32) .long __und_usr @ 0 (USR_26 / USR_32)
.long __und_invalid @ 1 (FIQ_26 / FIQ_32) .long __und_invalid @ 1 (FIQ_26 / FIQ_32)
......
...@@ -338,6 +338,7 @@ void cpu_init(void) ...@@ -338,6 +338,7 @@ void cpu_init(void)
BUG(); BUG();
} }
if (system_state == SYSTEM_BOOTING)
dump_cpu_info(cpu); dump_cpu_info(cpu);
/* /*
...@@ -838,7 +839,12 @@ static int c_show(struct seq_file *m, void *v) ...@@ -838,7 +839,12 @@ static int c_show(struct seq_file *m, void *v)
#if defined(CONFIG_SMP) #if defined(CONFIG_SMP)
for_each_online_cpu(i) { for_each_online_cpu(i) {
seq_printf(m, "Processor\t: %d\n", i); /*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
* "processor". Give glibc what it expects.
*/
seq_printf(m, "processor\t: %d\n", i);
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n", seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
(per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
......
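The lower-case spelling matters because counting lines that begin with "processor" is how a C library typically derives the online CPU count from /proc/cpuinfo. A rough user-space sketch of that style of parsing (illustrative only, not glibc's actual code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	int cpus = 0;
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "processor", 9) == 0)	/* one line per online CPU */
			cpus++;
	fclose(f);
	printf("%d\n", cpus);
	return 0;
}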
...@@ -91,8 +91,8 @@ EXPORT_SYMBOL(ixp2000_uengine_csr_write); ...@@ -91,8 +91,8 @@ EXPORT_SYMBOL(ixp2000_uengine_csr_write);
void ixp2000_uengine_reset(u32 uengine_mask) void ixp2000_uengine_reset(u32 uengine_mask)
{ {
ixp2000_reg_write(IXP2000_RESET1, uengine_mask & ixp2000_uengine_mask); ixp2000_reg_wrb(IXP2000_RESET1, uengine_mask & ixp2000_uengine_mask);
ixp2000_reg_write(IXP2000_RESET1, 0); ixp2000_reg_wrb(IXP2000_RESET1, 0);
} }
EXPORT_SYMBOL(ixp2000_uengine_reset); EXPORT_SYMBOL(ixp2000_uengine_reset);
...@@ -452,21 +452,20 @@ static int __init ixp2000_uengine_init(void) ...@@ -452,21 +452,20 @@ static int __init ixp2000_uengine_init(void)
/* /*
* Reset microengines. * Reset microengines.
*/ */
ixp2000_reg_write(IXP2000_RESET1, ixp2000_uengine_mask); ixp2000_uengine_reset(ixp2000_uengine_mask);
ixp2000_reg_write(IXP2000_RESET1, 0);
/* /*
* Synchronise timestamp counters across all microengines. * Synchronise timestamp counters across all microengines.
*/ */
value = ixp2000_reg_read(IXP2000_MISC_CONTROL); value = ixp2000_reg_read(IXP2000_MISC_CONTROL);
ixp2000_reg_write(IXP2000_MISC_CONTROL, value & ~0x80); ixp2000_reg_wrb(IXP2000_MISC_CONTROL, value & ~0x80);
for (uengine = 0; uengine < 32; uengine++) { for (uengine = 0; uengine < 32; uengine++) {
if (ixp2000_uengine_mask & (1 << uengine)) { if (ixp2000_uengine_mask & (1 << uengine)) {
ixp2000_uengine_csr_write(uengine, TIMESTAMP_LOW, 0); ixp2000_uengine_csr_write(uengine, TIMESTAMP_LOW, 0);
ixp2000_uengine_csr_write(uengine, TIMESTAMP_HIGH, 0); ixp2000_uengine_csr_write(uengine, TIMESTAMP_HIGH, 0);
} }
} }
ixp2000_reg_write(IXP2000_MISC_CONTROL, value | 0x80); ixp2000_reg_wrb(IXP2000_MISC_CONTROL, value | 0x80);
return 0; return 0;
} }
......
...@@ -427,7 +427,7 @@ void __init ixp4xx_pci_preinit(void) ...@@ -427,7 +427,7 @@ void __init ixp4xx_pci_preinit(void)
#ifdef __ARMEB__ #ifdef __ARMEB__
*PCI_CSR = PCI_CSR_IC | PCI_CSR_ABE | PCI_CSR_PDS | PCI_CSR_ADS; *PCI_CSR = PCI_CSR_IC | PCI_CSR_ABE | PCI_CSR_PDS | PCI_CSR_ADS;
#else #else
*PCI_CSR = PCI_CSR_IC; *PCI_CSR = PCI_CSR_IC | PCI_CSR_ABE;
#endif #endif
pr_debug("DONE\n"); pr_debug("DONE\n");
......
...@@ -27,7 +27,8 @@ config PXA_SHARPSL ...@@ -27,7 +27,8 @@ config PXA_SHARPSL
Say Y here if you intend to run this kernel on a Say Y here if you intend to run this kernel on a
Sharp Zaurus SL-5600 (Poodle), SL-C700 (Corgi), Sharp Zaurus SL-5600 (Poodle), SL-C700 (Corgi),
SL-C750 (Shepherd), SL-C760 (Husky), SL-C1000 (Akita), SL-C750 (Shepherd), SL-C760 (Husky), SL-C1000 (Akita),
SL-C3000 (Spitz) or SL-C3100 (Borzoi) handheld computer. SL-C3000 (Spitz), SL-C3100 (Borzoi) or SL-C6000x (Tosa)
handheld computer.
endchoice endchoice
...@@ -37,7 +38,7 @@ choice ...@@ -37,7 +38,7 @@ choice
prompt "Select target Sharp Zaurus device range" prompt "Select target Sharp Zaurus device range"
config PXA_SHARPSL_25x config PXA_SHARPSL_25x
bool "Sharp PXA25x models (SL-5600 and SL-C7xx)" bool "Sharp PXA25x models (SL-5600, SL-C7xx and SL-C6000x)"
select PXA25x select PXA25x
config PXA_SHARPSL_27x config PXA_SHARPSL_27x
...@@ -80,6 +81,10 @@ config MACH_BORZOI ...@@ -80,6 +81,10 @@ config MACH_BORZOI
depends PXA_SHARPSL_27x depends PXA_SHARPSL_27x
select PXA_SHARP_Cxx00 select PXA_SHARP_Cxx00
config MACH_TOSA
bool "Enable Sharp SL-6000x (Tosa) Support"
depends PXA_SHARPSL
config PXA25x config PXA25x
bool bool
help help
......
...@@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_PXA_IDP) += idp.o ...@@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o corgi_lcd.o ssp.o obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o corgi_lcd.o ssp.o
obj-$(CONFIG_PXA_SHARP_Cxx00) += spitz.o corgi_ssp.o corgi_lcd.o ssp.o obj-$(CONFIG_PXA_SHARP_Cxx00) += spitz.o corgi_ssp.o corgi_lcd.o ssp.o
obj-$(CONFIG_MACH_POODLE) += poodle.o obj-$(CONFIG_MACH_POODLE) += poodle.o
obj-$(CONFIG_MACH_TOSA) += tosa.o
# Support for blinky lights # Support for blinky lights
led-y := leds.o led-y := leds.o
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/time.h> #include <linux/time.h>
...@@ -19,6 +20,7 @@ ...@@ -19,6 +20,7 @@
#include <asm/hardware.h> #include <asm/hardware.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/arch/pm.h>
#include <asm/arch/pxa-regs.h> #include <asm/arch/pxa-regs.h>
#include <asm/arch/lubbock.h> #include <asm/arch/lubbock.h>
#include <asm/mach/time.h> #include <asm/mach/time.h>
...@@ -72,7 +74,7 @@ enum { SLEEP_SAVE_START = 0, ...@@ -72,7 +74,7 @@ enum { SLEEP_SAVE_START = 0,
}; };
static int pxa_pm_enter(suspend_state_t state) int pxa_pm_enter(suspend_state_t state)
{ {
unsigned long sleep_save[SLEEP_SAVE_SIZE]; unsigned long sleep_save[SLEEP_SAVE_SIZE];
unsigned long checksum = 0; unsigned long checksum = 0;
...@@ -191,6 +193,8 @@ static int pxa_pm_enter(suspend_state_t state) ...@@ -191,6 +193,8 @@ static int pxa_pm_enter(suspend_state_t state)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(pxa_pm_enter);
unsigned long sleep_phys_sp(void *sp) unsigned long sleep_phys_sp(void *sp)
{ {
return virt_to_phys(sp); return virt_to_phys(sp);
...@@ -199,21 +203,25 @@ unsigned long sleep_phys_sp(void *sp) ...@@ -199,21 +203,25 @@ unsigned long sleep_phys_sp(void *sp)
/* /*
* Called after processes are frozen, but before we shut down devices. * Called after processes are frozen, but before we shut down devices.
*/ */
static int pxa_pm_prepare(suspend_state_t state) int pxa_pm_prepare(suspend_state_t state)
{ {
extern int pxa_cpu_pm_prepare(suspend_state_t state); extern int pxa_cpu_pm_prepare(suspend_state_t state);
return pxa_cpu_pm_prepare(state); return pxa_cpu_pm_prepare(state);
} }
EXPORT_SYMBOL_GPL(pxa_pm_prepare);
/* /*
* Called after devices are re-setup, but before processes are thawed. * Called after devices are re-setup, but before processes are thawed.
*/ */
static int pxa_pm_finish(suspend_state_t state) int pxa_pm_finish(suspend_state_t state)
{ {
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(pxa_pm_finish);
/* /*
* Set to PM_DISK_FIRMWARE so we can quickly veto suspend-to-disk. * Set to PM_DISK_FIRMWARE so we can quickly veto suspend-to-disk.
*/ */
...@@ -230,4 +238,4 @@ static int __init pxa_pm_init(void) ...@@ -230,4 +238,4 @@ static int __init pxa_pm_init(void)
return 0; return 0;
} }
late_initcall(pxa_pm_init); device_initcall(pxa_pm_init);
/*
* Support for Sharp SL-C6000x PDAs
* Model: (Tosa)
*
* Copyright (c) 2005 Dirk Opfer
*
* Based on code written by Sharp/Lineo for 2.4 kernels
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/arch/irda.h>
#include <asm/arch/mmc.h>
#include <asm/arch/udc.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/arch/pxa-regs.h>
#include <asm/arch/irq.h>
#include <asm/arch/tosa.h>
#include <asm/hardware/scoop.h>
#include <asm/mach/sharpsl_param.h>
#include "generic.h"
/*
* SCOOP Device
*/
static struct resource tosa_scoop_resources[] = {
[0] = {
.start = TOSA_CF_PHYS,
.end = TOSA_CF_PHYS + 0xfff,
.flags = IORESOURCE_MEM,
},
};
static struct scoop_config tosa_scoop_setup = {
.io_dir = TOSA_SCOOP_IO_DIR,
.io_out = TOSA_SCOOP_IO_OUT,
};
struct platform_device tosascoop_device = {
.name = "sharp-scoop",
.id = 0,
.dev = {
.platform_data = &tosa_scoop_setup,
},
.num_resources = ARRAY_SIZE(tosa_scoop_resources),
.resource = tosa_scoop_resources,
};
/*
* SCOOP Device Jacket
*/
static struct resource tosa_scoop_jc_resources[] = {
[0] = {
.start = TOSA_SCOOP_PHYS + 0x40,
.end = TOSA_SCOOP_PHYS + 0xfff,
.flags = IORESOURCE_MEM,
},
};
static struct scoop_config tosa_scoop_jc_setup = {
.io_dir = TOSA_SCOOP_JC_IO_DIR,
.io_out = TOSA_SCOOP_JC_IO_OUT,
};
struct platform_device tosascoop_jc_device = {
.name = "sharp-scoop",
.id = 1,
.dev = {
.platform_data = &tosa_scoop_jc_setup,
.parent = &tosascoop_device.dev,
},
.num_resources = ARRAY_SIZE(tosa_scoop_jc_resources),
.resource = tosa_scoop_jc_resources,
};
static struct scoop_pcmcia_dev tosa_pcmcia_scoop[] = {
{
.dev = &tosascoop_device.dev,
.irq = TOSA_IRQ_GPIO_CF_IRQ,
.cd_irq = TOSA_IRQ_GPIO_CF_CD,
.cd_irq_str = "PCMCIA0 CD",
},{
.dev = &tosascoop_jc_device.dev,
.irq = TOSA_IRQ_GPIO_JC_CF_IRQ,
.cd_irq = -1,
},
};
static struct platform_device *devices[] __initdata = {
&tosascoop_device,
&tosascoop_jc_device,
};
static void __init tosa_init(void)
{
pxa_gpio_mode(TOSA_GPIO_ON_RESET | GPIO_IN);
pxa_gpio_mode(TOSA_GPIO_TC6393_INT | GPIO_IN);
/* setup sleep mode values */
PWER = 0x00000002;
PFER = 0x00000000;
PRER = 0x00000002;
PGSR0 = 0x00000000;
PGSR1 = 0x00FF0002;
PGSR2 = 0x00014000;
PCFR |= PCFR_OPDE;
// enable batt_fault
PMCR = 0x01;
platform_add_devices(devices, ARRAY_SIZE(devices));
scoop_num = 2;
scoop_devs = &tosa_pcmcia_scoop[0];
}
static void __init fixup_tosa(struct machine_desc *desc,
struct tag *tags, char **cmdline, struct meminfo *mi)
{
sharpsl_save_param();
mi->nr_banks=1;
mi->bank[0].start = 0xa0000000;
mi->bank[0].node = 0;
mi->bank[0].size = (64*1024*1024);
}
MACHINE_START(TOSA, "SHARP Tosa")
.phys_ram = 0xa0000000,
.phys_io = 0x40000000,
.io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
.fixup = fixup_tosa,
.map_io = pxa_map_io,
.init_irq = pxa_init_irq,
.init_machine = tosa_init,
.timer = &pxa_timer,
MACHINE_END
...@@ -603,6 +603,15 @@ config NODES_SPAN_OTHER_NODES ...@@ -603,6 +603,15 @@ config NODES_SPAN_OTHER_NODES
def_bool y def_bool y
depends on NEED_MULTIPLE_NODES depends on NEED_MULTIPLE_NODES
config PPC_64K_PAGES
bool "64k page size"
help
This option changes the kernel logical page size to 64k. On machines
without processor support for 64k pages, the kernel will simulate
them by loading each individual 4k page on demand transparently,
while on hardware with such support, it will be used to map
normal application pages.
config SCHED_SMT config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support" bool "SMT (Hyperthreading) scheduler support"
depends on PPC64 && SMP depends on PPC64 && SMP
......
...@@ -125,6 +125,9 @@ int main(void) ...@@ -125,6 +125,9 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_64K_PAGES
DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
#endif
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
......
...@@ -240,7 +240,7 @@ struct cpu_spec cpu_specs[] = { ...@@ -240,7 +240,7 @@ struct cpu_spec cpu_specs[] = {
.oprofile_model = &op_model_power4, .oprofile_model = &op_model_power4,
#endif #endif
}, },
{ /* Power5 */ { /* Power5 GR */
.pvr_mask = 0xffff0000, .pvr_mask = 0xffff0000,
.pvr_value = 0x003a0000, .pvr_value = 0x003a0000,
.cpu_name = "POWER5 (gr)", .cpu_name = "POWER5 (gr)",
...@@ -255,7 +255,7 @@ struct cpu_spec cpu_specs[] = { ...@@ -255,7 +255,7 @@ struct cpu_spec cpu_specs[] = {
.oprofile_model = &op_model_power4, .oprofile_model = &op_model_power4,
#endif #endif
}, },
{ /* Power5 */ { /* Power5 GS */
.pvr_mask = 0xffff0000, .pvr_mask = 0xffff0000,
.pvr_value = 0x003b0000, .pvr_value = 0x003b0000,
.cpu_name = "POWER5 (gs)", .cpu_name = "POWER5 (gs)",
......
...@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = { ...@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
.xRanges = { .xRanges = {
{ .xPages = HvPagesToMap, { .xPages = HvPagesToMap,
.xOffset = 0, .xOffset = 0,
.xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT), .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
}, },
}, },
}; };
...@@ -554,12 +554,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, ...@@ -554,12 +554,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SLB)) { if (cpu_has_feature(CPU_FTR_SLB)) {
unsigned long sp_vsid = get_kernel_vsid(sp); unsigned long sp_vsid = get_kernel_vsid(sp);
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
sp_vsid <<= SLB_VSID_SHIFT; sp_vsid <<= SLB_VSID_SHIFT;
sp_vsid |= SLB_VSID_KERNEL; sp_vsid |= SLB_VSID_KERNEL | llp;
if (cpu_has_feature(CPU_FTR_16M_PAGE))
sp_vsid |= SLB_VSID_L;
p->thread.ksp_vsid = sp_vsid; p->thread.ksp_vsid = sp_vsid;
} }
......
...@@ -724,7 +724,7 @@ static inline char *find_flat_dt_string(u32 offset) ...@@ -724,7 +724,7 @@ static inline char *find_flat_dt_string(u32 offset)
* used to extract the memory informations at boot before we can * used to extract the memory informations at boot before we can
* unflatten the tree * unflatten the tree
*/ */
static int __init scan_flat_dt(int (*it)(unsigned long node, int __init of_scan_flat_dt(int (*it)(unsigned long node,
const char *uname, int depth, const char *uname, int depth,
void *data), void *data),
void *data) void *data)
...@@ -784,7 +784,7 @@ static int __init scan_flat_dt(int (*it)(unsigned long node, ...@@ -784,7 +784,7 @@ static int __init scan_flat_dt(int (*it)(unsigned long node,
* This function can be used within scan_flattened_dt callback to get * This function can be used within scan_flattened_dt callback to get
* access to properties * access to properties
*/ */
static void* __init get_flat_dt_prop(unsigned long node, const char *name, void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
unsigned long *size) unsigned long *size)
{ {
unsigned long p = node; unsigned long p = node;
...@@ -1087,7 +1087,7 @@ void __init unflatten_device_tree(void) ...@@ -1087,7 +1087,7 @@ void __init unflatten_device_tree(void)
static int __init early_init_dt_scan_cpus(unsigned long node, static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth, void *data) const char *uname, int depth, void *data)
{ {
char *type = get_flat_dt_prop(node, "device_type", NULL); char *type = of_get_flat_dt_prop(node, "device_type", NULL);
u32 *prop; u32 *prop;
unsigned long size = 0; unsigned long size = 0;
...@@ -1095,19 +1095,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node, ...@@ -1095,19 +1095,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
if (type == NULL || strcmp(type, "cpu") != 0) if (type == NULL || strcmp(type, "cpu") != 0)
return 0; return 0;
#ifdef CONFIG_PPC_PSERIES
/* On LPAR, look for the first ibm,pft-size property for the hash table size
*/
if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
u32 *pft_size;
pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
if (pft_size != NULL) {
/* pft_size[0] is the NUMA CEC cookie */
ppc64_pft_size = pft_size[1];
}
}
#endif
boot_cpuid = 0; boot_cpuid = 0;
boot_cpuid_phys = 0; boot_cpuid_phys = 0;
if (initial_boot_params && initial_boot_params->version >= 2) { if (initial_boot_params && initial_boot_params->version >= 2) {
...@@ -1117,8 +1104,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node, ...@@ -1117,8 +1104,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
boot_cpuid_phys = initial_boot_params->boot_cpuid_phys; boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
} else { } else {
/* Check if it's the boot-cpu, set it's hw index now */ /* Check if it's the boot-cpu, set it's hw index now */
if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) { if (of_get_flat_dt_prop(node,
prop = get_flat_dt_prop(node, "reg", NULL); "linux,boot-cpu", NULL) != NULL) {
prop = of_get_flat_dt_prop(node, "reg", NULL);
if (prop != NULL) if (prop != NULL)
boot_cpuid_phys = *prop; boot_cpuid_phys = *prop;
} }
...@@ -1127,14 +1115,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node, ...@@ -1127,14 +1115,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */ /* Check if we have a VMX and eventually update CPU features */
prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size); prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
if (prop && (*prop) > 0) { if (prop && (*prop) > 0) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
} }
/* Same goes for Apple's "altivec" property */ /* Same goes for Apple's "altivec" property */
prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL); prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
if (prop) { if (prop) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
...@@ -1147,7 +1135,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, ...@@ -1147,7 +1135,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* this by looking at the size of the ibm,ppc-interrupt-server#s * this by looking at the size of the ibm,ppc-interrupt-server#s
* property * property
*/ */
prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
&size); &size);
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
if (prop && ((size / sizeof(u32)) > 1)) if (prop && ((size / sizeof(u32)) > 1))
...@@ -1170,7 +1158,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node, ...@@ -1170,7 +1158,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
return 0; return 0;
/* get platform type */ /* get platform type */
prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL); prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
if (prop == NULL) if (prop == NULL)
return 0; return 0;
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
...@@ -1183,21 +1171,21 @@ static int __init early_init_dt_scan_chosen(unsigned long node, ...@@ -1183,21 +1171,21 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
/* check if iommu is forced on or off */ /* check if iommu is forced on or off */
if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
iommu_is_off = 1; iommu_is_off = 1;
if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
iommu_force_on = 1; iommu_force_on = 1;
#endif #endif
lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL); lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (lprop) if (lprop)
memory_limit = *lprop; memory_limit = *lprop;
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
if (lprop) if (lprop)
tce_alloc_start = *lprop; tce_alloc_start = *lprop;
lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
if (lprop) if (lprop)
tce_alloc_end = *lprop; tce_alloc_end = *lprop;
#endif #endif
...@@ -1209,9 +1197,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node, ...@@ -1209,9 +1197,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
{ {
u64 *basep, *entryp; u64 *basep, *entryp;
basep = get_flat_dt_prop(node, "linux,rtas-base", NULL); basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL); entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
prop = get_flat_dt_prop(node, "linux,rtas-size", NULL); prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
if (basep && entryp && prop) { if (basep && entryp && prop) {
rtas.base = *basep; rtas.base = *basep;
rtas.entry = *entryp; rtas.entry = *entryp;
...@@ -1232,11 +1220,11 @@ static int __init early_init_dt_scan_root(unsigned long node, ...@@ -1232,11 +1220,11 @@ static int __init early_init_dt_scan_root(unsigned long node,
if (depth != 0) if (depth != 0)
return 0; return 0;
prop = get_flat_dt_prop(node, "#size-cells", NULL); prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
dt_root_size_cells = (prop == NULL) ? 1 : *prop; dt_root_size_cells = (prop == NULL) ? 1 : *prop;
DBG("dt_root_size_cells = %x\n", dt_root_size_cells); DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
prop = get_flat_dt_prop(node, "#address-cells", NULL); prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
dt_root_addr_cells = (prop == NULL) ? 2 : *prop; dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
...@@ -1271,7 +1259,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp) ...@@ -1271,7 +1259,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
static int __init early_init_dt_scan_memory(unsigned long node, static int __init early_init_dt_scan_memory(unsigned long node,
const char *uname, int depth, void *data) const char *uname, int depth, void *data)
{ {
char *type = get_flat_dt_prop(node, "device_type", NULL); char *type = of_get_flat_dt_prop(node, "device_type", NULL);
cell_t *reg, *endp; cell_t *reg, *endp;
unsigned long l; unsigned long l;
...@@ -1279,7 +1267,7 @@ static int __init early_init_dt_scan_memory(unsigned long node, ...@@ -1279,7 +1267,7 @@ static int __init early_init_dt_scan_memory(unsigned long node,
if (type == NULL || strcmp(type, "memory") != 0) if (type == NULL || strcmp(type, "memory") != 0)
return 0; return 0;
reg = (cell_t *)get_flat_dt_prop(node, "reg", &l); reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL) if (reg == NULL)
return 0; return 0;
...@@ -1343,12 +1331,12 @@ void __init early_init_devtree(void *params) ...@@ -1343,12 +1331,12 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and * device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ... * size, TCE reserve, and more ...
*/ */
scan_flat_dt(early_init_dt_scan_chosen, NULL); of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
/* Scan memory nodes and rebuild LMBs */ /* Scan memory nodes and rebuild LMBs */
lmb_init(); lmb_init();
scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_root, NULL);
scan_flat_dt(early_init_dt_scan_memory, NULL); of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit); lmb_enforce_memory_limit(memory_limit);
lmb_analyze(); lmb_analyze();
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
...@@ -1363,10 +1351,10 @@ void __init early_init_devtree(void *params) ...@@ -1363,10 +1351,10 @@ void __init early_init_devtree(void *params)
DBG("Scanning CPUs ...\n"); DBG("Scanning CPUs ...\n");
/* Retreive hash table size from flattened tree plus other /* Retreive CPU related informations from the flat tree
* CPU related informations (altivec support, boot CPU ID, ...) * (altivec support, boot CPU ID, ...)
*/ */
scan_flat_dt(early_init_dt_scan_cpus, NULL); of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
DBG(" <- early_init_devtree()\n"); DBG(" <- early_init_devtree()\n");
} }
......
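With the scan helpers renamed to of_scan_flat_dt()/of_get_flat_dt_prop() and made non-static, other early-boot code can walk the flattened device tree the same way prom.c does. A hypothetical caller in the same style as the scanners above (kernel-style sketch only; the function names and counting logic are invented, the helper signatures are the ones shown in this patch):

/* Count "cpu" nodes in the flattened device tree. */
static int __init example_count_cpu_nodes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	if (type != NULL && strcmp(type, "cpu") == 0)
		(*(int *)data)++;
	return 0;			/* 0 means: keep scanning */
}

static int __init example_scan(void)
{
	int ncpus = 0;

	of_scan_flat_dt(example_count_cpu_nodes, &ncpus);
	return ncpus;
}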
...@@ -277,16 +277,21 @@ void __init early_setup(unsigned long dt_ptr) ...@@ -277,16 +277,21 @@ void __init early_setup(unsigned long dt_ptr)
DBG("Found, Initializing memory management...\n"); DBG("Found, Initializing memory management...\n");
/* /*
* Initialize stab / SLB management * Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before stab/slb initialization as
* this is currently where the page size encoding is obtained
*/ */
if (!firmware_has_feature(FW_FEATURE_ISERIES)) htab_initialize();
stab_initialize(lpaca->stab_real);
/* /*
* Initialize the MMU Hash table and create the linear mapping * Initialize stab / SLB management except on iSeries
* of memory
*/ */
htab_initialize(); if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
if (cpu_has_feature(CPU_FTR_SLB))
slb_initialize();
else
stab_initialize(lpaca->stab_real);
}
DBG(" <- early_setup()\n"); DBG(" <- early_setup()\n");
} }
...@@ -552,9 +557,11 @@ static void __init irqstack_early_init(void) ...@@ -552,9 +557,11 @@ static void __init irqstack_early_init(void)
* SLB misses on them. * SLB misses on them.
*/ */
for_each_cpu(i) { for_each_cpu(i) {
softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE, softirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
THREAD_SIZE, 0x10000000)); THREAD_SIZE, 0x10000000));
hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE, hardirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
THREAD_SIZE, 0x10000000)); THREAD_SIZE, 0x10000000));
} }
} }
...@@ -583,8 +590,8 @@ static void __init emergency_stack_init(void) ...@@ -583,8 +590,8 @@ static void __init emergency_stack_init(void)
limit = min(0x10000000UL, lmb.rmo_size); limit = min(0x10000000UL, lmb.rmo_size);
for_each_cpu(i) for_each_cpu(i)
paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128, paca[i].emergency_sp =
limit)) + PAGE_SIZE; __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
} }
/* /*
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
_GLOBAL(copy_page) _GLOBAL(copy_4K_page)
std r31,-8(1) std r31,-8(1)
std r30,-16(1) std r30,-16(1)
std r29,-24(1) std r29,-24(1)
......
...@@ -24,7 +24,7 @@ _GLOBAL(__copy_tofrom_user) ...@@ -24,7 +24,7 @@ _GLOBAL(__copy_tofrom_user)
std r4,-16(r1) std r4,-16(r1)
std r5,-8(r1) std r5,-8(r1)
dcbt 0,r4 dcbt 0,r4
beq .Lcopy_page beq .Lcopy_page_4K
andi. r6,r6,7 andi. r6,r6,7
mtcrf 0x01,r5 mtcrf 0x01,r5
blt cr1,.Lshort_copy blt cr1,.Lshort_copy
...@@ -366,7 +366,7 @@ _GLOBAL(__copy_tofrom_user) ...@@ -366,7 +366,7 @@ _GLOBAL(__copy_tofrom_user)
* above (following the .Ldst_aligned label) but it runs slightly * above (following the .Ldst_aligned label) but it runs slightly
* slower on POWER3. * slower on POWER3.
*/ */
.Lcopy_page: .Lcopy_page_4K:
std r31,-32(1) std r31,-32(1)
std r30,-40(1) std r30,-40(1)
std r29,-48(1) std r29,-48(1)
......
...@@ -47,10 +47,25 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) ...@@ -47,10 +47,25 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
pu = pud_offset(pg, addr); pu = pud_offset(pg, addr);
if (!pud_none(*pu)) { if (!pud_none(*pu)) {
pm = pmd_offset(pu, addr); pm = pmd_offset(pu, addr);
#ifdef CONFIG_PPC_64K_PAGES
/* Currently, we use the normal PTE offset within full
* size PTE pages, thus our huge PTEs are scattered in
* the PTE page and we do waste some. We may change
* that in the future, but the current mechanism keeps
* things much simpler
*/
if (!pmd_none(*pm)) {
/* Note: pte_offset_* are all equivalent on
* ppc64 as we don't have HIGHMEM
*/
pt = pte_offset_kernel(pm, addr);
return pt;
}
#else /* CONFIG_PPC_64K_PAGES */
/* On 4k pages, we put huge PTEs in the PMD page */
pt = (pte_t *)pm; pt = (pte_t *)pm;
BUG_ON(!pmd_none(*pm)
&& !(pte_present(*pt) && pte_huge(*pt)));
return pt; return pt;
#endif /* CONFIG_PPC_64K_PAGES */
} }
} }
...@@ -74,9 +89,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) ...@@ -74,9 +89,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
if (pu) { if (pu) {
pm = pmd_alloc(mm, pu, addr); pm = pmd_alloc(mm, pu, addr);
if (pm) { if (pm) {
#ifdef CONFIG_PPC_64K_PAGES
/* See comment in huge_pte_offset. Note that if we ever
* want to put the page size in the PMD, we would have
* to open code our own pte_alloc* function in order
* to populate and set the size atomically
*/
pt = pte_alloc_map(mm, pm, addr);
#else /* CONFIG_PPC_64K_PAGES */
pt = (pte_t *)pm; pt = (pte_t *)pm;
BUG_ON(!pmd_none(*pm) #endif /* CONFIG_PPC_64K_PAGES */
&& !(pte_present(*pt) && pte_huge(*pt)));
return pt; return pt;
} }
} }
...@@ -84,35 +106,29 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) ...@@ -84,35 +106,29 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
return NULL; return NULL;
} }
#define HUGEPTE_BATCH_SIZE (HPAGE_SIZE / PMD_SIZE)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte) pte_t *ptep, pte_t pte)
{ {
int i;
if (pte_present(*ptep)) { if (pte_present(*ptep)) {
pte_clear(mm, addr, ptep); /* We open-code pte_clear because we need to pass the right
* argument to hpte_update (huge / !huge)
*/
unsigned long old = pte_update(ptep, ~0UL);
if (old & _PAGE_HASHPTE)
hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
flush_tlb_pending(); flush_tlb_pending();
} }
for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
ptep++;
}
} }
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep) pte_t *ptep)
{ {
unsigned long old = pte_update(ptep, ~0UL); unsigned long old = pte_update(ptep, ~0UL);
int i;
if (old & _PAGE_HASHPTE) if (old & _PAGE_HASHPTE)
hpte_update(mm, addr, old, 0); hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
*ptep = __pte(0);
for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
ptep[i] = __pte(0);
return __pte(old); return __pte(old);
} }
...@@ -563,6 +579,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, ...@@ -563,6 +579,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
int lastshift; int lastshift;
u16 areamask, curareas; u16 areamask, curareas;
if (HPAGE_SHIFT == 0)
return -EINVAL;
if (len & ~HPAGE_MASK) if (len & ~HPAGE_MASK)
return -EINVAL; return -EINVAL;
...@@ -619,19 +637,15 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -619,19 +637,15 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
unsigned long ea, unsigned long vsid, int local) unsigned long ea, unsigned long vsid, int local)
{ {
pte_t *ptep; pte_t *ptep;
unsigned long va, vpn; unsigned long old_pte, new_pte;
pte_t old_pte, new_pte; unsigned long va, rflags, pa;
unsigned long rflags, prpn;
long slot; long slot;
int err = 1; int err = 1;
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, ea); ptep = huge_pte_offset(mm, ea);
/* Search the Linux page table for a match with va */ /* Search the Linux page table for a match with va */
va = (vsid << 28) | (ea & 0x0fffffff); va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> HPAGE_SHIFT;
/* /*
* If no pte found or not present, send the problem up to * If no pte found or not present, send the problem up to
...@@ -640,8 +654,6 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -640,8 +654,6 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
if (unlikely(!ptep || pte_none(*ptep))) if (unlikely(!ptep || pte_none(*ptep)))
goto out; goto out;
/* BUG_ON(pte_bad(*ptep)); */
/* /*
* Check the user's access rights to the page. If access should be * Check the user's access rights to the page. If access should be
* prevented then send the problem up to do_page_fault. * prevented then send the problem up to do_page_fault.
...@@ -661,58 +673,64 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -661,58 +673,64 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
*/ */
old_pte = *ptep; do {
new_pte = old_pte; old_pte = pte_val(*ptep);
if (old_pte & _PAGE_BUSY)
goto out;
new_pte = old_pte | _PAGE_BUSY |
_PAGE_ACCESSED | _PAGE_HASHPTE;
} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
old_pte, new_pte));
rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW)); rflags = 0x2 | (!(new_pte & _PAGE_RW));
/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC); rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
/* Check if pte already has an hpte (case 2) */ /* Check if pte already has an hpte (case 2) */
if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) { if (unlikely(old_pte & _PAGE_HASHPTE)) {
/* There MIGHT be an HPTE for this pte */ /* There MIGHT be an HPTE for this pte */
unsigned long hash, slot; unsigned long hash, slot;
hash = hpt_hash(vpn, 1); hash = hpt_hash(va, HPAGE_SHIFT);
if (pte_val(old_pte) & _PAGE_SECONDARY) if (old_pte & _PAGE_F_SECOND)
hash = ~hash; hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12; slot += (old_pte & _PAGE_F_GIX) >> 12;
if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1) if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
pte_val(old_pte) &= ~_PAGE_HPTEFLAGS; old_pte &= ~_PAGE_HPTEFLAGS;
} }
if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) { if (likely(!(old_pte & _PAGE_HASHPTE))) {
unsigned long hash = hpt_hash(vpn, 1); unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
unsigned long hpte_group; unsigned long hpte_group;
prpn = pte_pfn(old_pte); pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
repeat: repeat:
hpte_group = ((hash & htab_hash_mask) * hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
/* Update the linux pte with the HPTE slot */ /* clear HPTE slot informations in new PTE */
pte_val(new_pte) &= ~_PAGE_HPTEFLAGS; new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
pte_val(new_pte) |= _PAGE_HASHPTE;
/* Add in WIMG bits */ /* Add in WIMG bits */
/* XXX We should store these in the pte */ /* XXX We should store these in the pte */
/* --BenH: I think they are ... */
rflags |= _PAGE_COHERENT; rflags |= _PAGE_COHERENT;
slot = ppc_md.hpte_insert(hpte_group, va, prpn, /* Insert into the hash table, primary slot */
HPTE_V_LARGE, rflags); slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
mmu_huge_psize);
/* Primary is full, try the secondary */ /* Primary is full, try the secondary */
if (unlikely(slot == -1)) { if (unlikely(slot == -1)) {
pte_val(new_pte) |= _PAGE_SECONDARY; new_pte |= _PAGE_F_SECOND;
hpte_group = ((~hash & htab_hash_mask) * hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, va, prpn, slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
HPTE_V_LARGE |
HPTE_V_SECONDARY, HPTE_V_SECONDARY,
rflags); mmu_huge_psize);
if (slot == -1) { if (slot == -1) {
if (mftb() & 0x1) if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * hpte_group = ((hash & htab_hash_mask) *
...@@ -726,20 +744,18 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -726,20 +744,18 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
if (unlikely(slot == -2)) if (unlikely(slot == -2))
panic("hash_huge_page: pte_insert failed\n"); panic("hash_huge_page: pte_insert failed\n");
pte_val(new_pte) |= (slot<<12) & _PAGE_GROUP_IX; new_pte |= (slot << 12) & _PAGE_F_GIX;
}
/* /*
* No need to use ldarx/stdcx here because all who * No need to use ldarx/stdcx here because all who
* might be updating the pte will hold the * might be updating the pte will hold the
* page_table_lock * page_table_lock
*/ */
*ptep = new_pte; *ptep = __pte(new_pte & ~_PAGE_BUSY);
}
err = 0; err = 0;
out: out:
spin_unlock(&mm->page_table_lock);
return err; return err;
} }
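The do/while around __cmpxchg_u64() above is the usual lock-free PTE update: re-read the PTE, bail out if another CPU already holds _PAGE_BUSY, and otherwise try to install the busy bit atomically so the hash insertion cannot race. A stand-alone C11 sketch of that pattern, with invented flag values standing in for the kernel's PTE bits:

#include <stdatomic.h>

#define EX_PAGE_BUSY     0x1UL	/* invented stand-ins for the PTE flags */
#define EX_PAGE_ACCESSED 0x2UL
#define EX_PAGE_HASHPTE  0x4UL

/* Try to take ownership of a PTE by setting the BUSY bit atomically;
 * returns the new value on success, 0 if another CPU already owns it.
 */
static unsigned long example_lock_pte(_Atomic unsigned long *ptep)
{
	unsigned long old, new;

	do {
		old = atomic_load(ptep);
		if (old & EX_PAGE_BUSY)
			return 0;
		new = old | EX_PAGE_BUSY | EX_PAGE_ACCESSED | EX_PAGE_HASHPTE;
	} while (!atomic_compare_exchange_weak(ptep, &old, new));

	return new;
}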
...@@ -188,12 +188,21 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) ...@@ -188,12 +188,21 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
memset(addr, 0, kmem_cache_size(cache)); memset(addr, 0, kmem_cache_size(cache));
} }
#ifdef CONFIG_PPC_64K_PAGES
static const int pgtable_cache_size[2] = {
PTE_TABLE_SIZE, PGD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
"pte_pmd_cache", "pgd_cache",
};
#else
static const int pgtable_cache_size[2] = { static const int pgtable_cache_size[2] = {
PTE_TABLE_SIZE, PMD_TABLE_SIZE PTE_TABLE_SIZE, PMD_TABLE_SIZE
}; };
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
"pgd_pte_cache", "pud_pmd_cache", "pgd_pte_cache", "pud_pmd_cache",
}; };
#endif /* CONFIG_PPC_64K_PAGES */
kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
...@@ -201,19 +210,14 @@ void pgtable_cache_init(void) ...@@ -201,19 +210,14 @@ void pgtable_cache_init(void)
{ {
int i; int i;
BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) { for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
int size = pgtable_cache_size[i]; int size = pgtable_cache_size[i];
const char *name = pgtable_cache_name[i]; const char *name = pgtable_cache_name[i];
pgtable_cache[i] = kmem_cache_create(name, pgtable_cache[i] = kmem_cache_create(name,
size, size, size, size,
SLAB_HWCACHE_ALIGN SLAB_HWCACHE_ALIGN |
| SLAB_MUST_HWCACHE_ALIGN, SLAB_MUST_HWCACHE_ALIGN,
zero_ctor, zero_ctor,
NULL); NULL);
if (! pgtable_cache[i]) if (! pgtable_cache[i])
......
...@@ -61,6 +61,9 @@ int init_bootmem_done; ...@@ -61,6 +61,9 @@ int init_bootmem_done;
int mem_init_done; int mem_init_done;
unsigned long memory_limit; unsigned long memory_limit;
extern void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap);
/* /*
* This is called by /dev/mem to know if a given address has to * This is called by /dev/mem to know if a given address has to
* be mapped non-cacheable or not * be mapped non-cacheable or not
...@@ -493,18 +496,10 @@ EXPORT_SYMBOL(flush_icache_user_range); ...@@ -493,18 +496,10 @@ EXPORT_SYMBOL(flush_icache_user_range);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte) pte_t pte)
{ {
/* handle i-cache coherency */ #ifdef CONFIG_PPC_STD_MMU
unsigned long pfn = pte_pfn(pte); unsigned long access = 0, trap;
#ifdef CONFIG_PPC32
pmd_t *pmd;
#else
unsigned long vsid;
void *pgdir;
pte_t *ptep;
int local = 0;
cpumask_t tmp;
unsigned long flags;
#endif #endif
unsigned long pfn = pte_pfn(pte);
/* handle i-cache coherency */ /* handle i-cache coherency */
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
...@@ -535,30 +530,21 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, ...@@ -535,30 +530,21 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(pte) || address >= TASK_SIZE) if (!pte_young(pte) || address >= TASK_SIZE)
return; return;
#ifdef CONFIG_PPC32
if (Hash == 0)
return;
pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
if (!pmd_none(*pmd))
add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
#else
pgdir = vma->vm_mm->pgd;
if (pgdir == NULL)
return;
ptep = find_linux_pte(pgdir, address); /* We try to figure out if we are coming from an instruction
if (!ptep) * access fault and pass that down to __hash_page so we avoid
* double-faulting on execution of fresh text. We have to test
* for regs NULL since init will get here first thing at boot
*
* We also avoid filling the hash if not coming from a fault
*/
if (current->thread.regs == NULL)
return; return;
trap = TRAP(current->thread.regs);
vsid = get_vsid(vma->vm_mm->context.id, address); if (trap == 0x400)
access |= _PAGE_EXEC;
local_irq_save(flags); else if (trap != 0x300)
tmp = cpumask_of_cpu(smp_processor_id()); return;
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp)) hash_preload(vma->vm_mm, address, access, trap);
local = 1; #endif /* CONFIG_PPC_STD_MMU */
__hash_page(address, 0, vsid, ptep, 0x300, local);
local_irq_restore(flags);
#endif
#endif
} }
...@@ -101,7 +101,6 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags) ...@@ -101,7 +101,6 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
pud_t *pudp; pud_t *pudp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
unsigned long vsid;
if (mem_init_done) { if (mem_init_done) {
pgdp = pgd_offset_k(ea); pgdp = pgd_offset_k(ea);
...@@ -117,28 +116,15 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags) ...@@ -117,28 +116,15 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags))); __pgprot(flags)));
} else { } else {
unsigned long va, vpn, hash, hpteg;
/* /*
* If the mm subsystem is not fully up, we cannot create a * If the mm subsystem is not fully up, we cannot create a
* linux page table entry for this mapping. Simply bolt an * linux page table entry for this mapping. Simply bolt an
* entry in the hardware page table. * entry in the hardware page table.
*
*/ */
vsid = get_kernel_vsid(ea); if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
va = (vsid << 28) | (ea & 0xFFFFFFF); mmu_virtual_psize))
vpn = va >> PAGE_SHIFT; panic("Can't map bolted IO mapping");
hash = hpt_hash(vpn, 0);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Panic if a pte grpup is full */
if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
HPTE_V_BOLTED,
_PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
== -1) {
panic("map_io_page: could not insert mapping");
}
} }
return 0; return 0;
} }
......
...@@ -178,6 +178,21 @@ void __init setbat(int index, unsigned long virt, unsigned long phys, ...@@ -178,6 +178,21 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
bat_addrs[index].phys = phys; bat_addrs[index].phys = phys;
} }
/*
* Preload a translation in the hash table
*/
void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap)
{
pmd_t *pmd;
if (Hash == 0)
return;
pmd = pmd_offset(pgd_offset(mm, ea), ea);
if (!pmd_none(*pmd))
add_hash_page(mm->context, ea, pmd_val(*pmd));
}
/* /*
* Initialize the hash table and patch the instructions in hashtable.S. * Initialize the hash table and patch the instructions in hashtable.S.
*/ */
......
...@@ -14,14 +14,32 @@ ...@@ -14,14 +14,32 @@
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*/ */
#undef DEBUG
#include <linux/config.h> #include <linux/config.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/paca.h> #include <asm/paca.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/cacheflush.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
extern void slb_allocate(unsigned long ea); extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);
static void slb_allocate(unsigned long ea)
{
/* Currently, we do real mode for all SLBs including user, but
* that will change if we bring back dynamic VSIDs
*/
slb_allocate_realmode(ea);
}
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot) static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{ {
...@@ -46,13 +64,15 @@ static void slb_flush_and_rebolt(void) ...@@ -46,13 +64,15 @@ static void slb_flush_and_rebolt(void)
{ {
/* If you change this make sure you change SLB_NUM_BOLTED /* If you change this make sure you change SLB_NUM_BOLTED
* appropriately too. */ * appropriately too. */
unsigned long ksp_flags = SLB_VSID_KERNEL; unsigned long linear_llp, virtual_llp, lflags, vflags;
unsigned long ksp_esid_data; unsigned long ksp_esid_data;
WARN_ON(!irqs_disabled()); WARN_ON(!irqs_disabled());
if (cpu_has_feature(CPU_FTR_16M_PAGE)) linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
ksp_flags |= SLB_VSID_L; virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | virtual_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, 2); ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
if ((ksp_esid_data & ESID_MASK) == KERNELBASE) if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
...@@ -67,9 +87,9 @@ static void slb_flush_and_rebolt(void) ...@@ -67,9 +87,9 @@ static void slb_flush_and_rebolt(void)
/* Slot 2 - kernel stack */ /* Slot 2 - kernel stack */
"slbmte %2,%3\n" "slbmte %2,%3\n"
"isync" "isync"
:: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)), :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
"r"(mk_esid_data(VMALLOCBASE, 1)), "r"(mk_esid_data(VMALLOCBASE, 1)),
"r"(mk_vsid_data(ksp_esid_data, ksp_flags)), "r"(mk_vsid_data(ksp_esid_data, lflags)),
"r"(ksp_esid_data) "r"(ksp_esid_data)
: "memory"); : "memory");
} }
...@@ -102,6 +122,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) ...@@ -102,6 +122,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
get_paca()->slb_cache_ptr = 0; get_paca()->slb_cache_ptr = 0;
get_paca()->context = mm->context; get_paca()->context = mm->context;
#ifdef CONFIG_PPC_64K_PAGES
get_paca()->pgdir = mm->pgd;
#endif /* CONFIG_PPC_64K_PAGES */
/* /*
* preload some userspace segments into the SLB. * preload some userspace segments into the SLB.
...@@ -131,28 +154,77 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) ...@@ -131,28 +154,77 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
slb_allocate(unmapped_base); slb_allocate(unmapped_base);
} }
static inline void patch_slb_encoding(unsigned int *insn_addr,
unsigned int immed)
{
/* Assume the instruction had a "0" immediate value, just
* "or" in the new value
*/
*insn_addr |= immed;
flush_icache_range((unsigned long)insn_addr, 4+
(unsigned long)insn_addr);
}
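patch_slb_encoding() works because the instructions being patched are assembled as "li rN,0", whose 16-bit immediate field is all zeroes; OR-ing the SLB flags into the instruction word therefore fills in the immediate without disturbing the opcode or register fields, after which the icache range must be flushed. A small stand-alone check of that encoding argument (the flag value is hypothetical; the "li r11,0" encoding is the standard PowerPC one):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t li_r11_0 = 0x39600000;	/* PowerPC "li r11,0": immediate field is zero */
	uint32_t flags    = 0x0490;	/* hypothetical SLB_VSID_KERNEL | llp value */
	uint32_t patched  = li_r11_0 | flags;

	assert((patched & 0xffff0000) == 0x39600000);	/* opcode/register bits untouched */
	assert((patched & 0x0000ffff) == flags);	/* immediate now carries the flags */
	return 0;
}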
void slb_initialize(void) void slb_initialize(void)
{ {
unsigned long linear_llp, virtual_llp;
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_virtual;
extern unsigned int *slb_miss_user_load_normal;
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int *slb_miss_user_load_huge;
unsigned long huge_llp;
huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
#endif
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
SLB_VSID_KERNEL | linear_llp);
patch_slb_encoding(slb_miss_kernel_load_virtual,
SLB_VSID_KERNEL | virtual_llp);
patch_slb_encoding(slb_miss_user_load_normal,
SLB_VSID_USER | virtual_llp);
DBG("SLB: linear LLP = %04x\n", linear_llp);
DBG("SLB: virtual LLP = %04x\n", virtual_llp);
#ifdef CONFIG_HUGETLB_PAGE
patch_slb_encoding(slb_miss_user_load_huge,
SLB_VSID_USER | huge_llp);
DBG("SLB: huge LLP = %04x\n", huge_llp);
#endif
}
/* On iSeries the bolted entries have already been set up by /* On iSeries the bolted entries have already been set up by
* the hypervisor from the lparMap data in head.S */ * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES #ifndef CONFIG_PPC_ISERIES
unsigned long flags = SLB_VSID_KERNEL; {
unsigned long lflags, vflags;
/* Invalidate the entire SLB (even slot 0) & all the ERATS */ lflags = SLB_VSID_KERNEL | linear_llp;
if (cpu_has_feature(CPU_FTR_16M_PAGE)) vflags = SLB_VSID_KERNEL | virtual_llp;
flags |= SLB_VSID_L;
/* Invalidate the entire SLB (even slot 0) & all the ERATS */
asm volatile("isync":::"memory"); asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory"); asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory"); asm volatile("isync; slbia; isync":::"memory");
create_slbe(KERNELBASE, flags, 0); create_slbe(KERNELBASE, lflags, 0);
create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
/* VMALLOC space has 4K pages always for now */
create_slbe(VMALLOCBASE, vflags, 1);
/* We don't bolt the stack for the time being - we're in boot, /* We don't bolt the stack for the time being - we're in boot,
* so the stack is in the bolted segment. By the time it goes * so the stack is in the bolted segment. By the time it goes
* elsewhere, we'll call _switch() which will bolt in the new * elsewhere, we'll call _switch() which will bolt in the new
* one. */ * one. */
asm volatile("isync":::"memory"); asm volatile("isync":::"memory");
#endif }
#endif /* CONFIG_PPC_ISERIES */
get_paca()->stab_rr = SLB_NUM_BOLTED; get_paca()->stab_rr = SLB_NUM_BOLTED;
} }
...@@ -18,61 +18,28 @@ ...@@ -18,61 +18,28 @@
#include <linux/config.h> #include <linux/config.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
/* void slb_allocate(unsigned long ea); /* void slb_allocate_realmode(unsigned long ea);
* *
* Create an SLB entry for the given EA (user or kernel). * Create an SLB entry for the given EA (user or kernel).
* r3 = faulting address, r13 = PACA * r3 = faulting address, r13 = PACA
* r9, r10, r11 are clobbered by this function * r9, r10, r11 are clobbered by this function
* No other registers are examined or changed. * No other registers are examined or changed.
*/ */
_GLOBAL(slb_allocate) _GLOBAL(slb_allocate_realmode)
/* /* r3 = faulting address */
* First find a slot, round robin. Previously we tried to find
* a free slot first but that took too long. Unfortunately we
* don't have any LRU information to help us choose a slot.
*/
#ifdef CONFIG_PPC_ISERIES
/*
* On iSeries, the "bolted" stack segment can be cast out on
* shared processor switch so we need to check for a miss on
* it and restore it to the right slot.
*/
ld r9,PACAKSAVE(r13)
clrrdi r9,r9,28
clrrdi r11,r3,28
li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
cmpld r9,r11
beq 3f
#endif /* CONFIG_PPC_ISERIES */
ld r10,PACASTABRR(r13)
addi r10,r10,1
/* use a cpu feature mask if we ever change our slb size */
cmpldi r10,SLB_NUM_ENTRIES
blt+ 4f
li r10,SLB_NUM_BOLTED
4:
std r10,PACASTABRR(r13)
3:
/* r3 = faulting address, r10 = entry */
srdi r9,r3,60 /* get region */ srdi r9,r3,60 /* get region */
srdi r3,r3,28 /* get esid */ srdi r10,r3,28 /* get esid */
cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */ cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */
rldimi r10,r3,28,0 /* r10= ESID<<28 | entry */ /* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
oris r10,r10,SLB_ESID_V@h /* r10 |= SLB_ESID_V */
/* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */
blt cr7,0f /* user or kernel? */ blt cr7,0f /* user or kernel? */
/* kernel address: proto-VSID = ESID */ /* kernel address: proto-VSID = ESID */
...@@ -81,43 +48,161 @@ _GLOBAL(slb_allocate) ...@@ -81,43 +48,161 @@ _GLOBAL(slb_allocate)
* top segment. That's ok, the scramble below will translate * top segment. That's ok, the scramble below will translate
* it to VSID 0, which is reserved as a bad VSID - one which * it to VSID 0, which is reserved as a bad VSID - one which
* will never have any pages in it. */ * will never have any pages in it. */
li r11,SLB_VSID_KERNEL
BEGIN_FTR_SECTION
bne cr7,9f
li r11,(SLB_VSID_KERNEL|SLB_VSID_L)
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
b 9f
0: /* user address: proto-VSID = context<<15 | ESID */ /* Check if hitting the linear mapping of the vmalloc/ioremap
srdi. r9,r3,USER_ESID_BITS * kernel space
*/
bne cr7,1f
/* Linear mapping encoding bits, the "li" instruction below will
* be patched by the kernel at boot
*/
_GLOBAL(slb_miss_kernel_load_linear)
li r11,0
b slb_finish_load
1: /* vmalloc/ioremap mapping encoding bits, the "li" instruction below
* will be patched by the kernel at boot
*/
_GLOBAL(slb_miss_kernel_load_virtual)
li r11,0
b slb_finish_load
0: /* user address: proto-VSID = context << 15 | ESID. First check
* if the address is within the boundaries of the user region
*/
srdi. r9,r10,USER_ESID_BITS
bne- 8f /* invalid ea bits set */ bne- 8f /* invalid ea bits set */
/* Figure out if the segment contains huge pages */
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b 1f
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
lhz r9,PACAHIGHHTLBAREAS(r13) lhz r9,PACAHIGHHTLBAREAS(r13)
srdi r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT) srdi r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
srd r9,r9,r11 srd r9,r9,r11
lhz r11,PACALOWHTLBAREAS(r13) lhz r11,PACALOWHTLBAREAS(r13)
srd r11,r11,r3 srd r11,r11,r10
or r9,r9,r11 or. r9,r9,r11
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) beq 1f
_GLOBAL(slb_miss_user_load_huge)
li r11,0
b 2f
1:
#endif /* CONFIG_HUGETLB_PAGE */ #endif /* CONFIG_HUGETLB_PAGE */
li r11,SLB_VSID_USER _GLOBAL(slb_miss_user_load_normal)
li r11,0
#ifdef CONFIG_HUGETLB_PAGE 2:
BEGIN_FTR_SECTION ld r9,PACACONTEXTID(r13)
rldimi r11,r9,8,55 /* shift masked bit into SLB_VSID_L */ rldimi r10,r9,USER_ESID_BITS,0
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) b slb_finish_load
#endif /* CONFIG_HUGETLB_PAGE */
8: /* invalid EA */
li r10,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
b slb_finish_load
#ifdef __DISABLED__
/* void slb_allocate_user(unsigned long ea);
*
* Create an SLB entry for the given EA (user or kernel).
* r3 = faulting address, r13 = PACA
* r9, r10, r11 are clobbered by this function
* No other registers are examined or changed.
*
* It is called with translation enabled in order to be able to walk the
* page tables. This is not currently used.
*/
_GLOBAL(slb_allocate_user)
/* r3 = faulting address */
srdi r10,r3,28 /* get esid */
crset 4*cr7+lt /* set "user" flag for later */
/* check if we fit in the range covered by the pagetables*/
srdi. r9,r3,PGTABLE_EADDR_SIZE
crnot 4*cr0+eq,4*cr0+eq
beqlr
/* now we need to get to the page tables in order to get the page
* size encoding from the PMD. In the future, we'll be able to deal
* with 1T segments too by getting the encoding from the PGD instead
*/
ld r9,PACAPGDIR(r13)
cmpldi cr0,r9,0
beqlr
rlwinm r11,r10,8,25,28
ldx r9,r9,r11 /* get pgd_t */
cmpldi cr0,r9,0
beqlr
rlwinm r11,r10,3,17,28
ldx r9,r9,r11 /* get pmd_t */
cmpldi cr0,r9,0
beqlr
/* build vsid flags */
andi. r11,r9,SLB_VSID_LLP
ori r11,r11,SLB_VSID_USER
/* get context to calculate proto-VSID */
ld r9,PACACONTEXTID(r13) ld r9,PACACONTEXTID(r13)
rldimi r3,r9,USER_ESID_BITS,0 rldimi r10,r9,USER_ESID_BITS,0
9: /* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */ /* fall through slb_finish_load */
ASM_VSID_SCRAMBLE(r3,r9)
rldimi r11,r3,SLB_VSID_SHIFT,16 /* combine VSID and flags */ #endif /* __DISABLED__ */
/*
* Finish loading of an SLB entry and return
*
* r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
*/
slb_finish_load:
ASM_VSID_SCRAMBLE(r10,r9)
rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */
/* r3 = EA, r11 = VSID data */
/*
* Find a slot, round robin. Previously we tried to find a
* free slot first but that took too long. Unfortunately we
* don't have any LRU information to help us choose a slot.
*/
#ifdef CONFIG_PPC_ISERIES
/*
* On iSeries, the "bolted" stack segment can be cast out on
* shared processor switch so we need to check for a miss on
* it and restore it to the right slot.
*/
ld r9,PACAKSAVE(r13)
clrrdi r9,r9,28
clrrdi r3,r3,28
li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
cmpld r9,r3
beq 3f
#endif /* CONFIG_PPC_ISERIES */
ld r10,PACASTABRR(r13)
addi r10,r10,1
/* use a cpu feature mask if we ever change our slb size */
cmpldi r10,SLB_NUM_ENTRIES
blt+ 4f
li r10,SLB_NUM_BOLTED
4:
std r10,PACASTABRR(r13)
3:
rldimi r3,r10,0,36 /* r3= EA[0:35] | entry */
oris r10,r3,SLB_ESID_V@h /* r3 |= SLB_ESID_V */
/* r3 = ESID data, r11 = VSID data */
/* /*
* No need for an isync before or after this slbmte. The exception * No need for an isync before or after this slbmte. The exception
...@@ -125,7 +210,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) ...@@ -125,7 +210,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
*/ */
slbmte r11,r10 slbmte r11,r10
bgelr cr7 /* we're done for kernel addresses */ /* we're done for kernel addresses */
crclr 4*cr0+eq /* set result to "success" */
bgelr cr7
/* Update the slb cache */ /* Update the slb cache */
lhz r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */ lhz r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
...@@ -143,9 +230,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) ...@@ -143,9 +230,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
li r3,SLB_CACHE_ENTRIES+1 li r3,SLB_CACHE_ENTRIES+1
2: 2:
sth r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */ sth r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
crclr 4*cr0+eq /* set result to "success" */
blr blr
8: /* invalid EA */
li r3,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
b 9b
...@@ -26,7 +26,6 @@ struct stab_entry { ...@@ -26,7 +26,6 @@ struct stab_entry {
unsigned long vsid_data; unsigned long vsid_data;
}; };
/* Both the segment table and SLB code uses the following cache */
#define NR_STAB_CACHE_ENTRIES 8 #define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr); DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]); DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
...@@ -186,7 +185,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) ...@@ -186,7 +185,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
/* Never flush the first entry. */ /* Never flush the first entry. */
ste += 1; ste += 1;
for (entry = 1; for (entry = 1;
entry < (PAGE_SIZE / sizeof(struct stab_entry)); entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
entry++, ste++) { entry++, ste++) {
unsigned long ea; unsigned long ea;
ea = ste->esid_data & ESID_MASK; ea = ste->esid_data & ESID_MASK;
...@@ -200,6 +199,10 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) ...@@ -200,6 +199,10 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
__get_cpu_var(stab_cache_ptr) = 0; __get_cpu_var(stab_cache_ptr) = 0;
#ifdef CONFIG_PPC_64K_PAGES
get_paca()->pgdir = mm->pgd;
#endif /* CONFIG_PPC_64K_PAGES */
/* Now preload some entries for the new task */ /* Now preload some entries for the new task */
if (test_tsk_thread_flag(tsk, TIF_32BIT)) if (test_tsk_thread_flag(tsk, TIF_32BIT))
unmapped_base = TASK_UNMAPPED_BASE_USER32; unmapped_base = TASK_UNMAPPED_BASE_USER32;
...@@ -223,8 +226,6 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) ...@@ -223,8 +226,6 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
asm volatile("sync" : : : "memory"); asm volatile("sync" : : : "memory");
} }
extern void slb_initialize(void);
/* /*
* Allocate segment tables for secondary CPUs. These must all go in * Allocate segment tables for secondary CPUs. These must all go in
* the first (bolted) segment, so that do_stab_bolted won't get a * the first (bolted) segment, so that do_stab_bolted won't get a
...@@ -243,18 +244,21 @@ void stabs_alloc(void) ...@@ -243,18 +244,21 @@ void stabs_alloc(void)
if (cpu == 0) if (cpu == 0)
continue; /* stab for CPU 0 is statically allocated */ continue; /* stab for CPU 0 is statically allocated */
newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT); newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
1<<SID_SHIFT);
if (! newstab) if (! newstab)
panic("Unable to allocate segment table for CPU %d.\n", panic("Unable to allocate segment table for CPU %d.\n",
cpu); cpu);
newstab += KERNELBASE; newstab += KERNELBASE;
memset((void *)newstab, 0, PAGE_SIZE); memset((void *)newstab, 0, HW_PAGE_SIZE);
paca[cpu].stab_addr = newstab; paca[cpu].stab_addr = newstab;
paca[cpu].stab_real = virt_to_abs(newstab); paca[cpu].stab_real = virt_to_abs(newstab);
printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real); printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx "
"virtual, 0x%lx absolute\n",
cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
} }
} }
...@@ -267,13 +271,9 @@ void stab_initialize(unsigned long stab) ...@@ -267,13 +271,9 @@ void stab_initialize(unsigned long stab)
{ {
unsigned long vsid = get_kernel_vsid(KERNELBASE); unsigned long vsid = get_kernel_vsid(KERNELBASE);
if (cpu_has_feature(CPU_FTR_SLB)) {
slb_initialize();
} else {
asm volatile("isync; slbia; isync":::"memory"); asm volatile("isync; slbia; isync":::"memory");
make_ste(stab, GET_ESID(KERNELBASE), vsid); make_ste(stab, GET_ESID(KERNELBASE), vsid);
/* Order update */ /* Order update */
asm volatile("sync":::"memory"); asm volatile("sync":::"memory");
}
} }
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
* as published by the Free Software Foundation; either version * as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -30,7 +31,7 @@ ...@@ -30,7 +31,7 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/tlb.h> #include <asm/tlb.h>
#include <linux/highmem.h> #include <asm/bug.h>
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
...@@ -126,28 +127,46 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) ...@@ -126,28 +127,46 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
* (if we remove it we should clear the _PTE_HPTEFLAGS bits). * (if we remove it we should clear the _PTE_HPTEFLAGS bits).
*/ */
void hpte_update(struct mm_struct *mm, unsigned long addr, void hpte_update(struct mm_struct *mm, unsigned long addr,
unsigned long pte, int wrprot) pte_t *ptep, unsigned long pte, int huge)
{ {
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
unsigned long vsid; unsigned long vsid;
unsigned int psize = mmu_virtual_psize;
int i; int i;
i = batch->index; i = batch->index;
/* We mask the address for the base page size. Huge pages will
* have applied their own masking already
*/
addr &= PAGE_MASK;
/* Get page size (maybe move back to caller) */
if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
psize = mmu_huge_psize;
#else
BUG();
#endif
}
/* /*
* This can happen when we are in the middle of a TLB batch and * This can happen when we are in the middle of a TLB batch and
* we encounter memory pressure (eg copy_page_range when it tries * we encounter memory pressure (eg copy_page_range when it tries
* to allocate a new pte). If we have to reclaim memory and end * to allocate a new pte). If we have to reclaim memory and end
* up scanning and resetting referenced bits then our batch context * up scanning and resetting referenced bits then our batch context
* will change mid stream. * will change mid stream.
*
* We also need to ensure only one page size is present in a given
* batch
*/ */
if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) { if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
flush_tlb_pending(); flush_tlb_pending();
i = 0; i = 0;
} }
if (i == 0) { if (i == 0) {
batch->mm = mm; batch->mm = mm;
batch->large = pte_huge(pte); batch->psize = psize;
} }
if (addr < KERNELBASE) { if (addr < KERNELBASE) {
vsid = get_vsid(mm->context.id, addr); vsid = get_vsid(mm->context.id, addr);
...@@ -155,7 +174,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr, ...@@ -155,7 +174,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
} else } else
vsid = get_kernel_vsid(addr); vsid = get_kernel_vsid(addr);
batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff); batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
batch->pte[i] = __pte(pte); batch->pte[i] = __real_pte(__pte(pte), ptep);
batch->index = ++i; batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR) if (i >= PPC64_TLB_BATCH_NR)
flush_tlb_pending(); flush_tlb_pending();
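
The new rule in hpte_update, that a TLB flush batch may only hold entries for a single mm and a single page size, can be modelled in isolation. A minimal user-space sketch of the invariant; the structure, capacity, and page-size values are made up and are not kernel code:

#include <stddef.h>
#include <stdio.h>

#define BATCH_MAX 192	/* assumed batch capacity */

struct tlb_batch {
	const void *mm;			/* owning address space */
	int psize;			/* page size shared by every entry */
	size_t index;
	unsigned long vaddr[BATCH_MAX];
};

static void flush(struct tlb_batch *b)
{
	if (b->index)
		printf("flushing %zu entries (psize %d)\n", b->index, b->psize);
	b->index = 0;
}

/* Queue one address; flush first if the mm or the page size changes. */
static void batch_add(struct tlb_batch *b, const void *mm, int psize,
		      unsigned long vaddr)
{
	if (b->index && (b->mm != mm || b->psize != psize))
		flush(b);
	if (b->index == 0) {
		b->mm = mm;
		b->psize = psize;
	}
	b->vaddr[b->index++] = vaddr;
	if (b->index == BATCH_MAX)
		flush(b);
}

int main(void)
{
	struct tlb_batch b = { 0 };
	int mm_a, mm_b;

	batch_add(&b, &mm_a, 0, 0x1000);	/* 0/1 stand in for 4k/16M psizes */
	batch_add(&b, &mm_a, 0, 0x2000);
	batch_add(&b, &mm_a, 1, 0x3000);	/* page size changed: forces a flush */
	batch_add(&b, &mm_b, 1, 0x4000);	/* mm changed: forces another flush */
	flush(&b);
	return 0;
}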
...@@ -177,7 +196,8 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch) ...@@ -177,7 +196,8 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
local = 1; local = 1;
if (i == 1) if (i == 1)
flush_hash_page(batch->vaddr[0], batch->pte[0], local); flush_hash_page(batch->vaddr[0], batch->pte[0],
batch->psize, local);
else else
flush_hash_range(i, local); flush_hash_range(i, local);
batch->index = 0; batch->index = 0;
......
...@@ -39,15 +39,16 @@ static inline void iSeries_hunlock(unsigned long slot) ...@@ -39,15 +39,16 @@ static inline void iSeries_hunlock(unsigned long slot)
spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]); spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
} }
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long prpn, unsigned long vflags, unsigned long pa, unsigned long rflags,
unsigned long rflags) unsigned long vflags, int psize)
{ {
unsigned long arpn;
long slot; long slot;
hpte_t lhpte; hpte_t lhpte;
int secondary = 0; int secondary = 0;
BUG_ON(psize != MMU_PAGE_4K);
/* /*
* The hypervisor tries both primary and secondary. * The hypervisor tries both primary and secondary.
* If we are being called to insert in the secondary, * If we are being called to insert in the secondary,
...@@ -59,8 +60,19 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, ...@@ -59,8 +60,19 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
iSeries_hlock(hpte_group); iSeries_hlock(hpte_group);
slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT); slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
BUG_ON(lhpte.v & HPTE_V_VALID); if (unlikely(lhpte.v & HPTE_V_VALID)) {
if (vflags & HPTE_V_BOLTED) {
HvCallHpt_setSwBits(slot, 0x10, 0);
HvCallHpt_setPp(slot, PP_RWXX);
iSeries_hunlock(hpte_group);
if (slot < 0)
return 0x8 | (slot & 7);
else
return slot & 7;
}
BUG();
}
if (slot == -1) { /* No available entry found in either group */ if (slot == -1) { /* No available entry found in either group */
iSeries_hunlock(hpte_group); iSeries_hunlock(hpte_group);
...@@ -73,10 +85,9 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, ...@@ -73,10 +85,9 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
slot &= 0x7fffffffffffffff; slot &= 0x7fffffffffffffff;
} }
arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;
lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; lhpte.v = hpte_encode_v(va, MMU_PAGE_4K) | vflags | HPTE_V_VALID;
lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags; lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
/* Now fill in the actual HPTE */ /* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte); HvCallHpt_addValidate(slot, secondary, &lhpte);
...@@ -86,25 +97,6 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, ...@@ -86,25 +97,6 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
return (secondary << 3) | (slot & 7); return (secondary << 3) | (slot & 7);
} }
long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn, unsigned long vflags,
unsigned long rflags)
{
long slot;
hpte_t lhpte;
slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
if (lhpte.v & HPTE_V_VALID) {
/* Bolt the existing HPTE */
HvCallHpt_setSwBits(slot, 0x10, 0);
HvCallHpt_setPp(slot, PP_RWXX);
return 0;
}
return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
}
static unsigned long iSeries_hpte_getword0(unsigned long slot) static unsigned long iSeries_hpte_getword0(unsigned long slot)
{ {
hpte_t hpte; hpte_t hpte;
...@@ -150,15 +142,17 @@ static long iSeries_hpte_remove(unsigned long hpte_group) ...@@ -150,15 +142,17 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
* bits 61..63 : PP2,PP1,PP0 * bits 61..63 : PP2,PP1,PP0
*/ */
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp, static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int large, int local) unsigned long va, int psize, int local)
{ {
hpte_t hpte; hpte_t hpte;
unsigned long avpn = va >> 23; unsigned long want_v;
iSeries_hlock(slot); iSeries_hlock(slot);
HvCallHpt_get(&hpte, slot); HvCallHpt_get(&hpte, slot);
if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) { want_v = hpte_encode_v(va, MMU_PAGE_4K);
if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
/* /*
* Hypervisor expects bits as NPPP, which is * Hypervisor expects bits as NPPP, which is
* different from how they are mapped in our PP. * different from how they are mapped in our PP.
...@@ -210,14 +204,17 @@ static long iSeries_hpte_find(unsigned long vpn) ...@@ -210,14 +204,17 @@ static long iSeries_hpte_find(unsigned long vpn)
* *
* No need to lock here because we should be the only user. * No need to lock here because we should be the only user.
*/ */
static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea) static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
int psize)
{ {
unsigned long vsid,va,vpn; unsigned long vsid,va,vpn;
long slot; long slot;
BUG_ON(psize != MMU_PAGE_4K);
vsid = get_kernel_vsid(ea); vsid = get_kernel_vsid(ea);
va = (vsid << 28) | (ea & 0x0fffffff); va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> PAGE_SHIFT; vpn = va >> HW_PAGE_SHIFT;
slot = iSeries_hpte_find(vpn); slot = iSeries_hpte_find(vpn);
if (slot == -1) if (slot == -1)
panic("updateboltedpp: Could not find page to bolt\n"); panic("updateboltedpp: Could not find page to bolt\n");
...@@ -225,7 +222,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea) ...@@ -225,7 +222,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
} }
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va, static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
int large, int local) int psize, int local)
{ {
unsigned long hpte_v; unsigned long hpte_v;
unsigned long avpn = va >> 23; unsigned long avpn = va >> 23;
......
...@@ -22,7 +22,7 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len) ...@@ -22,7 +22,7 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len)
while (len) { while (len) {
hv_buf.addr = cur; hv_buf.addr = cur;
left_this_page = ((cur & PAGE_MASK) + PAGE_SIZE) - cur; left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
if (left_this_page > len) if (left_this_page > len)
left_this_page = len; left_this_page = len;
hv_buf.len = left_this_page; hv_buf.len = left_this_page;
...@@ -30,6 +30,6 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len) ...@@ -30,6 +30,6 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len)
HvCall2(HvCallBaseWriteLogBuffer, HvCall2(HvCallBaseWriteLogBuffer,
virt_to_abs(&hv_buf), virt_to_abs(&hv_buf),
left_this_page); left_this_page);
cur = (cur & PAGE_MASK) + PAGE_SIZE; cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
} }
} }
...@@ -43,9 +43,12 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages, ...@@ -43,9 +43,12 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
u64 rc; u64 rc;
union tce_entry tce; union tce_entry tce;
index <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
while (npages--) { while (npages--) {
tce.te_word = 0; tce.te_word = 0;
tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> PAGE_SHIFT; tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
if (tbl->it_type == TCE_VB) { if (tbl->it_type == TCE_VB) {
/* Virtual Bus */ /* Virtual Bus */
...@@ -66,7 +69,7 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages, ...@@ -66,7 +69,7 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
rc); rc);
index++; index++;
uaddr += PAGE_SIZE; uaddr += TCE_PAGE_SIZE;
} }
} }
...@@ -74,6 +77,9 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) ...@@ -74,6 +77,9 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
{ {
u64 rc; u64 rc;
npages <<= TCE_PAGE_FACTOR;
index <<= TCE_PAGE_FACTOR;
while (npages--) { while (npages--) {
rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0); rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
if (rc) if (rc)
...@@ -83,27 +89,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) ...@@ -83,27 +89,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
} }
} }
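
The TCE_PAGE_FACTOR shifts in tce_build_iSeries and tce_free_iSeries scale IOMMU-level page indices and counts into 4k TCE entries, so one larger IOMMU page consumes several consecutive TCEs. A worked sketch of the scaling, with assumed shift values rather than the real kernel constants:

#include <stdio.h>

int main(void)
{
	/* Assumed for the example: 64k IOMMU pages, 4k TCE entries. */
	int iommu_page_shift = 16;
	int tce_shift = 12;
	int tce_page_factor = iommu_page_shift - tce_shift;	/* 4 */

	long index = 3, npages = 2;	/* request expressed in IOMMU pages */

	printf("first TCE %ld, TCE count %ld\n",
	       index << tce_page_factor,	/* 48 */
	       npages << tce_page_factor);	/* 32 */
	return 0;
}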
#ifdef CONFIG_PCI
/*
* This function compares the known tables to find an iommu_table
* that has already been built for hardware TCEs.
*/
static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
{
struct pci_dn *pdn;
list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
struct iommu_table *it = pdn->iommu_table;
if ((it != NULL) &&
(it->it_type == TCE_PCI) &&
(it->it_offset == tbl->it_offset) &&
(it->it_index == tbl->it_index) &&
(it->it_size == tbl->it_size))
return it;
}
return NULL;
}
/* /*
* Call Hv with the architected data structure to get TCE table info. * Call Hv with the architected data structure to get TCE table info.
* info. Put the returned data into the Linux representation of the * info. Put the returned data into the Linux representation of the
...@@ -113,7 +98,9 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl) ...@@ -113,7 +98,9 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
* 2. TCE table per Bus. * 2. TCE table per Bus.
* 3. TCE Table per IOA. * 3. TCE Table per IOA.
*/ */
static void iommu_table_getparms(struct pci_dn *pdn, void iommu_table_getparms_iSeries(unsigned long busno,
unsigned char slotno,
unsigned char virtbus,
struct iommu_table* tbl) struct iommu_table* tbl)
{ {
struct iommu_table_cb *parms; struct iommu_table_cb *parms;
...@@ -124,9 +111,9 @@ static void iommu_table_getparms(struct pci_dn *pdn, ...@@ -124,9 +111,9 @@ static void iommu_table_getparms(struct pci_dn *pdn,
memset(parms, 0, sizeof(*parms)); memset(parms, 0, sizeof(*parms));
parms->itc_busno = pdn->busno; parms->itc_busno = busno;
parms->itc_slotno = pdn->LogicalSlot; parms->itc_slotno = slotno;
parms->itc_virtbus = 0; parms->itc_virtbus = virtbus;
HvCallXm_getTceTableParms(iseries_hv_addr(parms)); HvCallXm_getTceTableParms(iseries_hv_addr(parms));
...@@ -134,17 +121,40 @@ static void iommu_table_getparms(struct pci_dn *pdn, ...@@ -134,17 +121,40 @@ static void iommu_table_getparms(struct pci_dn *pdn,
panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
/* itc_size is in pages worth of table, it_size is in # of entries */ /* itc_size is in pages worth of table, it_size is in # of entries */
tbl->it_size = (parms->itc_size * PAGE_SIZE) / sizeof(union tce_entry); tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) /
sizeof(union tce_entry)) >> TCE_PAGE_FACTOR;
tbl->it_busno = parms->itc_busno; tbl->it_busno = parms->itc_busno;
tbl->it_offset = parms->itc_offset; tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR;
tbl->it_index = parms->itc_index; tbl->it_index = parms->itc_index;
tbl->it_blocksize = 1; tbl->it_blocksize = 1;
tbl->it_type = TCE_PCI; tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
kfree(parms); kfree(parms);
} }
#ifdef CONFIG_PCI
/*
* This function compares the known tables to find an iommu_table
* that has already been built for hardware TCEs.
*/
static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
{
struct pci_dn *pdn;
list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
struct iommu_table *it = pdn->iommu_table;
if ((it != NULL) &&
(it->it_type == TCE_PCI) &&
(it->it_offset == tbl->it_offset) &&
(it->it_index == tbl->it_index) &&
(it->it_size == tbl->it_size))
return it;
}
return NULL;
}
void iommu_devnode_init_iSeries(struct device_node *dn) void iommu_devnode_init_iSeries(struct device_node *dn)
{ {
struct iommu_table *tbl; struct iommu_table *tbl;
...@@ -152,7 +162,7 @@ void iommu_devnode_init_iSeries(struct device_node *dn) ...@@ -152,7 +162,7 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
iommu_table_getparms(pdn, tbl); iommu_table_getparms_iSeries(pdn->busno, pdn->LogicalSlot, 0, tbl);
/* Look for existing tce table */ /* Look for existing tce table */
pdn->iommu_table = iommu_table_find(tbl); pdn->iommu_table = iommu_table_find(tbl);
......
...@@ -320,11 +320,11 @@ static void __init iSeries_init_early(void) ...@@ -320,11 +320,11 @@ static void __init iSeries_init_early(void)
*/ */
if (naca.xRamDisk) { if (naca.xRamDisk) {
initrd_start = (unsigned long)__va(naca.xRamDisk); initrd_start = (unsigned long)__va(naca.xRamDisk);
initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE; initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE;
initrd_below_start_ok = 1; // ramdisk in kernel space initrd_below_start_ok = 1; // ramdisk in kernel space
ROOT_DEV = Root_RAM0; ROOT_DEV = Root_RAM0;
if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize) if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize)
rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024; rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024;
} else } else
#endif /* CONFIG_BLK_DEV_INITRD */ #endif /* CONFIG_BLK_DEV_INITRD */
{ {
...@@ -470,13 +470,14 @@ static void __init build_iSeries_Memory_Map(void) ...@@ -470,13 +470,14 @@ static void __init build_iSeries_Memory_Map(void)
*/ */
hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
hptSizePages = (u32)HvCallHpt_getHptPages(); hptSizePages = (u32)HvCallHpt_getHptPages();
hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT); hptSizeChunks = hptSizePages >>
(MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
hptLastChunk = hptFirstChunk + hptSizeChunks - 1; hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
printk("HPT absolute addr = %016lx, size = %dK\n", printk("HPT absolute addr = %016lx, size = %dK\n",
chunk_to_addr(hptFirstChunk), hptSizeChunks * 256); chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE); ppc64_pft_size = __ilog2(hptSizePages * HW_PAGE_SIZE);
/* /*
* The actual hashed page table is in the hypervisor, * The actual hashed page table is in the hypervisor,
...@@ -629,7 +630,7 @@ static void __init iSeries_fixup_klimit(void) ...@@ -629,7 +630,7 @@ static void __init iSeries_fixup_klimit(void)
*/ */
if (naca.xRamDisk) if (naca.xRamDisk)
klimit = KERNELBASE + (u64)naca.xRamDisk + klimit = KERNELBASE + (u64)naca.xRamDisk +
(naca.xRamDiskSize * PAGE_SIZE); (naca.xRamDiskSize * HW_PAGE_SIZE);
else { else {
/* /*
* No ram disk was included - check and see if there * No ram disk was included - check and see if there
......
...@@ -30,41 +30,14 @@ static struct iommu_table vio_iommu_table; ...@@ -30,41 +30,14 @@ static struct iommu_table vio_iommu_table;
static void __init iommu_vio_init(void) static void __init iommu_vio_init(void)
{ {
struct iommu_table *t; iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
struct iommu_table_cb cb; veth_iommu_table.it_size /= 2;
unsigned long cbp; vio_iommu_table = veth_iommu_table;
unsigned long itc_entries; vio_iommu_table.it_offset += veth_iommu_table.it_size;
cb.itc_busno = 255; /* Bus 255 is the virtual bus */ if (!iommu_init_table(&veth_iommu_table))
cb.itc_virtbus = 0xff; /* Ask for virtual bus */
cbp = virt_to_abs(&cb);
HvCallXm_getTceTableParms(cbp);
itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
veth_iommu_table.it_size = itc_entries / 2;
veth_iommu_table.it_busno = cb.itc_busno;
veth_iommu_table.it_offset = cb.itc_offset;
veth_iommu_table.it_index = cb.itc_index;
veth_iommu_table.it_type = TCE_VB;
veth_iommu_table.it_blocksize = 1;
t = iommu_init_table(&veth_iommu_table);
if (!t)
printk("Virtual Bus VETH TCE table failed.\n"); printk("Virtual Bus VETH TCE table failed.\n");
if (!iommu_init_table(&vio_iommu_table))
vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
vio_iommu_table.it_busno = cb.itc_busno;
vio_iommu_table.it_offset = cb.itc_offset +
veth_iommu_table.it_size;
vio_iommu_table.it_index = cb.itc_index;
vio_iommu_table.it_type = TCE_VB;
vio_iommu_table.it_blocksize = 1;
t = iommu_init_table(&vio_iommu_table);
if (!t)
printk("Virtual Bus VIO TCE table failed.\n"); printk("Virtual Bus VIO TCE table failed.\n");
} }
......
...@@ -68,7 +68,8 @@ static DEFINE_SPINLOCK(statuslock); ...@@ -68,7 +68,8 @@ static DEFINE_SPINLOCK(statuslock);
* For each kind of event we allocate a buffer that is * For each kind of event we allocate a buffer that is
* guaranteed not to cross a page boundary * guaranteed not to cross a page boundary
*/ */
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned; static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
__attribute__((__aligned__(4096)));
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES]; static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised; static int event_buffer_initialised;
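
Switching the event buffer from __page_aligned to an explicit 4096-byte alignment keeps the original guarantee once PAGE_SIZE can be 64k: with the array 4096-aligned and each per-subtype buffer 256 bytes, no buffer can straddle a 4k hardware page. A quick check of that property; the VIO_MAX_SUBTYPES value here is only an assumption for the sketch:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long base = 0x10000;	/* any 4096-aligned start address */
	int vio_max_subtypes = 8;	/* assumed value, for illustration only */

	for (int i = 0; i < vio_max_subtypes; i++) {
		unsigned long start = base + 256UL * i;
		unsigned long end   = start + 255;
		assert((start >> 12) == (end >> 12));	/* stays within one 4k page */
	}
	printf("no 256-byte event buffer crosses a 4k boundary\n");
	return 0;
}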
...@@ -116,12 +117,12 @@ static int proc_viopath_show(struct seq_file *m, void *v) ...@@ -116,12 +117,12 @@ static int proc_viopath_show(struct seq_file *m, void *v)
HvLpEvent_Rc hvrc; HvLpEvent_Rc hvrc;
DECLARE_MUTEX_LOCKED(Semaphore); DECLARE_MUTEX_LOCKED(Semaphore);
buf = kmalloc(PAGE_SIZE, GFP_KERNEL); buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL);
if (!buf) if (!buf)
return 0; return 0;
memset(buf, 0, PAGE_SIZE); memset(buf, 0, HW_PAGE_SIZE);
handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE, handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
...@@ -131,7 +132,7 @@ static int proc_viopath_show(struct seq_file *m, void *v) ...@@ -131,7 +132,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
viopath_sourceinst(viopath_hostLp), viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp), viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)&Semaphore, VIOVERSION << 16, (u64)(unsigned long)&Semaphore, VIOVERSION << 16,
((u64)handle) << 32, PAGE_SIZE, 0, 0); ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
if (hvrc != HvLpEvent_Rc_Good) if (hvrc != HvLpEvent_Rc_Good)
printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc); printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
...@@ -140,7 +141,7 @@ static int proc_viopath_show(struct seq_file *m, void *v) ...@@ -140,7 +141,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
vlanMap = HvLpConfig_getVirtualLanIndexMap(); vlanMap = HvLpConfig_getVirtualLanIndexMap();
buf[PAGE_SIZE-1] = '\0'; buf[HW_PAGE_SIZE-1] = '\0';
seq_printf(m, "%s", buf); seq_printf(m, "%s", buf);
seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap); seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n", seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
...@@ -152,7 +153,8 @@ static int proc_viopath_show(struct seq_file *m, void *v) ...@@ -152,7 +153,8 @@ static int proc_viopath_show(struct seq_file *m, void *v)
e2a(xItExtVpdPanel.systemSerial[4]), e2a(xItExtVpdPanel.systemSerial[4]),
e2a(xItExtVpdPanel.systemSerial[5])); e2a(xItExtVpdPanel.systemSerial[5]));
dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE); dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE,
DMA_FROM_DEVICE);
kfree(buf); kfree(buf);
return 0; return 0;
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ */
#define DEBUG #undef DEBUG_LOW
#include <linux/config.h> #include <linux/config.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -41,10 +41,10 @@ ...@@ -41,10 +41,10 @@
#include "plpar_wrappers.h" #include "plpar_wrappers.h"
#ifdef DEBUG #ifdef DEBUG_LOW
#define DBG(fmt...) udbg_printf(fmt) #define DBG_LOW(fmt...) do { udbg_printf(fmt); } while(0)
#else #else
#define DBG(fmt...) #define DBG_LOW(fmt...) do { } while(0)
#endif #endif
/* in pSeries_hvCall.S */ /* in pSeries_hvCall.S */
...@@ -276,8 +276,9 @@ void vpa_init(int cpu) ...@@ -276,8 +276,9 @@ void vpa_init(int cpu)
} }
long pSeries_lpar_hpte_insert(unsigned long hpte_group, long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn, unsigned long va, unsigned long pa,
unsigned long vflags, unsigned long rflags) unsigned long rflags, unsigned long vflags,
int psize)
{ {
unsigned long lpar_rc; unsigned long lpar_rc;
unsigned long flags; unsigned long flags;
...@@ -285,11 +286,28 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, ...@@ -285,11 +286,28 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long hpte_v, hpte_r; unsigned long hpte_v, hpte_r;
unsigned long dummy0, dummy1; unsigned long dummy0, dummy1;
hpte_v = ((va >> 23) << HPTE_V_AVPN_SHIFT) | vflags | HPTE_V_VALID; if (!(vflags & HPTE_V_BOLTED))
if (vflags & HPTE_V_LARGE) DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT); "rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, va, pa, rflags, vflags, psize);
hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags; hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
#if 1
{
int i;
for (i=0;i<8;i++) {
unsigned long w0, w1;
plpar_pte_read(0, hpte_group, &w0, &w1);
BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
&& (w0 & HPTE_V_VALID));
}
}
#endif
/* Now fill in the actual HPTE */ /* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */ /* Set CEC cookie to 0 */
...@@ -299,23 +317,30 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, ...@@ -299,23 +317,30 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Exact = 0 */ /* Exact = 0 */
flags = 0; flags = 0;
/* XXX why is this here? - Anton */ /* Make pHyp happy */
if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE)) if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
hpte_r &= ~_PAGE_COHERENT; hpte_r &= ~_PAGE_COHERENT;
lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v, lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
hpte_r, &slot, &dummy0, &dummy1); hpte_r, &slot, &dummy0, &dummy1);
if (unlikely(lpar_rc == H_PTEG_Full)) {
if (unlikely(lpar_rc == H_PTEG_Full)) if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" full\n");
return -1; return -1;
}
/* /*
* Since we try and ioremap PHBs we don't own, the pte insert * Since we try and ioremap PHBs we don't own, the pte insert
* will fail. However we must catch the failure in hash_page * will fail. However we must catch the failure in hash_page
* or we will loop forever, so return -2 in this case. * or we will loop forever, so return -2 in this case.
*/ */
if (unlikely(lpar_rc != H_Success)) if (unlikely(lpar_rc != H_Success)) {
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" lpar err %d\n", lpar_rc);
return -2; return -2;
}
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" -> slot: %d\n", slot & 7);
/* Because of iSeries, we have to pass down the secondary /* Because of iSeries, we have to pass down the secondary
* bucket bit here as well * bucket bit here as well
...@@ -340,10 +365,8 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) ...@@ -340,10 +365,8 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
/* don't remove a bolted entry */ /* don't remove a bolted entry */
lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
(0x1UL << 4), &dummy1, &dummy2); (0x1UL << 4), &dummy1, &dummy2);
if (lpar_rc == H_Success) if (lpar_rc == H_Success)
return i; return i;
BUG_ON(lpar_rc != H_Not_Found); BUG_ON(lpar_rc != H_Not_Found);
slot_offset++; slot_offset++;
...@@ -371,20 +394,28 @@ static void pSeries_lpar_hptab_clear(void) ...@@ -371,20 +394,28 @@ static void pSeries_lpar_hptab_clear(void)
* We can probably optimize here and assume the high bits of newpp are * We can probably optimize here and assume the high bits of newpp are
* already zero. For now I am paranoid. * already zero. For now I am paranoid.
*/ */
static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, static long pSeries_lpar_hpte_updatepp(unsigned long slot,
unsigned long va, int large, int local) unsigned long newpp,
unsigned long va,
int psize, int local)
{ {
unsigned long lpar_rc; unsigned long lpar_rc;
unsigned long flags = (newpp & 7) | H_AVPN; unsigned long flags = (newpp & 7) | H_AVPN;
unsigned long avpn = va >> 23; unsigned long want_v;
if (large) want_v = hpte_encode_v(va, psize);
avpn &= ~0x1UL;
lpar_rc = plpar_pte_protect(flags, slot, (avpn << 7)); DBG_LOW(" update: avpnv=%016lx, hash=%016lx, f=%x, psize: %d ... ",
want_v & HPTE_V_AVPN, slot, flags, psize);
if (lpar_rc == H_Not_Found) lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN);
if (lpar_rc == H_Not_Found) {
DBG_LOW("not found !\n");
return -1; return -1;
}
DBG_LOW("ok\n");
BUG_ON(lpar_rc != H_Success); BUG_ON(lpar_rc != H_Success);
...@@ -410,21 +441,22 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) ...@@ -410,21 +441,22 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
return dword0; return dword0;
} }
static long pSeries_lpar_hpte_find(unsigned long vpn) static long pSeries_lpar_hpte_find(unsigned long va, int psize)
{ {
unsigned long hash; unsigned long hash;
unsigned long i, j; unsigned long i, j;
long slot; long slot;
unsigned long hpte_v; unsigned long want_v, hpte_v;
hash = hpt_hash(vpn, 0); hash = hpt_hash(va, mmu_psize_defs[psize].shift);
want_v = hpte_encode_v(va, psize);
for (j = 0; j < 2; j++) { for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) { for (i = 0; i < HPTES_PER_GROUP; i++) {
hpte_v = pSeries_lpar_hpte_getword0(slot); hpte_v = pSeries_lpar_hpte_getword0(slot);
if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11)) if (HPTE_V_COMPARE(hpte_v, want_v)
&& (hpte_v & HPTE_V_VALID) && (hpte_v & HPTE_V_VALID)
&& (!!(hpte_v & HPTE_V_SECONDARY) == j)) { && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
/* HPTE matches */ /* HPTE matches */
...@@ -441,17 +473,15 @@ static long pSeries_lpar_hpte_find(unsigned long vpn) ...@@ -441,17 +473,15 @@ static long pSeries_lpar_hpte_find(unsigned long vpn)
} }
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
unsigned long ea) unsigned long ea,
int psize)
{ {
unsigned long lpar_rc; unsigned long lpar_rc, slot, vsid, va, flags;
unsigned long vsid, va, vpn, flags;
long slot;
vsid = get_kernel_vsid(ea); vsid = get_kernel_vsid(ea);
va = (vsid << 28) | (ea & 0x0fffffff); va = (vsid << 28) | (ea & 0x0fffffff);
vpn = va >> PAGE_SHIFT;
slot = pSeries_lpar_hpte_find(vpn); slot = pSeries_lpar_hpte_find(va, psize);
BUG_ON(slot == -1); BUG_ON(slot == -1);
flags = newpp & 7; flags = newpp & 7;
...@@ -461,18 +491,18 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, ...@@ -461,18 +491,18 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
} }
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
int large, int local) int psize, int local)
{ {
unsigned long avpn = va >> 23; unsigned long want_v;
unsigned long lpar_rc; unsigned long lpar_rc;
unsigned long dummy1, dummy2; unsigned long dummy1, dummy2;
if (large) DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d",
avpn &= ~0x1UL; slot, va, psize, local);
lpar_rc = plpar_pte_remove(H_AVPN, slot, (avpn << 7), &dummy1,
&dummy2);
want_v = hpte_encode_v(va, psize);
lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN,
&dummy1, &dummy2);
if (lpar_rc == H_Not_Found) if (lpar_rc == H_Not_Found)
return; return;
...@@ -494,7 +524,8 @@ void pSeries_lpar_flush_hash_range(unsigned long number, int local) ...@@ -494,7 +524,8 @@ void pSeries_lpar_flush_hash_range(unsigned long number, int local)
spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
for (i = 0; i < number; i++) for (i = 0; i < number; i++)
flush_hash_page(batch->vaddr[i], batch->pte[i], local); flush_hash_page(batch->vaddr[i], batch->pte[i],
batch->psize, local);
if (lock_tlbie) if (lock_tlbie)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
......
...@@ -47,6 +47,10 @@ config ARCH_MAY_HAVE_PC_FDC ...@@ -47,6 +47,10 @@ config ARCH_MAY_HAVE_PC_FDC
bool bool
default y default y
config PPC_STD_MMU
bool
default y
# We optimistically allocate largepages from the VM, so make the limit # We optimistically allocate largepages from the VM, so make the limit
# large enough (16MB). This badly named config option is actually # large enough (16MB). This badly named config option is actually
# max order + 1 # max order + 1
...@@ -294,6 +298,15 @@ config NODES_SPAN_OTHER_NODES ...@@ -294,6 +298,15 @@ config NODES_SPAN_OTHER_NODES
def_bool y def_bool y
depends on NEED_MULTIPLE_NODES depends on NEED_MULTIPLE_NODES
config PPC_64K_PAGES
bool "64k page size"
help
This option changes the kernel logical page size to 64k. On machines
without processor support for 64k pages, the kernel will simulate
them by loading each individual 4k page on demand transparently,
while on hardware with such support, 64k pages are used to map
normal application pages.
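
In the unsupported-hardware case this help text describes, each 64k logical page is backed by sixteen 4k hardware pages that are hashed in one at a time as they fault. A quick sketch of the sub-page arithmetic, illustrative only and not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long ea = 0x1234d678UL;	/* example faulting address */

	unsigned long linux_page = ea >> 16;		/* 64k logical page number */
	unsigned long subpage    = (ea >> 12) & 0xf;	/* which of its 16 4k sub-pages */

	printf("64k page %#lx, 4k sub-page index %lu\n", linux_page, subpage);
	return 0;
}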
config SCHED_SMT config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support" bool "SMT (Hyperthreading) scheduler support"
depends on SMP depends on SMP
......
...@@ -93,6 +93,9 @@ int main(void) ...@@ -93,6 +93,9 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_64K_PAGES
DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
#endif
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
static union { static union {
struct systemcfg data; struct systemcfg data;
u8 page[PAGE_SIZE]; u8 page[PAGE_SIZE];
} systemcfg_store __page_aligned; } systemcfg_store __attribute__((__section__(".data.page.aligned")));
struct systemcfg *systemcfg = &systemcfg_store.data; struct systemcfg *systemcfg = &systemcfg_store.data;
EXPORT_SYMBOL(systemcfg); EXPORT_SYMBOL(systemcfg);
......
...@@ -635,7 +635,7 @@ static inline char *find_flat_dt_string(u32 offset) ...@@ -635,7 +635,7 @@ static inline char *find_flat_dt_string(u32 offset)
* used to extract the memory information at boot before we can * used to extract the memory information at boot before we can
* unflatten the tree * unflatten the tree
*/ */
static int __init scan_flat_dt(int (*it)(unsigned long node, int __init of_scan_flat_dt(int (*it)(unsigned long node,
const char *uname, int depth, const char *uname, int depth,
void *data), void *data),
void *data) void *data)
...@@ -695,7 +695,7 @@ static int __init scan_flat_dt(int (*it)(unsigned long node, ...@@ -695,7 +695,7 @@ static int __init scan_flat_dt(int (*it)(unsigned long node,
* This function can be used within scan_flattened_dt callback to get * This function can be used within scan_flattened_dt callback to get
* access to properties * access to properties
*/ */
static void* __init get_flat_dt_prop(unsigned long node, const char *name, void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
unsigned long *size) unsigned long *size)
{ {
unsigned long p = node; unsigned long p = node;
...@@ -996,7 +996,7 @@ void __init unflatten_device_tree(void) ...@@ -996,7 +996,7 @@ void __init unflatten_device_tree(void)
static int __init early_init_dt_scan_cpus(unsigned long node, static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth, void *data) const char *uname, int depth, void *data)
{ {
char *type = get_flat_dt_prop(node, "device_type", NULL); char *type = of_get_flat_dt_prop(node, "device_type", NULL);
u32 *prop; u32 *prop;
unsigned long size; unsigned long size;
...@@ -1004,17 +1004,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node, ...@@ -1004,17 +1004,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
if (type == NULL || strcmp(type, "cpu") != 0) if (type == NULL || strcmp(type, "cpu") != 0)
return 0; return 0;
/* On LPAR, look for the first ibm,pft-size property for the hash table size
*/
if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
u32 *pft_size;
pft_size = (u32 *)get_flat_dt_prop(node, "ibm,pft-size", NULL);
if (pft_size != NULL) {
/* pft_size[0] is the NUMA CEC cookie */
ppc64_pft_size = pft_size[1];
}
}
if (initial_boot_params && initial_boot_params->version >= 2) { if (initial_boot_params && initial_boot_params->version >= 2) {
/* version 2 of the kexec param format adds the phys cpuid /* version 2 of the kexec param format adds the phys cpuid
* of booted proc. * of booted proc.
...@@ -1023,8 +1012,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node, ...@@ -1023,8 +1012,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
boot_cpuid = 0; boot_cpuid = 0;
} else { } else {
/* Check if it's the boot-cpu, set it's hw index in paca now */ /* Check if it's the boot-cpu, set it's hw index in paca now */
if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) { if (of_get_flat_dt_prop(node, "linux,boot-cpu", NULL)
u32 *prop = get_flat_dt_prop(node, "reg", NULL); != NULL) {
u32 *prop = of_get_flat_dt_prop(node, "reg", NULL);
set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop); set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
boot_cpuid_phys = get_hard_smp_processor_id(0); boot_cpuid_phys = get_hard_smp_processor_id(0);
} }
...@@ -1032,14 +1022,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node, ...@@ -1032,14 +1022,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */ /* Check if we have a VMX and eventually update CPU features */
prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", NULL); prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
if (prop && (*prop) > 0) { if (prop && (*prop) > 0) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
} }
/* Same goes for Apple's "altivec" property */ /* Same goes for Apple's "altivec" property */
prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL); prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
if (prop) { if (prop) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
...@@ -1051,7 +1041,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, ...@@ -1051,7 +1041,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* this by looking at the size of the ibm,ppc-interrupt-server#s * this by looking at the size of the ibm,ppc-interrupt-server#s
* property * property
*/ */
prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
&size); &size);
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
if (prop && ((size / sizeof(u32)) > 1)) if (prop && ((size / sizeof(u32)) > 1))
...@@ -1072,26 +1062,26 @@ static int __init early_init_dt_scan_chosen(unsigned long node, ...@@ -1072,26 +1062,26 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
return 0; return 0;
/* get platform type */ /* get platform type */
prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL); prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
if (prop == NULL) if (prop == NULL)
return 0; return 0;
systemcfg->platform = *prop; systemcfg->platform = *prop;
/* check if iommu is forced on or off */ /* check if iommu is forced on or off */
if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
iommu_is_off = 1; iommu_is_off = 1;
if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL) if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
iommu_force_on = 1; iommu_force_on = 1;
prop64 = (u64*)get_flat_dt_prop(node, "linux,memory-limit", NULL); prop64 = (u64*)of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (prop64) if (prop64)
memory_limit = *prop64; memory_limit = *prop64;
prop64 = (u64*)get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-start",NULL);
if (prop64) if (prop64)
tce_alloc_start = *prop64; tce_alloc_start = *prop64;
prop64 = (u64*)get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
if (prop64) if (prop64)
tce_alloc_end = *prop64; tce_alloc_end = *prop64;
...@@ -1102,9 +1092,12 @@ static int __init early_init_dt_scan_chosen(unsigned long node, ...@@ -1102,9 +1092,12 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
{ {
u64 *basep, *entryp; u64 *basep, *entryp;
basep = (u64*)get_flat_dt_prop(node, "linux,rtas-base", NULL); basep = (u64*)of_get_flat_dt_prop(node,
entryp = (u64*)get_flat_dt_prop(node, "linux,rtas-entry", NULL); "linux,rtas-base", NULL);
prop = (u32*)get_flat_dt_prop(node, "linux,rtas-size", NULL); entryp = (u64*)of_get_flat_dt_prop(node,
"linux,rtas-entry", NULL);
prop = (u32*)of_get_flat_dt_prop(node,
"linux,rtas-size", NULL);
if (basep && entryp && prop) { if (basep && entryp && prop) {
rtas.base = *basep; rtas.base = *basep;
rtas.entry = *entryp; rtas.entry = *entryp;
...@@ -1125,11 +1118,11 @@ static int __init early_init_dt_scan_root(unsigned long node, ...@@ -1125,11 +1118,11 @@ static int __init early_init_dt_scan_root(unsigned long node,
if (depth != 0) if (depth != 0)
return 0; return 0;
prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL); prop = (u32 *)of_get_flat_dt_prop(node, "#size-cells", NULL);
dt_root_size_cells = (prop == NULL) ? 1 : *prop; dt_root_size_cells = (prop == NULL) ? 1 : *prop;
DBG("dt_root_size_cells = %x\n", dt_root_size_cells); DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL); prop = (u32 *)of_get_flat_dt_prop(node, "#address-cells", NULL);
dt_root_addr_cells = (prop == NULL) ? 2 : *prop; dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
...@@ -1161,7 +1154,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp) ...@@ -1161,7 +1154,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
static int __init early_init_dt_scan_memory(unsigned long node, static int __init early_init_dt_scan_memory(unsigned long node,
const char *uname, int depth, void *data) const char *uname, int depth, void *data)
{ {
char *type = get_flat_dt_prop(node, "device_type", NULL); char *type = of_get_flat_dt_prop(node, "device_type", NULL);
cell_t *reg, *endp; cell_t *reg, *endp;
unsigned long l; unsigned long l;
...@@ -1169,7 +1162,7 @@ static int __init early_init_dt_scan_memory(unsigned long node, ...@@ -1169,7 +1162,7 @@ static int __init early_init_dt_scan_memory(unsigned long node,
if (type == NULL || strcmp(type, "memory") != 0) if (type == NULL || strcmp(type, "memory") != 0)
return 0; return 0;
reg = (cell_t *)get_flat_dt_prop(node, "reg", &l); reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL) if (reg == NULL)
return 0; return 0;
...@@ -1225,19 +1218,16 @@ void __init early_init_devtree(void *params) ...@@ -1225,19 +1218,16 @@ void __init early_init_devtree(void *params)
/* Setup flat device-tree pointer */ /* Setup flat device-tree pointer */
initial_boot_params = params; initial_boot_params = params;
/* By default, hash size is not set */
ppc64_pft_size = 0;
/* Retrieve various information from the /chosen node of the /* Retrieve various information from the /chosen node of the
* device-tree, including the platform type, initrd location and * device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ... * size, TCE reserve, and more ...
*/ */
scan_flat_dt(early_init_dt_scan_chosen, NULL); of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
/* Scan memory nodes and rebuild LMBs */ /* Scan memory nodes and rebuild LMBs */
lmb_init(); lmb_init();
scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_root, NULL);
scan_flat_dt(early_init_dt_scan_memory, NULL); of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit); lmb_enforce_memory_limit(memory_limit);
lmb_analyze(); lmb_analyze();
systemcfg->physicalMemorySize = lmb_phys_mem_size(); systemcfg->physicalMemorySize = lmb_phys_mem_size();
...@@ -1253,26 +1243,8 @@ void __init early_init_devtree(void *params) ...@@ -1253,26 +1243,8 @@ void __init early_init_devtree(void *params)
/* Retreive hash table size from flattened tree plus other /* Retreive hash table size from flattened tree plus other
* CPU related informations (altivec support, boot CPU ID, ...) * CPU related informations (altivec support, boot CPU ID, ...)
*/ */
scan_flat_dt(early_init_dt_scan_cpus, NULL); of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
/* If hash size wasn't obtained above, we calculate it now based on
* the total RAM size
*/
if (ppc64_pft_size == 0) {
unsigned long rnd_mem_size, pteg_count;
/* round mem_size up to next power of 2 */
rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
if (rnd_mem_size < systemcfg->physicalMemorySize)
rnd_mem_size <<= 1;
/* # pages / 2 */
pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
ppc64_pft_size = __ilog2(pteg_count << 7);
}
DBG("Hash pftSize: %x\n", (int)ppc64_pft_size);
DBG(" <- early_init_devtree()\n"); DBG(" <- early_init_devtree()\n");
} }
......
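For readers following the get_flat_dt_prop()/scan_flat_dt() renames above, a minimal sketch of how the renamed helpers are driven is shown here. The callback name, the "linux,example-prop" property and the example_value variable are illustrative assumptions, not code from this commit; the fragment assumes the usual prom.c includes.

/* Minimal sketch, not part of this commit: a flattened device-tree scan
 * callback in the style of the early_init_dt_scan_*() functions above. */
static u64 example_value;

static int __init early_init_dt_scan_example(unsigned long node,
					     const char *uname, int depth,
					     void *data)
{
	u64 *prop64;

	/* Only interested in the /chosen node at depth 1. */
	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	prop64 = (u64 *)of_get_flat_dt_prop(node, "linux,example-prop", NULL);
	if (prop64)
		example_value = *prop64;

	/* A non-zero return stops of_scan_flat_dt() early. */
	return 1;
}

/* Invoked early in boot, before the device tree is unflattened:
 *	of_scan_flat_dt(early_init_dt_scan_example, NULL);
 */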
@@ -1470,15 +1470,6 @@ static int __init depca_mca_probe(struct device *device)
 ** ISA bus I/O device probe
 */
-static void depca_platform_release (struct device *device)
-{
-	struct platform_device *pldev;
-	/* free device */
-	pldev = to_platform_device (device);
-	kfree (pldev);
-}
 static void __init depca_platform_probe (void)
 {
 	int i;
@@ -1492,18 +1483,15 @@ static void __init depca_platform_probe (void)
 		if (io && io != depca_io_ports[i].iobase)
 			continue;
-		if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL)))
+		pldev = platform_device_alloc(depca_string, i);
+		if (!pldev)
 			continue;
-		memset (pldev, 0, sizeof (*pldev));
-		pldev->name = depca_string;
-		pldev->id = i;
 		pldev->dev.platform_data = (void *) depca_io_ports[i].iobase;
-		pldev->dev.release = depca_platform_release;
 		depca_io_ports[i].device = pldev;
-		if (platform_device_register (pldev)) {
-			kfree (pldev);
+		if (platform_device_add(pldev)) {
+			platform_device_put(pldev);
 			depca_io_ports[i].device = NULL;
 			continue;
 		}
@@ -1515,6 +1503,7 @@ static void __init depca_platform_probe (void)
 		 * allocated structure */
 		depca_io_ports[i].device = NULL;
+		pldev->dev.platform_data = NULL;
 		platform_device_unregister (pldev);
 	}
 }
@@ -2112,6 +2101,7 @@ static void __exit depca_module_exit (void)
 	for (i = 0; depca_io_ports[i].iobase; i++) {
 		if (depca_io_ports[i].device) {
+			depca_io_ports[i].device->dev.platform_data = NULL;
 			platform_device_unregister (depca_io_ports[i].device);
 			depca_io_ports[i].device = NULL;
 		}
...
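The depca hunks above drop the open-coded kmalloc()/platform_device_register() sequence and the private release handler in favour of the reference-counted helpers. A self-contained sketch of that allocation pattern follows; the "example-dev" name, example_pdev variable and example_register()/example_unregister() functions are placeholders, not symbols from this driver.

/* Sketch of the platform_device_alloc()/platform_device_add() pattern
 * used above.  On trees of this vintage the declarations live in
 * <linux/device.h>; newer kernels use <linux/platform_device.h>. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>

static struct platform_device *example_pdev;

static int __init example_register(void)
{
	int err;

	/* The core now owns the allocation and its release() method,
	 * so no driver-private release function is needed. */
	example_pdev = platform_device_alloc("example-dev", 0);
	if (!example_pdev)
		return -ENOMEM;

	err = platform_device_add(example_pdev);
	if (err) {
		/* Drop the reference taken by platform_device_alloc(). */
		platform_device_put(example_pdev);
		example_pdev = NULL;
	}
	return err;
}

static void example_unregister(void)
{
	/* Unregistering drops the last reference and frees the device. */
	if (example_pdev)
		platform_device_unregister(example_pdev);
}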
@@ -285,18 +285,8 @@ static struct device_driver jazz_sonic_driver = {
 	.remove	= __devexit_p(jazz_sonic_device_remove),
 };
-static void jazz_sonic_platform_release (struct device *device)
-{
-	struct platform_device *pldev;
-	/* free device */
-	pldev = to_platform_device (device);
-	kfree (pldev);
-}
 static int __init jazz_sonic_init_module(void)
 {
-	struct platform_device *pldev;
 	int err;
 	if ((err = driver_register(&jazz_sonic_driver))) {
@@ -304,27 +294,19 @@ static int __init jazz_sonic_init_module(void)
 		return err;
 	}
-	jazz_sonic_device = NULL;
-	if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) {
+	jazz_sonic_device = platform_device_alloc(jazz_sonic_string, 0);
+	if (!jazz_sonic_device)
 		goto out_unregister;
-	}
-	memset(pldev, 0, sizeof (*pldev));
-	pldev->name = jazz_sonic_string;
-	pldev->id = 0;
-	pldev->dev.release = jazz_sonic_platform_release;
-	jazz_sonic_device = pldev;
-	if (platform_device_register (pldev)) {
-		kfree(pldev);
+	if (platform_device_add(jazz_sonic_device)) {
+		platform_device_put(jazz_sonic_device);
 		jazz_sonic_device = NULL;
 	}
 	return 0;
 out_unregister:
-	platform_device_unregister(pldev);
+	driver_unregister(&jazz_sonic_driver);
 	return -ENOMEM;
 }
...
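Since the new init path registers the driver first and only then adds the platform device, the matching cleanup (not visible in this excerpt) is expected to unwind in reverse order. The following is a hedged sketch of that conventional teardown, reusing the names from the hunk above; the jazz_sonic_cleanup_module name is an assumption.

/* Assumed shape of the module exit path; the real cleanup function is
 * not shown in this excerpt, so treat this as a conventional sketch. */
static void __exit jazz_sonic_cleanup_module(void)
{
	/* Undo platform_device_add()/platform_device_alloc() first... */
	if (jazz_sonic_device) {
		platform_device_unregister(jazz_sonic_device);
		jazz_sonic_device = NULL;
	}
	/* ...then drop the driver registration taken in init. */
	driver_unregister(&jazz_sonic_driver);
}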
@@ -49,6 +49,7 @@ struct serial8250_config {
 #define UART_BUG_QUOT	(1 << 0)	/* UART has buggy quot LSB */
 #define UART_BUG_TXEN	(1 << 1)	/* UART has buggy TX IIR status */
+#define UART_BUG_NOMSR	(1 << 2)	/* UART has buggy MSR status bits (Au1x00) */
 #if defined(__i386__) && (defined(CONFIG_M386) || defined(CONFIG_M486))
 #define _INLINE_ inline
...
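UART_BUG_NOMSR extends the existing per-port bug flags; such flags are normally tested against the port's bugs mask before touching the affected register. The sketch below shows that idiom; the helper name and its call site are assumptions, only the flag definition comes from this diff.

/* Illustrative only: how a UART_BUG_* flag is typically consulted.
 * msr_bits_usable() is a made-up helper, not part of this commit. */
static inline int msr_bits_usable(struct uart_8250_port *up)
{
	/* Ports flagged UART_BUG_NOMSR (e.g. Au1x00) report unreliable
	 * modem-status bits, so callers skip MSR delta handling. */
	return !(up->bugs & UART_BUG_NOMSR);
}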