Commit 262fd6ff authored by Linus Torvalds

Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM fixes from Russell King:
 "The larger changes this time are

   - "ARM: 7755/1: handle user space mapped pages in flush_kernel_dcache_page"
     which fixes more data corruption problems with O_DIRECT

   - "ARM: 7759/1: decouple CPU offlining from reboot/shutdown" which
     gets us back to working shutdown/reboot on SMP platforms

   - "ARM: 7752/1: errata: LoUIS bit field in CLIDR register is incorrect"
     which fixes a shutdown regression found in v3.10 on Versatile
     Express platforms.

  The remainder are quite small, maybe one- or two-line changes"

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7759/1: decouple CPU offlining from reboot/shutdown
  ARM: 7756/1: zImage/virt: remove hyp-stub.S during distclean
  ARM: 7755/1: handle user space mapped pages in flush_kernel_dcache_page
  ARM: 7754/1: Fix the CPU ID and the mask associated to the PJ4B
  ARM: 7753/1: map_init_section flushes incorrect pmd
  ARM: 7752/1: errata: LoUIS bit field in CLIDR register is incorrect
parents 17858ca6 19ab428f
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1189,6 +1189,16 @@ config PL310_ERRATA_588369
 	  is not correctly implemented in PL310 as clean lines are not
 	  invalidated as a result of these operations.
 
+config ARM_ERRATA_643719
+	bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 643719 Cortex-A9 (prior to
+	  r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
+	  register returns zero when it should return one. The workaround
+	  corrects this value, ensuring cache maintenance operations which use
+	  it behave as intended and avoiding data corruption.
+
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
 	depends on CPU_V7
@@ -2006,7 +2016,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
-	depends on (!SMP || HOTPLUG_CPU)
+	depends on (!SMP || PM_SLEEP_SMP)
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
...
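The help text describes the failure in prose; the sketch below is a rough model only (plain user-space C written for this page, with made-up CLIDR values, not kernel code) of why a LoUIS field that wrongly reads as zero turns a flush-to-the-point-of-unification call into a silent no-op, which is the corruption path this option closes:

#include <stdint.h>
#include <stdio.h>

/* Toy model of v7_flush_dcache_louis: CLIDR bits [23:21] hold LoUIS,
 * the first cache level that needs no flushing for inner-shareable
 * unification; flushing covers the levels below it. */
#define CLIDR_LOUIS(clidr)	(((clidr) >> 21) & 7)

static void flush_to_louis(uint32_t clidr)
{
	unsigned int level, louis = CLIDR_LOUIS(clidr);

	if (louis == 0) {	/* mirrors "moveq pc, lr" in cache-v7.S */
		puts("LoUIS == 0: returning without flushing anything");
		return;
	}
	for (level = 0; level < louis; level++)
		printf("flush D-cache level %u\n", level);
}

int main(void)
{
	flush_to_louis(0x09200003u);	/* LoUIS = 1: level 0 is flushed */
	flush_to_louis(0x09000003u);	/* erratum: LoUIS reads 0, no-op */
	return 0;
}

The cache-v7.S hunk further down takes the other branch: rather than returning, it patches the extracted field to 1 on affected r0pX Cortex-A9 parts.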
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -116,7 +116,8 @@ targets       := vmlinux vmlinux.lds \
 
 # Make sure files are removed during clean
 extra-y       += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
-		 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+		 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
+		 hyp-stub.S
 
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
...
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)
...
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image)
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
+	if (num_online_cpus() > 1) {
+		pr_err("kexec: error: multiple CPUs still online\n");
+		return;
+	}
 	page_list = image->head & PAGE_MASK;
...
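For context on why multiple online CPUs can be treated as a hard error here: in the generic kexec path machine_shutdown() runs immediately before machine_kexec(), and with this series machine_shutdown() offlines the secondaries through CPU hotplug (see the process.c hunk below), so this check is a backstop. The sketch that follows is a simplified rendering of that ordering, not an excerpt of kernel/kexec.c, and kexec_reboot_sketch is an invented name:

#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/reboot.h>

/*
 * Simplified ordering sketch. machine_kexec() is void, so a failed
 * check shows up as the call returning with the old kernel running.
 */
static int kexec_reboot_sketch(struct kimage *image)
{
	machine_shutdown();	/* on ARM now: disable_nonboot_cpus() */
	machine_kexec(image);	/* bails out if CPUs are still online */
	return -EBUSY;		/* reached only if the kexec was refused */
}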
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -184,30 +184,61 @@ int __init reboot_setup(char *str)
 
 __setup("reboot=", reboot_setup);
 
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
 void machine_shutdown(void)
 {
-#ifdef CONFIG_SMP
-	smp_send_stop();
-#endif
+	disable_nonboot_cpus();
 }
 
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
 void machine_halt(void)
 {
-	machine_shutdown();
+	smp_send_stop();
+
 	local_irq_disable();
 	while (1);
 }
 
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
 void machine_power_off(void)
 {
-	machine_shutdown();
+	smp_send_stop();
+
 	if (pm_power_off)
 		pm_power_off();
 }
 
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
 void machine_restart(char *cmd)
 {
-	machine_shutdown();
+	smp_send_stop();
+
 	arm_pm_restart(reboot_mode, cmd);
...
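The machine_restart() comment places a concrete requirement on SMP board ports: the restart hook must reset every core in hardware at once. Below is a hypothetical hook in that spirit; the register address, reset value, and all names are invented placeholders, and the (char, const char *) signature is the one ARM restart handlers used in this era (hedged, from memory):

#include <linux/io.h>

#define MY_RST_CTRL_PHYS	0x10000000UL	/* invented placeholder */

/* Hypothetical SMP restart hook: one register write resets all cores
 * simultaneously, so no post-reset CPU co-ordination is needed. */
static void my_board_restart(char mode, const char *cmd)
{
	void __iomem *rst = ioremap(MY_RST_CTRL_PHYS, 4);

	if (rst)
		writel(1, rst);		/* assert whole-SoC reset in HW */
	while (1)
		;			/* HW reset takes over from here */
}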
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void smp_kill_cpus(cpumask_t *mask)
-{
-	unsigned int cpu;
-	for_each_cpu(cpu, mask)
-		platform_cpu_kill(cpu);
-}
-#else
-static void smp_kill_cpus(cpumask_t *mask) { }
-#endif
-
 void smp_send_stop(void)
 {
 	unsigned long timeout;
@@ -679,8 +668,6 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
-
-	smp_kill_cpus(&mask);
 }
 
 /*
...
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis)
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
 	ALT_SMP(ands	r3, r0, #(7 << 21))	@ extract LoUIS from clidr
 	ALT_UP(ands	r3, r0, #(7 << 27))	@ extract LoUU from clidr
+#ifdef CONFIG_ARM_ERRATA_643719
+	ALT_SMP(mrceq	p15, 0, r2, c0, c0, 0)	@ read main ID register
+	ALT_UP(moveq	pc, lr)			@ LoUU is zero, so nothing to do
+	ldreq	r1, =0x410fc090			@ ID of ARM Cortex A9 r0p?
+	biceq	r2, r2, #0x0000000f		@ clear minor revision number
+	teqeq	r2, r1				@ test for errata affected core and if so...
+	orreqs	r3, #(1 << 21)			@   fix LoUIS value (and set flags state to 'ne')
+#endif
 	ALT_SMP(mov	r3, r3, lsr #20)	@ r3 = LoUIS * 2
 	ALT_UP(mov	r3, r3, lsr #26)	@ r3 = LoUU * 2
 	moveq	pc, lr				@ return if level == 0
...
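Condition-code-heavy assembly is dense, so here is a C rendition of the #ifdef block (illustrative only; the assembly above is authoritative). It models the SMP path, where the block runs only if the LoUIS field just extracted was zero (the eq condition on entry); on UP builds the inserted ALT_UP(moveq pc, lr) simply returns, since a zero LoUU genuinely means there is nothing to do:

#include <stdint.h>

#define MIDR_CORTEX_A9_R0	0x410fc090u	/* value from the ldreq above */

static uint32_t louis_643719_fixup(uint32_t louis_bits, uint32_t midr)
{
	midr &= ~0x0000000fu;			/* biceq: drop minor revision   */
	if (midr == MIDR_CORTEX_A9_R0)		/* teqeq: affected A9 (< r1p0)? */
		louis_bits |= 1u << 21;		/* orreqs: force LoUIS to 1     */
	return louis_bits;			/* flush then proceeds normally */
}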
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -300,6 +300,39 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+/*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation. Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+		struct address_space *mapping;
+
+		mapping = page_mapping(page);
+
+		if (!mapping || mapping_mapped(mapping)) {
+			void *addr;
+
+			addr = page_address(page);
+			/*
+			 * kmap_atomic() doesn't set the page virtual
+			 * address for highmem pages, and
+			 * kunmap_atomic() takes care of cache
+			 * flushing already.
+			 */
+			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+		}
+	}
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 /*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data. The expected sequence is:
...
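Per Documentation/cachetlb.txt, callers of flush_kernel_dcache_page() modify a page through a kmap'd kernel mapping and call it once the modifications are complete, before unmapping. A typical caller shape (illustrative kernel-style code, not taken from this commit; fill_page is a made-up name):

#include <linux/highmem.h>
#include <linux/string.h>

/* Write into a page via its kernel mapping, then make the change
 * visible to any aliasing user-space mapping of the same page. */
static void fill_page(struct page *page, const void *src, size_t len)
{
	void *addr = kmap_atomic(page);

	memcpy(addr, src, len);
	flush_kernel_dcache_page(page);	/* real flush on VIVT/aliasing VIPT */
	kunmap_atomic(addr);
}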
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 			unsigned long end, phys_addr_t phys,
 			const struct mem_type *type)
 {
+	pmd_t *p = pmd;
+
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * In classic MMU format, puds and pmds are folded in to
@@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr,
 		phys += SECTION_SIZE;
 	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-	flush_pmd_entry(pmd);
+	flush_pmd_entry(p);
 }
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
@@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 	 */
 	if (type->prot_sect &&
 			((addr | next | phys) & ~SECTION_MASK) == 0) {
-		map_init_section(pmd, addr, next, phys, type);
+		__map_init_section(pmd, addr, next, phys, type);
 	} else {
 		alloc_init_pte(pmd, addr, next,
 						__phys_to_pfn(phys), type);
...
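The bug being fixed is subtle: the do/while advances pmd on every iteration, so the old flush_pmd_entry(pmd) ran on the entry one past the last one written. A self-contained toy reproduction of the pointer pattern (plain C, invented names; the real fix flushes the saved start pointer p):

#include <stdio.h>

static void flush_entry(int *e)
{
	printf("flush entry holding %d\n", *e);
}

int main(void)
{
	int pmd_table[4] = { 0 };
	int *pmd = pmd_table, *p = pmd;	/* p saves the start, as the fix does */
	int n = 2;

	do {
		*pmd = 42;		/* write a section entry */
	} while (pmd++, --n);		/* cursor ends one past the last write */

	flush_entry(pmd);	/* old behaviour: the untouched pmd_table[2] */
	flush_entry(p);		/* fixed behaviour: the first written entry  */
	return 0;
}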
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -409,8 +409,8 @@ __v7_ca9mp_proc_info:
 	 */
 	.type	__v7_pj4b_proc_info, #object
__v7_pj4b_proc_info:
-	.long	0x562f5840
-	.long	0xfffffff0
+	.long	0x560f5800
+	.long	0xff0fff00
 	__v7_proc __v7_pj4b_setup
 	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info
...
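The two .long values form a (value, mask) pair that the processor-type lookup compares against the CPU's MIDR: an entry matches when (midr & mask) == value. The old pair fixed every bit except the lowest revision nibble, so in practice only one chip matched; the new pair additionally leaves the variant field and the low part-number/revision bits out of the comparison, so the whole PJ4B family matches. A standalone check, with an MIDR value that is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* proc_info lookup rule: an entry matches when (midr & mask) == value. */
static int matches(uint32_t midr, uint32_t value, uint32_t mask)
{
	return (midr & mask) == value;
}

int main(void)
{
	uint32_t midr = 0x561f5811;	/* illustrative PJ4B-style MIDR */

	printf("old pair: %d\n", matches(midr, 0x562f5840, 0xfffffff0));	/* 0 */
	printf("new pair: %d\n", matches(midr, 0x560f5800, 0xff0fff00));	/* 1 */
	return 0;
}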