Commit 53f1d9af authored by Russell King

Merge branches 'fixes' and 'misc' into for-next

arch/arm/include/asm/assembler.h
@@ -23,6 +23,7 @@
 #include <asm/ptrace.h>
 #include <asm/domain.h>
 #include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>

 #define IOMEM(x)	(x)
@@ -174,6 +175,47 @@
 	restore_irqs_notrace \oldcpsr
 	.endm

+/*
+ * Get current thread_info.
+ */
+	.macro	get_thread_info, rd
+ ARM(	mov	\rd, sp, lsr #13	)
+ THUMB(	mov	\rd, sp			)
+ THUMB(	lsr	\rd, \rd, #13		)
+	mov	\rd, \rd, lsl #13
+	.endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+	.macro	inc_preempt_count, ti, tmp
+	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
+	add	\tmp, \tmp, #1			@ increment it
+	str	\tmp, [\ti, #TI_PREEMPT]
+	.endm
+
+	.macro	dec_preempt_count, ti, tmp
+	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
+	sub	\tmp, \tmp, #1			@ decrement it
+	str	\tmp, [\ti, #TI_PREEMPT]
+	.endm
+
+	.macro	dec_preempt_count_ti, ti, tmp
+	get_thread_info \ti
+	dec_preempt_count \ti, \tmp
+	.endm
+#else
+	.macro	inc_preempt_count, ti, tmp
+	.endm
+
+	.macro	dec_preempt_count, ti, tmp
+	.endm
+
+	.macro	dec_preempt_count_ti, ti, tmp
+	.endm
+#endif
+
 #define USER(x...)				\
 9999:	x;					\
 	.pushsection __ex_table,"a";	\
......
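
For readers who don't speak ARM assembly, here is a minimal, self-contained C sketch (not kernel code) of what the new macros do. It assumes 8 KiB kernel stacks (hence the `lsr #13`/`lsl #13` pair) with `struct thread_info` at the stack base, and a `preempt_count` field at the offset named by the generated constant `TI_PREEMPT`, which is why the hunk above adds the `asm-offsets.h` include.

```c
#include <stdio.h>

#define THREAD_SIZE 8192	/* assumed: 8 KiB ARM kernel stacks */

struct thread_info {
	int preempt_count;	/* the field TI_PREEMPT points at */
};

/* get_thread_info: lsr #13 / lsl #13 clears the low 13 bits of sp,
 * rounding it down to the base of the current kernel stack */
static struct thread_info *get_thread_info_sketch(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

/* inc_preempt_count / dec_preempt_count: the ldr/add/str and
 * ldr/sub/str sequences in the macros above */
static void inc_preempt_count_sketch(struct thread_info *ti) { ti->preempt_count++; }
static void dec_preempt_count_sketch(struct thread_info *ti) { ti->preempt_count--; }

int main(void)
{
	static unsigned char stack[THREAD_SIZE] __attribute__((aligned(THREAD_SIZE)));
	unsigned long sp = (unsigned long)&stack[THREAD_SIZE - 64];	/* fake sp */
	struct thread_info *ti = get_thread_info_sketch(sp);

	inc_preempt_count_sketch(ti);
	dec_preempt_count_sketch(ti);
	printf("ti=%p count=%d\n", (void *)ti, ti->preempt_count);
	return 0;
}
```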
arch/arm/include/asm/cputype.h
@@ -221,4 +221,23 @@ static inline int cpu_is_xsc3(void)
 #define	cpu_is_xscale()	1
 #endif

+/*
+ * Marvell's PJ4 core is based on ARMv7, but has some modifications
+ * to its coprocessor setup. For this reason, we need a way to
+ * distinguish it.
+ */
+#ifndef CONFIG_CPU_PJ4
+#define cpu_is_pj4()	0
+#else
+static inline int cpu_is_pj4(void)
+{
+	unsigned int id;
+
+	id = read_cpuid_id();
+	if ((id & 0xfffffff0) == 0x562f5840)
+		return 1;
+
+	return 0;
+}
+#endif
 #endif
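
The `0xfffffff0` mask in `cpu_is_pj4()` drops the low revision nibble of the main ID register (MIDR), so every revision of the core matches the same value. A small sketch of the comparison, with the MIDR field layout as defined by the ARM architecture (0x56 is the Marvell implementer code):

```c
#include <stdio.h>

/* MIDR layout: [31:24] implementer, [23:20] variant, [19:16] architecture,
 * [15:4] primary part number, [3:0] revision (masked off below). */
static int is_pj4_id(unsigned int midr)
{
	return (midr & 0xfffffff0) == 0x562f5840;
}

int main(void)
{
	printf("%d\n", is_pj4_id(0x562f5842));	/* PJ4 rev 2   -> 1 */
	printf("%d\n", is_pj4_id(0x410fc090));	/* Cortex-A9   -> 0 */
	return 0;
}
```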
arch/arm/kernel/crash_dump.c
@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!csize)
 		return 0;

-	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+	vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
 	if (!vaddr)
 		return -ENOMEM;
......
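
The `ioremap()` change above matters on LPAE systems, where physical addresses can exceed 32 bits: `pfn << PAGE_SHIFT` is computed in the 32-bit `unsigned long` type and silently truncates, while `__pfn_to_phys()` widens to `phys_addr_t` before shifting. A minimal sketch of the difference, assuming `PAGE_SHIFT` of 12 and a 64-bit `phys_addr_t`:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;	/* 64-bit with CONFIG_ARM_LPAE */

int main(void)
{
	uint64_t pfn = 0x100000;	/* first page frame above the 4 GiB line */

	/* old code: the shift happens in 32-bit unsigned long on ARM;
	 * emulate that here by truncating the result to 32 bits */
	phys_addr_t broken = (uint32_t)(pfn << PAGE_SHIFT);
	/* new code: __pfn_to_phys() widens to phys_addr_t before shifting */
	phys_addr_t fixed = (phys_addr_t)pfn << PAGE_SHIFT;

	printf("broken=%#llx fixed=%#llx\n",
	       (unsigned long long)broken, (unsigned long long)fixed);
	/* prints: broken=0 fixed=0x100000000 */
	return 0;
}
```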
arch/arm/kernel/entry-header.S
@@ -236,11 +236,6 @@
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm

-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 @
 @ 32-bit wide "mov pc, reg"
 @
@@ -306,12 +301,6 @@
 	.endm
 #endif	/* ifdef CONFIG_CPU_V7M / else */

-	.macro	get_thread_info, rd
-	mov	\rd, sp
-	lsr	\rd, \rd, #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 @
 @ 32-bit wide "mov pc, reg"
 @
......
arch/arm/kernel/pj4-cp0.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <asm/thread_notify.h>
+#include <asm/cputype.h>

 static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
 {
@@ -80,6 +81,9 @@ static int __init pj4_cp0_init(void)
 {
 	u32 cp_access;

+	if (!cpu_is_pj4())
+		return 0;
+
 	cp_access = pj4_cp_access_read() & ~0xf;
 	pj4_cp_access_write(cp_access);
......
arch/arm/kernel/process.c
@@ -39,6 +39,7 @@
 #include <asm/processor.h>
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
+#include <asm/system_misc.h>
 #include <asm/mach/time.h>
 #include <asm/tls.h>
@@ -100,7 +101,7 @@ void soft_restart(unsigned long addr)
 	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

 	/* Disable interrupts first */
-	local_irq_disable();
+	raw_local_irq_disable();
 	local_fiq_disable();

 	/* Disable the L2 if we're the last man standing. */
......
arch/arm/kernel/traps.c
@@ -445,6 +445,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
 			current->comm, task_pid_nr(current), pc);
+		__show_regs(regs);
 		dump_instr(KERN_INFO, regs);
 	}
 #endif
......
arch/arm/mach-vexpress/dcscb.c
@@ -137,11 +137,16 @@ static void dcscb_power_down(void)
 		v7_exit_coherency_flush(all);

 		/*
-		 * This is a harmless no-op.  On platforms with a real
-		 * outer cache this might either be needed or not,
-		 * depending on where the outer cache sits.
+		 * A full outer cache flush could be needed at this point
+		 * on platforms with such a cache, depending on where the
+		 * outer cache sits. In some cases the notion of a "last
+		 * cluster standing" would need to be implemented if the
+		 * outer cache is shared across clusters. In any case, when
+		 * the outer cache needs flushing, there is no concurrent
+		 * access to the cache controller to worry about and no
+		 * special locking besides what is already provided by the
+		 * MCPM state machinery is needed.
 		 */
-		outer_flush_all();

 		/*
 		 * Disable cluster-level coherency by masking
......
arch/arm/mm/dump.c
@@ -120,34 +120,51 @@ static const struct prot_bits pte_bits[] = {
 };

 static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
-	/* These are approximate */
+#ifdef CONFIG_ARM_LPAE
+	{
+		.mask	= PMD_SECT_USER,
+		.val	= PMD_SECT_USER,
+		.set	= "USR",
+	}, {
+		.mask	= PMD_SECT_RDONLY,
+		.val	= PMD_SECT_RDONLY,
+		.set	= "ro",
+		.clear	= "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
 	{
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-		.val	= 0,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
 		.set	= "    ro",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_WRITE,
 		.set	= "    RW",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_READ,
 		.set	= "USR ro",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.set	= "USR RW",
-#else
+#else /* ARMv4/ARMv5 */
+	/* These are approximate */
 	{
-		.mask	= PMD_SECT_USER,
-		.val	= PMD_SECT_USER,
-		.set	= "USR",
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= 0,
+		.set	= "    ro",
 	}, {
-		.mask	= PMD_SECT_RDONLY,
-		.val	= PMD_SECT_RDONLY,
-		.set	= "ro",
-		.clear	= "RW",
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_WRITE,
+		.set	= "    RW",
+	}, {
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_READ,
+		.set	= "USR ro",
+	}, {
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.set	= "USR RW",
 #endif
 	}, {
 		.mask	= PMD_SECT_XN,
......
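
For context on how the reshuffled table is consumed: each entry pairs a mask with an expected value, and the dump code prints `.set` when the masked descriptor bits equal `.val`, or `.clear` (when given) on a mismatch. A simplified sketch of that matching logic, with names mirroring the real `struct prot_bits` but the walker reduced to one function and the bit position assumed:

```c
#include <stdio.h>

struct prot_bits_sketch {
	unsigned long mask;
	unsigned long val;
	const char *set;
	const char *clear;	/* optional: printed on mismatch */
};

/* LPAE-style example entry; PMD_SECT_RDONLY assumed to be bit 7 here */
#define SECT_RDONLY (1UL << 7)

static const struct prot_bits_sketch ro_bit = {
	.mask = SECT_RDONLY, .val = SECT_RDONLY, .set = "ro", .clear = "RW",
};

static const char *decode(const struct prot_bits_sketch *p, unsigned long pmd)
{
	if ((pmd & p->mask) == p->val)
		return p->set;
	return p->clear ? p->clear : "";
}

int main(void)
{
	printf("%s\n", decode(&ro_bit, SECT_RDONLY));	/* ro */
	printf("%s\n", decode(&ro_bit, 0));		/* RW */
	return 0;
}
```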
arch/arm/vfp/entry.S
@@ -8,9 +8,12 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>

 @  VFP entry point.
 @
@@ -22,11 +25,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT_COUNT
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	add	r11, r4, #1		@ increment it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	inc_preempt_count r10, r4
 	enable_irq
 	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number
@@ -35,12 +34,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)

 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, lr
 ENDPROC(vfp_null_entry)
@@ -53,12 +47,7 @@ ENDPROC(vfp_null_entry)
 	__INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	ldr	r0, VFP_arch_address
 	str	r0, [r0]		@ set to non-zero value
 	mov	pc, r9			@ we have handled the fault
......
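
The point of replacing the open-coded `ldr/add/str` and `ldr/sub/str` sequences with the new macros is that the increment in `do_vfp` must be balanced by a decrement on every exit path (`vfp_null_entry`, `vfp_testing_entry`, and the return paths in vfphw.S below). A toy C model of that invariant; `vfp_present` is a hypothetical stand-in for whether a real VFP vector is installed:

```c
#include <stdio.h>

static int preempt_count;	/* stands in for thread_info->preempt_count */

static void do_vfp_model(int vfp_present)
{
	preempt_count++;		/* inc_preempt_count in do_vfp */
	if (!vfp_present) {
		preempt_count--;	/* dec_preempt_count_ti in vfp_null_entry */
		return;
	}
	/* ... VFP support code runs with preemption disabled ... */
	preempt_count--;		/* dec_preempt_count_ti on the vfphw.S exits */
}

int main(void)
{
	do_vfp_model(0);
	do_vfp_model(1);
	printf("balanced: %s\n", preempt_count == 0 ? "yes" : "no");
	return 0;
}
```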
arch/arm/vfp/vfphw.S
@@ -14,10 +14,13 @@
  *  r10 points at the start of the private FP workspace in the thread structure
  *  sp  points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
 #include <linux/kern_levels.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>

 	.macro	DBGSTR, str
 #ifdef DEBUG
@@ -179,12 +182,7 @@ vfp_hw_state_valid:
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, r9			@ we think we have handled things
@@ -203,12 +201,7 @@ look_for_VFP_exceptions:
 	@ not recognised by VFP
 	DBGSTR	"not VFP"
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, lr

 process_exception:
......