Commit ede1d63f authored by Linus Torvalds

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull second set of ARM changes from Russell King:
 "This is the remainder of the ARM changes for this merge window.
  Included in this request are:

   - fixes for kprobes for big-endian support
   - fix tracing in soft_restart
   - avoid phys address overflow in kdump code
   - fix reporting of read-only pmd bits in kernel page table dump
   - remove unnecessary (and possibly buggy) call to outer_flush_all()
   - fix three sparse warnings (missing header file for function
     prototypes)
   - fix pj4 crashing single zImage (thanks to arm-soc merging changes
     which enable this with knowledge that the corresponding fix had
     not even been submitted for my tree before the merge window opened)
   - vfp macro cleanups
   - dump register state on undefined instruction userspace faults when
     debugging"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  Dump the registers on undefined instruction userspace faults
  ARM: 8018/1: Add {inc,dec}_preempt_count asm macros
  ARM: 8017/1: Move asm macro get_thread_info to asm/assembler.h
  ARM: 8016/1: Check cpu id in pj4_cp0_init.
  ARM: 8015/1: Add cpu_is_pj4 to distinguish PJ4 because it has some differences with V7
  ARM: add missing system_misc.h include to process.c
  ARM: 8009/1: dcscb.c: remove call to outer_flush_all()
  ARM: 8014/1: mm: fix reporting of read-only PMD bits
  ARM: 8012/1: kdump: Avoid overflow when converting pfn to physaddr
  ARM: 8010/1: avoid tracers in soft_restart
  ARM: kprobes-test: Workaround GAS .align bug
  ARM: kprobes-test: use <asm/opcodes.h> for Thumb instruction building
  ARM: kprobes-test: use <asm/opcodes.h> for ARM instruction building
  ARM: kprobes-test: use <asm/opcodes.h> for instruction accesses
  ARM: probes: fix instruction fetch order with <asm/opcodes.h>
parents b42e6dc6 98f07013
@@ -23,6 +23,7 @@
#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>
#define IOMEM(x) (x)
@@ -174,6 +175,47 @@
restore_irqs_notrace \oldcpsr
.endm
+/*
+ * Get current thread_info.
+ */
+.macro get_thread_info, rd
+ARM( mov \rd, sp, lsr #13 )
+THUMB( mov \rd, sp )
+THUMB( lsr \rd, \rd, #13 )
+mov \rd, \rd, lsl #13
+.endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+.macro inc_preempt_count, ti, tmp
+ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
+add \tmp, \tmp, #1 @ increment it
+str \tmp, [\ti, #TI_PREEMPT]
+.endm
+
+.macro dec_preempt_count, ti, tmp
+ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
+sub \tmp, \tmp, #1 @ decrement it
+str \tmp, [\ti, #TI_PREEMPT]
+.endm
+
+.macro dec_preempt_count_ti, ti, tmp
+get_thread_info \ti
+dec_preempt_count \ti, \tmp
+.endm
+#else
+.macro inc_preempt_count, ti, tmp
+.endm
+
+.macro dec_preempt_count, ti, tmp
+.endm
+
+.macro dec_preempt_count_ti, ti, tmp
+.endm
+#endif
#define USER(x...) \
9999: x; \
.pushsection __ex_table,"a"; \
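The get_thread_info macro added above relies on the kernel's 8 KiB, base-aligned thread stacks: struct thread_info sits at the bottom of the stack, so shifting sp right then left by 13 bits simply clears the low 13 bits. A minimal C sketch of the same computation, assuming THREAD_SIZE is 8192 (the kernel's real helper is current_thread_info() in <asm/thread_info.h>):

```c
#include <stdint.h>

#define THREAD_SIZE 8192u  /* 8 KiB kernel stack, matching the lsr/lsl #13 above */

/* Round the stack pointer down to the start of the stack area, where
 * struct thread_info lives; equivalent to the asm macro's
 * "mov rd, sp, lsr #13; mov rd, rd, lsl #13" sequence.
 */
static inline uintptr_t thread_info_base(uintptr_t sp)
{
	return sp & ~(uintptr_t)(THREAD_SIZE - 1);
}
```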
@@ -221,4 +221,23 @@ static inline int cpu_is_xsc3(void)
#define cpu_is_xscale() 1
#endif
+/*
+ * Marvell's PJ4 core is based on V7 version. It has some modification
+ * for coprocessor setting. For this reason, we need a way to distinguish
+ * it.
+ */
+#ifndef CONFIG_CPU_PJ4
+#define cpu_is_pj4() 0
+#else
+static inline int cpu_is_pj4(void)
+{
+unsigned int id;
+
+id = read_cpuid_id();
+if ((id & 0xfffffff0) == 0x562f5840)
+return 1;
+
+return 0;
+}
+#endif
#endif
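The constant 0x562f5840 is the PJ4's MIDR value with the revision nibble masked off: implementer byte 0x56 is Marvell, and the remaining variant/architecture/part fields identify the PJ4 core. A small sketch of the comparison, assuming only that the MIDR value has already been read (the kernel obtains it via read_cpuid_id()):

```c
#include <stdbool.h>
#include <stdint.h>

#define PJ4_MIDR_MASK  0xfffffff0u  /* ignore bits [3:0], the CPU revision */
#define PJ4_MIDR_MATCH 0x562f5840u  /* Marvell (0x56) PJ4, from the hunk above */

static bool is_pj4_midr(uint32_t midr)
{
	return (midr & PJ4_MIDR_MASK) == PJ4_MIDR_MATCH;
}
```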
@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!csize)
return 0;
-vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
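The kdump fix above is about integer width, not the mapping itself: pfn is an unsigned long, so pfn << PAGE_SHIFT is evaluated in 32 bits on ARM and truncates physical addresses at or above 4 GiB, which LPAE systems can have; __pfn_to_phys() widens to phys_addr_t before shifting. A self-contained sketch of the failure mode, assuming PAGE_SHIFT is 12 and a 64-bit phys_addr_t as under CONFIG_ARM_LPAE:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;  /* 64-bit with CONFIG_ARM_LPAE */

int main(void)
{
	uint32_t pfn = 0x100000;  /* page frame at physical address 4 GiB */

	uint32_t truncated = pfn << PAGE_SHIFT;               /* wraps to 0 */
	phys_addr_t correct = (phys_addr_t)pfn << PAGE_SHIFT; /* 0x100000000 */

	printf("shift in 32 bits:    %#" PRIx32 "\n", truncated);
	printf("__pfn_to_phys style: %#" PRIx64 "\n", correct);
	return 0;
}
```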
@@ -236,11 +236,6 @@
movs pc, lr @ return & move spsr_svc into cpsr
.endm
-.macro get_thread_info, rd
-mov \rd, sp, lsr #13
-mov \rd, \rd, lsl #13
-.endm
@
@ 32-bit wide "mov pc, reg"
@
@@ -306,12 +301,6 @@
.endm
#endif /* ifdef CONFIG_CPU_V7M / else */
-.macro get_thread_info, rd
-mov \rd, sp
-lsr \rd, \rd, #13
-mov \rd, \rd, lsl #13
-.endm
@
@ 32-bit wide "mov pc, reg"
@
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
+#include <asm/opcodes.h>
#include "kprobes.h"
@@ -153,7 +154,8 @@ kprobe_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
if (handler) {
/* We can emulate the instruction in (possibly) modified form */
-asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
+asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
+(rn << 16) | reglist);
asi->insn_handler = handler;
return INSN_GOOD;
}
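Every __mem_to_opcode_*() / __opcode_to_mem_*() call added in this series handles BE8 kernels, where data accesses are big-endian but instructions are still stored little-endian, so a plain load of an instruction yields a byte-swapped value. A simplified sketch of what these helpers amount to (the real macros live in arch/arm/include/asm/opcodes.h and also cover the older BE32 case):

```c
#include <stdint.h>

static inline uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

#ifdef CONFIG_CPU_ENDIAN_BE8
/* BE8: byte-swap between memory order and instruction order */
#define __mem_to_opcode_arm(x)	swab32(x)
#define __opcode_to_mem_arm(x)	swab32(x)
#else
/* little-endian: the conversions are the identity */
#define __mem_to_opcode_arm(x)	(x)
#define __opcode_to_mem_arm(x)	(x)
#endif

/* A 32-bit Thumb instruction is two consecutive 16-bit halfwords */
#define __opcode_thumb32_compose(first, second) \
	(((uint32_t)(first) << 16) | (uint16_t)(second))
```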
@@ -113,7 +113,7 @@
* @ start of inline data...
* .ascii "mov r0, r7" @ text title for test case
* .byte 0
-* .align 2
+* .align 2, 0
*
* @ TEST_ARG_REG
* .byte ARG_TYPE_REG
@@ -1333,7 +1333,8 @@ static void test_case_failed(const char *message)
static unsigned long next_instruction(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
-if ((pc & 1) && !is_wide_instruction(*(u16 *)(pc - 1)))
+if ((pc & 1) &&
+!is_wide_instruction(__mem_to_opcode_thumb16(*(u16 *)(pc - 1))))
return pc + 2;
else
#endif
@@ -1378,13 +1379,13 @@ static uintptr_t __used kprobes_test_case_start(const char *title, void *stack)
if (test_case_is_thumb) {
u16 *p = (u16 *)(test_code & ~1);
-current_instruction = p[0];
+current_instruction = __mem_to_opcode_thumb16(p[0]);
if (is_wide_instruction(current_instruction)) {
-current_instruction <<= 16;
-current_instruction |= p[1];
+u16 instr2 = __mem_to_opcode_thumb16(p[1]);
+current_instruction = __opcode_thumb32_compose(current_instruction, instr2);
}
} else {
-current_instruction = *(u32 *)test_code;
+current_instruction = __mem_to_opcode_arm(*(u32 *)test_code);
}
if (current_title[0] == '.')
@@ -115,7 +115,7 @@ struct test_arg_end {
/* multiple strings to be concatenated. */ \
".ascii "#title" \n\t" \
".byte 0 \n\t" \
-".align 2 \n\t"
+".align 2, 0 \n\t"
#define TEST_ARG_REG(reg, val) \
".byte "__stringify(ARG_TYPE_REG)" \n\t" \
@@ -149,9 +149,9 @@ t32_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
enum probes_insn ret = kprobe_decode_ldmstm(insn, asi, d);
/* Fixup modified instruction to have halfwords in correct order...*/
-insn = asi->insn[0];
-((u16 *)asi->insn)[0] = insn >> 16;
-((u16 *)asi->insn)[1] = insn & 0xffff;
+insn = __mem_to_opcode_arm(asi->insn[0]);
+((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
+((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
return ret;
}
@@ -516,7 +516,7 @@ t16_decode_hiregs(probes_opcode_t insn, struct arch_probes_insn *asi,
{
insn &= ~0x00ff;
insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
-((u16 *)asi->insn)[0] = insn;
+((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
asi->insn_handler = t16_emulate_hiregs;
return INSN_GOOD;
}
@@ -547,8 +547,10 @@ t16_decode_push(probes_opcode_t insn, struct arch_probes_insn *asi,
* and call it with R9=SP and LR in the register list represented
* by R8.
*/
-((u16 *)asi->insn)[0] = 0xe929; /* 1st half STMDB R9!,{} */
-((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
+/* 1st half STMDB R9!,{} */
+((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
+/* 2nd half (register list) */
+((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
asi->insn_handler = t16_emulate_push;
return INSN_GOOD;
}
@@ -600,8 +602,10 @@ t16_decode_pop(probes_opcode_t insn, struct arch_probes_insn *asi,
* and call it with R9=SP and PC in the register list represented
* by R8.
*/
-((u16 *)asi->insn)[0] = 0xe8b9; /* 1st half LDMIA R9!,{} */
-((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
+/* 1st half LDMIA R9!,{} */
+((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
+/* 2nd half (register list) */
+((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
: t16_emulate_pop_nopc;
return INSN_GOOD;
@@ -26,6 +26,7 @@
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <asm/traps.h>
+#include <asm/opcodes.h>
#include <asm/cacheflush.h>
#include <linux/percpu.h>
#include <linux/bug.h>
@@ -67,10 +68,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
#ifdef CONFIG_THUMB2_KERNEL
thumb = true;
addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
-insn = ((u16 *)addr)[0];
+insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
if (is_wide_instruction(insn)) {
-insn <<= 16;
-insn |= ((u16 *)addr)[1];
+u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
+insn = __opcode_thumb32_compose(insn, inst2);
decode_insn = thumb32_probes_decode_insn;
actions = kprobes_t32_actions;
} else {
@@ -81,7 +82,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
thumb = false;
if (addr & 0x3)
return -EINVAL;
-insn = *p->addr;
+insn = __mem_to_opcode_arm(*p->addr);
decode_insn = arm_probes_decode_insn;
actions = kprobes_arm_actions;
#endif
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
+#include <asm/cputype.h>
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
@@ -80,6 +81,9 @@ static int __init pj4_cp0_init(void)
{
u32 cp_access;
+if (!cpu_is_pj4())
+return 0;
+
cp_access = pj4_cp_access_read() & ~0xf;
pj4_cp_access_write(cp_access);
@@ -202,13 +202,14 @@ prepare_emulated_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
#ifdef CONFIG_THUMB2_KERNEL
if (thumb) {
u16 *thumb_insn = (u16 *)asi->insn;
-thumb_insn[1] = 0x4770; /* Thumb bx lr */
-thumb_insn[2] = 0x4770; /* Thumb bx lr */
+/* Thumb bx lr */
+thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
+thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
return insn;
}
-asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
+asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
#else
-asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
+asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
#endif
/* Make an ARM instruction unconditional */
if (insn < 0xe0000000)
@@ -228,12 +229,12 @@ set_emulated_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
if (thumb) {
u16 *ip = (u16 *)asi->insn;
if (is_wide_instruction(insn))
-*ip++ = insn >> 16;
-*ip++ = insn;
+*ip++ = __opcode_to_mem_thumb16(insn >> 16);
+*ip++ = __opcode_to_mem_thumb16(insn);
return;
}
#endif
-asi->insn[0] = insn;
+asi->insn[0] = __opcode_to_mem_arm(insn);
}
/*
@@ -38,6 +38,7 @@
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
+#include <asm/system_misc.h>
#include <asm/mach/time.h>
#include <asm/tls.h>
@@ -99,7 +100,7 @@ void soft_restart(unsigned long addr)
u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
/* Disable interrupts first */
-local_irq_disable();
+raw_local_irq_disable();
local_fiq_disable();
/* Disable the L2 if we're the last man standing. */
@@ -445,6 +445,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (user_debug & UDBG_UNDEFINED) {
printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
+__show_regs(regs);
dump_instr(KERN_INFO, regs);
}
#endif
@@ -137,11 +137,16 @@ static void dcscb_power_down(void)
v7_exit_coherency_flush(all);
/*
-* This is a harmless no-op. On platforms with a real
-* outer cache this might either be needed or not,
-* depending on where the outer cache sits.
+* A full outer cache flush could be needed at this point
+* on platforms with such a cache, depending on where the
+* outer cache sits. In some cases the notion of a "last
+* cluster standing" would need to be implemented if the
+* outer cache is shared across clusters. In any case, when
+* the outer cache needs flushing, there is no concurrent
+* access to the cache controller to worry about and no
+* special locking besides what is already provided by the
+* MCPM state machinery is needed.
*/
-outer_flush_all();
/*
* Disable cluster-level coherency by masking
@@ -120,34 +120,51 @@ static const struct prot_bits pte_bits[] = {
};
static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
-/* These are approximate */
+#ifdef CONFIG_ARM_LPAE
+{
+.mask = PMD_SECT_USER,
+.val = PMD_SECT_USER,
+.set = "USR",
+}, {
+.mask = PMD_SECT_RDONLY,
+.val = PMD_SECT_RDONLY,
+.set = "ro",
+.clear = "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
{
-.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-.val = 0,
+.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
.set = " ro",
}, {
-.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
.set = " RW",
}, {
-.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ,
.set = "USR ro",
}, {
-.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.set = "USR RW",
-#else
+#else /* ARMv4/ARMv5 */
+/* These are approximate */
{
-.mask = PMD_SECT_USER,
-.val = PMD_SECT_USER,
-.set = "USR",
+.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.val = 0,
+.set = " ro",
}, {
-.mask = PMD_SECT_RDONLY,
-.val = PMD_SECT_RDONLY,
-.set = "ro",
-.clear = "RW",
+.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.val = PMD_SECT_AP_WRITE,
+.set = " RW",
+}, {
+.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.val = PMD_SECT_AP_READ,
+.set = "USR ro",
+}, {
+.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+.set = "USR RW",
#endif
}, {
.mask = PMD_SECT_XN,
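The dump fix works because, on ARMv6+ classic (non-LPAE) page tables, a section's access permissions are encoded by APX together with AP[1:0]; leaving PMD_SECT_APX out of the mask made kernel read-only sections (APX=1, AP=01) indistinguishable from kernel read-write ones. A sketch of the decode the updated table expresses, assuming SCTLR.AFE is off:

```c
#include <stdio.h>

/* Simplified ARMv6+ section permission decode matching the table above:
 * apx is the APX bit, ap is AP[1:0].
 */
static const char *section_perms(unsigned apx, unsigned ap)
{
	switch ((apx << 2) | ap) {
	case 0x1: return "    RW";  /* kernel RW, no user access */
	case 0x2: return "USR ro";  /* kernel RW, user read-only */
	case 0x3: return "USR RW";  /* kernel RW, user RW        */
	case 0x5: return "    ro";  /* APX set: kernel read-only */
	default:  return "??";
	}
}

int main(void)
{
	/* Before the fix, APX was masked out, so this case read as "    RW" */
	printf("APX=1 AP=01 -> %s\n", section_perms(1, 1));
	return 0;
}
```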
@@ -8,9 +8,12 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
@ VFP entry point.
@
@@ -22,11 +25,7 @@
@ IRQs disabled.
@
ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT_COUNT
-ldr r4, [r10, #TI_PREEMPT] @ get preempt count
-add r11, r4, #1 @ increment it
-str r11, [r10, #TI_PREEMPT]
-#endif
+inc_preempt_count r10, r4
enable_irq
ldr r4, .LCvfp
ldr r11, [r10, #TI_CPU] @ CPU number
@@ -35,12 +34,7 @@ ENTRY(do_vfp)
ENDPROC(do_vfp)
ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-get_thread_info r10
-ldr r4, [r10, #TI_PREEMPT] @ get preempt count
-sub r11, r4, #1 @ decrement it
-str r11, [r10, #TI_PREEMPT]
-#endif
+dec_preempt_count_ti r10, r4
mov pc, lr
ENDPROC(vfp_null_entry)
@@ -53,12 +47,7 @@ ENDPROC(vfp_null_entry)
__INIT
ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-get_thread_info r10
-ldr r4, [r10, #TI_PREEMPT] @ get preempt count
-sub r11, r4, #1 @ decrement it
-str r11, [r10, #TI_PREEMPT]
-#endif
+dec_preempt_count_ti r10, r4
ldr r0, VFP_arch_address
str r0, [r0] @ set to non-zero value
mov pc, r9 @ we have handled the fault
@@ -14,10 +14,13 @@
* r10 points at the start of the private FP workspace in the thread structure
* sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
*/
+#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
#include <linux/kern_levels.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
.macro DBGSTR, str
#ifdef DEBUG
@@ -179,12 +182,7 @@ vfp_hw_state_valid:
@ else it's one 32-bit instruction, so
@ always subtract 4 from the following
@ instruction address.
-#ifdef CONFIG_PREEMPT_COUNT
-get_thread_info r10
-ldr r4, [r10, #TI_PREEMPT] @ get preempt count
-sub r11, r4, #1 @ decrement it
-str r11, [r10, #TI_PREEMPT]
-#endif
+dec_preempt_count_ti r10, r4
mov pc, r9 @ we think we have handled things
@@ -203,12 +201,7 @@ look_for_VFP_exceptions:
@ not recognised by VFP
DBGSTR "not VFP"
-#ifdef CONFIG_PREEMPT_COUNT
-get_thread_info r10
-ldr r4, [r10, #TI_PREEMPT] @ get preempt count
-sub r11, r4, #1 @ decrement it
-str r11, [r10, #TI_PREEMPT]
-#endif
+dec_preempt_count_ti r10, r4
mov pc, lr
process_exception: