Commit 19accfd3 authored by Russell King

ARM: move vector stubs

Move the machine vector stubs into the page above the vector page,
which we can prevent from being visible to userspace.  Also move
the reset stub, and place the swi vector at a location that the
'ldr' can get to it.

This hides pointers into the kernel which could give valuable
information to attackers, and reduces the number of exploitable
instructions at a fixed address.

Cc: <stable@vger.kernel.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 5b43e7a3
...@@ -218,7 +218,8 @@ config VECTORS_BASE ...@@ -218,7 +218,8 @@ config VECTORS_BASE
default DRAM_BASE if REMAP_VECTORS_TO_RAM default DRAM_BASE if REMAP_VECTORS_TO_RAM
default 0x00000000 default 0x00000000
help help
The base address of exception vectors. The base address of exception vectors. This must be two pages
in size.
config ARM_PATCH_PHYS_VIRT config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime" if EMBEDDED bool "Patch physical to virtual translations at runtime" if EMBEDDED
......
...@@ -944,9 +944,9 @@ __kuser_helper_end: ...@@ -944,9 +944,9 @@ __kuser_helper_end:
/* /*
* Vector stubs. * Vector stubs.
* *
* This code is copied to 0xffff0200 so we can use branches in the * This code is copied to 0xffff1000 so we can use branches in the
* vectors, rather than ldr's. Note that this code must not * vectors, rather than ldr's. Note that this code must not exceed
* exceed 0x300 bytes. * a page size.
* *
* Common stub entry macro: * Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
...@@ -995,6 +995,15 @@ ENDPROC(vector_\name) ...@@ -995,6 +995,15 @@ ENDPROC(vector_\name)
.globl __stubs_start .globl __stubs_start
__stubs_start: __stubs_start:
@ This must be the first word
.word vector_swi
vector_rst:
ARM( swi SYS_ERROR0 )
THUMB( svc #0 )
THUMB( nop )
b vector_und
/* /*
* Interrupt dispatcher * Interrupt dispatcher
*/ */
...@@ -1088,6 +1097,16 @@ __stubs_start: ...@@ -1088,6 +1097,16 @@ __stubs_start:
.align 5 .align 5
/*=============================================================================
* Address exception handler
*-----------------------------------------------------------------------------
* These aren't too critical.
* (they're not supposed to happen, and won't happen in 32-bit data mode).
*/
vector_addrexcptn:
b vector_addrexcptn
/*============================================================================= /*=============================================================================
* Undefined FIQs * Undefined FIQs
*----------------------------------------------------------------------------- *-----------------------------------------------------------------------------
...@@ -1101,35 +1120,14 @@ __stubs_start: ...@@ -1101,35 +1120,14 @@ __stubs_start:
vector_fiq: vector_fiq:
subs pc, lr, #4 subs pc, lr, #4
/*=============================================================================
* Address exception handler
*-----------------------------------------------------------------------------
* These aren't too critical.
* (they're not supposed to happen, and won't happen in 32-bit data mode).
*/
vector_addrexcptn:
b vector_addrexcptn
/*
* We group all the following data together to optimise
* for CPUs with separate I & D caches.
*/
.align 5
.LCvswi:
.word vector_swi
.globl __stubs_end .globl __stubs_end
__stubs_end: __stubs_end:
.equ stubs_offset, __vectors_start + 0x200 - __stubs_start .equ stubs_offset, __vectors_start + 0x1000 - __stubs_start
.globl __vectors_start .globl __vectors_start
__vectors_start: __vectors_start:
ARM( swi SYS_ERROR0 ) W(b) vector_rst + stubs_offset
THUMB( svc #0 )
THUMB( nop )
W(b) vector_und + stubs_offset W(b) vector_und + stubs_offset
W(ldr) pc, .LCvswi + stubs_offset W(ldr) pc, .LCvswi + stubs_offset
W(b) vector_pabt + stubs_offset W(b) vector_pabt + stubs_offset
......
...@@ -837,7 +837,7 @@ void __init early_trap_init(void *vectors_base) ...@@ -837,7 +837,7 @@ void __init early_trap_init(void *vectors_base)
* are visible to the instruction stream. * are visible to the instruction stream.
*/ */
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
/* /*
...@@ -852,7 +852,7 @@ void __init early_trap_init(void *vectors_base) ...@@ -852,7 +852,7 @@ void __init early_trap_init(void *vectors_base)
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes)); sigreturn_codes, sizeof(sigreturn_codes));
flush_icache_range(vectors, vectors + PAGE_SIZE); flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT); modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */ #else /* ifndef CONFIG_CPU_V7M */
/* /*
......
...@@ -1160,7 +1160,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) ...@@ -1160,7 +1160,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/* /*
* Allocate the vector page early. * Allocate the vector page early.
*/ */
vectors = early_alloc(PAGE_SIZE); vectors = early_alloc(PAGE_SIZE * 2);
early_trap_init(vectors); early_trap_init(vectors);
...@@ -1210,10 +1210,18 @@ static void __init devicemaps_init(struct machine_desc *mdesc) ...@@ -1210,10 +1210,18 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
if (!vectors_high()) { if (!vectors_high()) {
map.virtual = 0; map.virtual = 0;
map.length = PAGE_SIZE * 2;
map.type = MT_LOW_VECTORS; map.type = MT_LOW_VECTORS;
create_mapping(&map); create_mapping(&map);
} }
/* Now create a kernel read-only mapping */
map.pfn += 1;
map.virtual = 0xffff0000 + PAGE_SIZE;
map.length = PAGE_SIZE;
map.type = MT_LOW_VECTORS;
create_mapping(&map);
/* /*
* Ask the machine support to map in the statically mapped devices. * Ask the machine support to map in the statically mapped devices.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment