Commit e6e5494c authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] vdso: randomize the i386 vDSO by moving it into a vma

Move the i386 VDSO down into a vma and thus randomize it.

Besides the security implications, this feature also helps debuggers, which
can COW a vma-backed VDSO just like a normal DSO and can thus single-step it
and apply other debugging features.

It's good for hypervisors (Xen, VMware) too, which typically live in the same
high-mapped address space as the VDSO, hence whenever the VDSO is used, they
get lots of guest pagefaults and have to fix such guest accesses up - which
slows things down instead of speeding things up (the primary purpose of the
VDSO).

There's a new CONFIG_COMPAT_VDSO (default=y) option, which provides support
for older glibcs that still rely on a prelinked high-mapped VDSO.  Newer
distributions (using glibc 2.3.3 or later) can turn this option off.  Turning
it off is also recommended for security reasons: attackers cannot use the
predictable high-mapped VDSO page as a syscall trampoline anymore.

There is also a new vdso=[0|1] boot option and a runtime
/proc/sys/vm/vdso_enabled sysctl switch, which allow the VDSO to be turned
on and off.
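For illustration, the sysctl behaves like any other /proc/sys entry. A minimal
C sketch (not part of the patch; it only assumes a kernel built with this
feature):

    #include <stdio.h>

    int main(void)
    {
            int enabled = -1;
            FILE *f = fopen("/proc/sys/vm/vdso_enabled", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &enabled) != 1)
                    enabled = -1;
            fclose(f);
            printf("vdso_enabled = %d\n", enabled);

            /* Writing "0" here (as root) would disable the VDSO for
             * subsequently exec()ed processes, like booting with vdso=0. */
            return 0;
    }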

(This version of the VDSO-randomization patch also has working ELF
coredumping; the previous patch crashed in the coredumping code.)

This code is a combined work of the exec-shield VDSO randomization
code and Gerd Hoffmann's hypervisor-centric VDSO patch. Rusty Russell
started this patch and I completed it.

[akpm@osdl.org: cleanups]
[akpm@osdl.org: compile fix]
[akpm@osdl.org: compile fix 2]
[akpm@osdl.org: compile fix 3]
[akpm@osdl.org: revert MAXMEM change]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Cc: Gerd Hoffmann <kraxel@suse.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d5fb3426
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1669,6 +1669,10 @@ running once the system is up.
 	usbhid.mousepoll=
 			[USBHID] The interval which mice are to be polled at.
 
+	vdso=		[IA-32]
+			vdso=1: enable VDSO (default)
+			vdso=0: disable VDSO mapping
+
 	video=		[FB] Frame buffer configuration
 			See Documentation/fb/modedb.txt.
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -780,6 +780,17 @@ config HOTPLUG_CPU
 	  enable suspend on SMP systems. CPUs can be controlled through
 	  /sys/devices/system/cpu.
 
+config COMPAT_VDSO
+	bool "Compat VDSO support"
+	default y
+	help
+	  Map the VDSO to the predictable old-style address too.
+	---help---
+	  Say N here if you are running a sufficiently recent glibc
+	  version (2.3.3 or later), to remove the high-mapped
+	  VDSO mapping and to exclusively use the randomized VDSO.
+
+	  If unsure, say Y.
+
 endmenu
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
 #include <asm/fixmap.h>
 #include <asm/processor.h>
 #include <asm/thread_info.h>
+#include <asm/elf.h>
 
 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -54,6 +55,7 @@ void foo(void)
 	OFFSET(TI_preempt_count, thread_info, preempt_count);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
 	OFFSET(TI_restart_block, thread_info, restart_block);
+	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
 	BLANK();
 
 	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
@@ -69,7 +71,7 @@ void foo(void)
 		 sizeof(struct tss_struct));
 
 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
-	DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL));
+	DEFINE(VDSO_PRELINK, VDSO_PRELINK);
 
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
 }
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -270,7 +270,12 @@ sysenter_past_esp:
 	pushl $(__USER_CS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET cs, 0*/
-	pushl $SYSENTER_RETURN
+	/*
+	 * Push current_thread_info()->sysenter_return to the stack.
+	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
+	 */
+	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET eip, 0
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 			goto give_sigsegv;
 	}
 
-	restorer = &__kernel_sigreturn;
+	restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
@@ -447,7 +447,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		goto give_sigsegv;
 
 	/* Set up to return from userspace.  */
-	restorer = &__kernel_rt_sigreturn;
+	restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
 	if (ka->sa.sa_flags & SA_RESTORER)
 		restorer = ka->sa.sa_restorer;
 	err |= __put_user(restorer, &frame->pretcode);
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -2,6 +2,8 @@
  * linux/arch/i386/kernel/sysenter.c
  *
  * (C) Copyright 2002 Linus Torvalds
+ * Portions based on the vdso-randomization code from exec-shield:
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
  *
  * This file contains the needed initializations to support sysenter.
  */
@@ -13,12 +15,31 @@
 #include <linux/gfp.h>
 #include <linux/string.h>
 #include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/module.h>
 
 #include <asm/cpufeature.h>
 #include <asm/msr.h>
 #include <asm/pgtable.h>
 #include <asm/unistd.h>
 
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+
+EXPORT_SYMBOL_GPL(vdso_enabled);
+
+static int __init vdso_setup(char *s)
+{
+	vdso_enabled = simple_strtoul(s, NULL, 0);
+
+	return 1;
+}
+
+__setup("vdso=", vdso_setup);
+
 extern asmlinkage void sysenter_entry(void);
 
 void enable_sep_cpu(void)
@@ -45,23 +66,122 @@ void enable_sep_cpu(void)
  */
 extern const char vsyscall_int80_start, vsyscall_int80_end;
 extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+static void *syscall_page;
 
 int __init sysenter_setup(void)
 {
-	void *page = (void *)get_zeroed_page(GFP_ATOMIC);
+	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
 
-	__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
+#ifdef CONFIG_COMPAT_VDSO
+	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
+	printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+#else
+	/*
+	 * In the non-compat case the ELF coredumping code needs the fixmap:
+	 */
+	__set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
+#endif
 
 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
-		memcpy(page,
+		memcpy(syscall_page,
 		       &vsyscall_int80_start,
 		       &vsyscall_int80_end - &vsyscall_int80_start);
 		return 0;
 	}
 
-	memcpy(page,
+	memcpy(syscall_page,
 	       &vsyscall_sysenter_start,
 	       &vsyscall_sysenter_end - &vsyscall_sysenter_start);
 
 	return 0;
 }
+
+static struct page *syscall_nopage(struct vm_area_struct *vma,
+				unsigned long adr, int *type)
+{
+	struct page *p = virt_to_page(adr - vma->vm_start + syscall_page);
+
+	get_page(p);
+	return p;
+}
+
+/* Prevent VMA merging */
+static void syscall_vma_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct syscall_vm_ops = {
+	.close = syscall_vma_close,
+	.nopage = syscall_nopage,
+};
+
+/* Defined in vsyscall-sysenter.S */
+extern void SYSENTER_RETURN;
+
+/* Setup a VMA at program startup for the vsyscall page */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret;
+
+	down_write(&mm->mmap_sem);
+	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	if (!vma) {
+		ret = -ENOMEM;
+		goto up_fail;
+	}
+
+	vma->vm_start = addr;
+	vma->vm_end = addr + PAGE_SIZE;
+	/* MAYWRITE to allow gdb to COW and set breakpoints */
+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+	vma->vm_flags |= mm->def_flags;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_ops = &syscall_vm_ops;
+	vma->vm_mm = mm;
+
+	ret = insert_vm_struct(mm, vma);
+	if (ret)
+		goto free_vma;
+
+	current->mm->context.vdso = (void *)addr;
+	current_thread_info()->sysenter_return =
+				    (void *)VDSO_SYM(&SYSENTER_RETURN);
+	mm->total_vm++;
+up_fail:
+	up_write(&mm->mmap_sem);
+	return ret;
+
+free_vma:
+	kmem_cache_free(vm_area_cachep, vma);
+	return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+		return "[vdso]";
+	return NULL;
+}
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+	return NULL;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+	return 0;
+}
+
+int in_gate_area_no_task(unsigned long addr)
+{
+	return 0;
+}
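For reference, this is the hand-off that arch_setup_additional_pages() and the
ARCH_DLINFO change (see the include/asm-i386/elf.h hunk below) make visible to
userspace: glibc locates the vsyscall entry point and the vDSO image through
the ELF auxiliary vector rather than through a fixed address. A minimal
userspace sketch, not part of the patch, assuming a 32-bit build
(Elf32_auxv_t):

    #include <elf.h>
    #include <stdio.h>

    int main(int argc, char **argv, char **envp)
    {
            Elf32_auxv_t *auxv;

            (void)argc; (void)argv;
            /* The auxiliary vector sits just past the environment block. */
            while (*envp++)
                    ;
            for (auxv = (Elf32_auxv_t *)envp; auxv->a_type != AT_NULL; auxv++) {
                    if (auxv->a_type == AT_SYSINFO)
                            printf("AT_SYSINFO      (vsyscall entry): %#lx\n",
                                   (unsigned long)auxv->a_un.a_val);
                    if (auxv->a_type == AT_SYSINFO_EHDR)
                            printf("AT_SYSINFO_EHDR (vDSO ELF header): %#lx\n",
                                   (unsigned long)auxv->a_un.a_val);
            }
            return 0;
    }

Run twice with vdso_enabled=1 and the printed addresses differ between runs;
with CONFIG_COMPAT_VDSO=y, AT_SYSINFO_EHDR stays at the fixed high address.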
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -42,10 +42,10 @@ __kernel_vsyscall:
 	/* 7: align return point with nop's to make disassembly easier */
 	.space 7,0x90
 
-	/* 14: System call restart point is here! (SYSENTER_RETURN - 2) */
+	/* 14: System call restart point is here! (SYSENTER_RETURN-2) */
 	jmp .Lenter_kernel
 	/* 16: System call normal return point is here! */
-	.globl SYSENTER_RETURN	/* Symbol used by entry.S. */
+	.globl SYSENTER_RETURN	/* Symbol used by sysenter.c */
 SYSENTER_RETURN:
 	pop %ebp
 .Lpop_ebp:
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -7,7 +7,7 @@
 
 SECTIONS
 {
-  . = VSYSCALL_BASE + SIZEOF_HEADERS;
+  . = VDSO_PRELINK + SIZEOF_HEADERS;
 
   .hash           : { *(.hash) }	:text
   .dynsym         : { *(.dynsym) }
@@ -20,7 +20,7 @@ SECTIONS
      For the layouts to match, we need to skip more than enough
      space for the dynamic symbol table et al.  If this amount
      is insufficient, ld -shared will barf.  Just increase it here.  */
-  . = VSYSCALL_BASE + 0x400;
+  . = VDSO_PRELINK + 0x400;
 
   .text           : { *(.text) }	:text =0x90909090
   .note		  : { *(.note.*) }	:text :note
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -122,6 +122,11 @@ struct mem_size_stats
 	unsigned long private_dirty;
 };
 
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	return NULL;
+}
+
 static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct proc_maps_private *priv = m->private;
@@ -158,22 +163,23 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
 		pad_len_spaces(m, len);
 		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
 	} else {
-		if (mm) {
-			if (vma->vm_start <= mm->start_brk &&
+		const char *name = arch_vma_name(vma);
+		if (!name) {
+			if (mm) {
+				if (vma->vm_start <= mm->start_brk &&
 						vma->vm_end >= mm->brk) {
-				pad_len_spaces(m, len);
-				seq_puts(m, "[heap]");
-			} else {
-				if (vma->vm_start <= mm->start_stack &&
-				    vma->vm_end >= mm->start_stack) {
-					pad_len_spaces(m, len);
-					seq_puts(m, "[stack]");
+					name = "[heap]";
+				} else if (vma->vm_start <= mm->start_stack &&
+					   vma->vm_end >= mm->start_stack) {
+					name = "[stack]";
 				}
+			} else {
+				name = "[vdso]";
 			}
-		} else {
+		}
+		if (name) {
 			pad_len_spaces(m, len);
-			seq_puts(m, "[vdso]");
+			seq_puts(m, name);
 		}
 	}
 	seq_putc(m, '\n');
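With arch_vma_name() wired up, the randomized mapping appears as [vdso] in
/proc/<pid>/maps, at a different base on each exec. A minimal self-inspection
sketch (not part of the patch):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *maps = fopen("/proc/self/maps", "r");

            if (!maps)
                    return 1;
            while (fgets(line, sizeof(line), maps))
                    if (strstr(line, "[vdso]"))
                            fputs(line, stdout);
            fclose(maps);
            return 0;
    }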
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/system.h>		/* for savesegment */
 #include <asm/auxvec.h>
+#include <asm/desc.h>
 
 #include <linux/utsname.h>
@@ -129,15 +130,41 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
 #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
 
-#define VSYSCALL_BASE	(__fix_to_virt(FIX_VSYSCALL))
-#define VSYSCALL_EHDR	((const struct elfhdr *) VSYSCALL_BASE)
-#define VSYSCALL_ENTRY	((unsigned long) &__kernel_vsyscall)
+#define VDSO_HIGH_BASE		(__fix_to_virt(FIX_VDSO))
+#define VDSO_BASE		((unsigned long)current->mm->context.vdso)
+
+#ifdef CONFIG_COMPAT_VDSO
+# define VDSO_COMPAT_BASE	VDSO_HIGH_BASE
+# define VDSO_PRELINK		VDSO_HIGH_BASE
+#else
+# define VDSO_COMPAT_BASE	VDSO_BASE
+# define VDSO_PRELINK		0
+#endif
+
+#define VDSO_COMPAT_SYM(x) \
+		(VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK)
+
+#define VDSO_SYM(x) \
+		(VDSO_BASE + (unsigned long)(x) - VDSO_PRELINK)
+
+#define VDSO_HIGH_EHDR	((const struct elfhdr *) VDSO_HIGH_BASE)
+#define VDSO_EHDR	((const struct elfhdr *) VDSO_COMPAT_BASE)
+
 extern void __kernel_vsyscall;
 
+#define VDSO_ENTRY	VDSO_SYM(&__kernel_vsyscall)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int executable_stack);
+
+extern unsigned int vdso_enabled;
+
 #define ARCH_DLINFO						\
-do {								\
-		NEW_AUX_ENT(AT_SYSINFO,	VSYSCALL_ENTRY);	\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE);	\
+do if (vdso_enabled) {						\
+		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);		\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE);	\
 } while (0)
 
 /*
@@ -148,15 +175,15 @@ do { \
  * Dumping its extra ELF program headers includes all the other information
  * a debugger needs to easily find how the vsyscall DSO was being used.
  */
-#define ELF_CORE_EXTRA_PHDRS		(VSYSCALL_EHDR->e_phnum)
+#define ELF_CORE_EXTRA_PHDRS		(VDSO_HIGH_EHDR->e_phnum)
 #define ELF_CORE_WRITE_EXTRA_PHDRS					      \
 do {									      \
 	const struct elf_phdr *const vsyscall_phdrs =			      \
-		(const struct elf_phdr *) (VSYSCALL_BASE		      \
-					   + VSYSCALL_EHDR->e_phoff);	      \
+		(const struct elf_phdr *) (VDSO_HIGH_BASE		      \
+					   + VDSO_HIGH_EHDR->e_phoff);	      \
 	int i;								      \
 	Elf32_Off ofs = 0;						      \
-	for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) {			      \
+	for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) {			      \
 		struct elf_phdr phdr = vsyscall_phdrs[i];		      \
 		if (phdr.p_type == PT_LOAD) {				      \
 			BUG_ON(ofs != 0);				      \
@@ -174,10 +201,10 @@
 #define ELF_CORE_WRITE_EXTRA_DATA					      \
 do {									      \
 	const struct elf_phdr *const vsyscall_phdrs =			      \
-		(const struct elf_phdr *) (VSYSCALL_BASE		      \
-					   + VSYSCALL_EHDR->e_phoff);	      \
+		(const struct elf_phdr *) (VDSO_HIGH_BASE		      \
+					   + VDSO_HIGH_EHDR->e_phoff);	      \
 	int i;								      \
-	for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) {			      \
+	for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) {			      \
 		if (vsyscall_phdrs[i].p_type == PT_LOAD)		      \
 			DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr,	      \
 				   PAGE_ALIGN(vsyscall_phdrs[i].p_memsz));    \
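A worked example of the VDSO_SYM() arithmetic: a symbol linked at
VDSO_PRELINK + offset lives at VDSO_BASE + offset in the randomized mapping.
The stand-alone sketch below is hypothetical - it passes the base in
explicitly, whereas the kernel macro reads it from current->mm->context.vdso,
and the addresses are invented for the example:

    #include <stdio.h>

    /* CONFIG_COMPAT_VDSO=n case: the DSO is linked at 0, so a symbol's
     * link-time value is simply its offset into the page. */
    #define VDSO_PRELINK        0UL
    #define VDSO_SYM(base, x)   ((base) + (unsigned long)(x) - VDSO_PRELINK)

    int main(void)
    {
            unsigned long vdso_base = 0xb7f2d000UL; /* invented randomized base */
            unsigned long sym = 0x400UL;            /* invented link-time value */

            printf("runtime address: %#lx\n", VDSO_SYM(vdso_base, sym));
            return 0;
    }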
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -51,7 +51,7 @@
  */
 enum fixed_addresses {
 	FIX_HOLE,
-	FIX_VSYSCALL,
+	FIX_VDSO,
 #ifdef CONFIG_X86_LOCAL_APIC
 	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
 #endif
@@ -115,14 +115,6 @@ extern void __set_fixmap (enum fixed_addresses idx,
 #define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
 
-/*
- * This is the range that is readable by user mode, and things
- * acting like user mode such as get_user_pages.
- */
-#define FIXADDR_USER_START	(__fix_to_virt(FIX_VSYSCALL))
-#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
-
 extern void __this_fixmap_does_not_exist(void);
 
 /*
--- a/include/asm-i386/mmu.h
+++ b/include/asm-i386/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
 	int size;
 	struct semaphore sem;
 	void *ldt;
+	void *vdso;
 } mm_context_t;
 
 #endif
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -96,6 +96,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 #ifndef __ASSEMBLY__
 
+struct vm_area_struct;
+
 /*
  * This much address space is reserved for vmalloc() and iomap()
  * as well as fixmap mappings.
@@ -139,6 +141,7 @@ extern int page_is_ram(unsigned long pagenr);
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+#define __HAVE_ARCH_GATE_AREA 1
 #endif /* __KERNEL__ */
 
 #endif /* _I386_PAGE_H */
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -37,6 +37,7 @@ struct thread_info {
 					   0-0xBFFFFFFF for user-thead
 					   0-0xFFFFFFFF for kernel-thread
 					*/
+	void			*sysenter_return;
 	struct restart_block    restart_block;
 
 	unsigned long           previous_esp;   /* ESP of the previous stack in case
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -78,8 +78,8 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
 	return user_mode_vm(&info->regs);
 #else
 	return info->regs.eip < PAGE_OFFSET
-	       || (info->regs.eip >= __fix_to_virt(FIX_VSYSCALL)
-	           && info->regs.eip < __fix_to_virt(FIX_VSYSCALL) + PAGE_SIZE)
+	       || (info->regs.eip >= __fix_to_virt(FIX_VDSO)
+	           && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
 	       || info->regs.esp < PAGE_OFFSET;
 #endif
 }
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1065,5 +1065,7 @@ void drop_slab(void);
 extern int randomize_va_space;
 #endif
 
+const char *arch_vma_name(struct vm_area_struct *vma);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -189,6 +189,7 @@ enum
 	VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
 	VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
 	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
+	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
 };
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -927,6 +927,18 @@ static ctl_table vm_table[] = {
 		.proc_handler	= &proc_dointvec_jiffies,
 		.strategy	= &sysctl_jiffies,
 	},
+#endif
+#ifdef CONFIG_X86_32
+	{
+		.ctl_name	= VM_VDSO_ENABLED,
+		.procname	= "vdso_enabled",
+		.data		= &vdso_enabled,
+		.maxlen		= sizeof(vdso_enabled),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+	},
 #endif
 	{ .ctl_name = 0 }
 };