Commit a104ba57 authored by Andi Kleen's avatar Andi Kleen Committed by Linus Torvalds

[PATCH] x86-64 updates for 2.5.64-bk3

Make x86-64 boot again after the INITIAL_JIFFIES changes
and some bug fixes. Also some work for NPTL.

 - Merge with i386/2.5.64-bk3
 - Fix memory leak in copy_thread
 - arch_prctl uses GDT for base if possible. Cleanup.
 - clone supports bases >32bit for SETTLS etc. %fs hardcoded now.
 - new ptrace support for 64bit TLS
 - Disable (set|get)_thread_* for 64bit processes.
 - Audit arch/x86_64 for jiffies wrap issues.
 - Fix initial jiffies problem (that caused hanging kernels)
 - Fix a few 32bit emulation bugs (sigaltstack, sigqueue)
 - Some cleanup from Pavel
 - Should compile again as UP
 - Shrink size a bit by not putting exception tables into object files.
 - Fix compilation with gcc 3.3 - force inlining when needed
 - Work around 2.5.64-bk3 console init bug.
 - Fix some alignments in assembly code
parent c8eddecf
......@@ -829,13 +829,6 @@ gdt:
.word 0x9200 # data read/write
.word 0x00CF # granularity = 4096, 386
# (+5th nibble of limit)
# this is 64bit descriptor for code
.word 0xFFFF
.word 0
.word 0x9A00 # code read/exec
.word 0x00AF # as above, but it is long mode and with D=0
# it does not seem to do the trick.
idt_48:
.word 0 # idt limit = 0
.word 0, 0 # idt base = 0L
......
......@@ -106,20 +106,24 @@ sys32_sigaltstack(const stack_ia32_t *uss_ptr, stack_ia32_t *uoss_ptr,
stack_t uss,uoss;
int ret;
mm_segment_t seg;
if (uss_ptr) {
u32 ptr;
if (!access_ok(VERIFY_READ,uss_ptr,sizeof(stack_ia32_t)) ||
__get_user(ptr_to_u32(uss.ss_sp), &uss_ptr->ss_sp) ||
__get_user((u32)uss.ss_flags, &uss_ptr->ss_flags) ||
__get_user((u32)uss.ss_size, &uss_ptr->ss_size))
__get_user(ptr, &uss_ptr->ss_sp) ||
__get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
__get_user(uss.ss_size, &uss_ptr->ss_size))
return -EFAULT;
uss.ss_sp = (void *)(u64)ptr;
}
seg = get_fs();
set_fs(KERNEL_DS);
ret = do_sigaltstack(&uss, &uoss, regs.rsp);
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs.rsp);
set_fs(seg);
if (ret >= 0 && uoss_ptr) {
if (!access_ok(VERIFY_WRITE,uss_ptr,sizeof(stack_ia32_t)) ||
__put_user(ptr_to_u32(uss.ss_sp), &uss_ptr->ss_sp) ||
__put_user((u32)uss.ss_flags, &uss_ptr->ss_flags) ||
__put_user((u32)uss.ss_size, &uss_ptr->ss_size))
if (!access_ok(VERIFY_WRITE,uoss_ptr,sizeof(stack_ia32_t)) ||
__put_user((u32)(u64)uss.ss_sp, &uoss_ptr->ss_sp) ||
__put_user(uss.ss_flags, &uoss_ptr->ss_flags) ||
__put_user(uss.ss_size, &uoss_ptr->ss_size))
ret = -EFAULT;
}
return ret;
......
......@@ -443,8 +443,8 @@ ia32_sys_call_table:
.quad compat_sys_futex /* 240 */
.quad sys32_sched_setaffinity
.quad sys32_sched_getaffinity
.quad sys_set_thread_area
.quad sys_get_thread_area
.quad sys32_set_thread_area
.quad sys32_get_thread_area
.quad sys32_io_setup
.quad sys_io_destroy
.quad sys32_io_getevents
......
......@@ -22,6 +22,8 @@
#include <asm/errno.h>
#include <asm/debugreg.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/fpu32.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
......
......@@ -1261,8 +1261,8 @@ siginfo64to32(siginfo_t32 *d, siginfo_t *s)
if (s->si_signo >= SIGRTMIN) {
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
/* XXX: Ouch, how to find this out??? */
d->si_int = s->si_int;
memcpy(&d->si_int, &s->si_int,
sizeof(siginfo_t) - offsetof(siginfo_t,si_int));
} else switch (s->si_signo) {
/* XXX: What about POSIX1.b timers */
case SIGCHLD:
......@@ -1299,8 +1299,9 @@ siginfo32to64(siginfo_t *d, siginfo_t32 *s)
if (s->si_signo >= SIGRTMIN) {
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
/* XXX: Ouch, how to find this out??? */
d->si_int = s->si_int;
memcpy(&d->si_int,
&s->si_int,
sizeof(siginfo_t) - offsetof(siginfo_t, si_int));
} else switch (s->si_signo) {
/* XXX: What about POSIX1.b timers */
case SIGCHLD:
......
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/user.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/proto.h>
/*
* sys_alloc_thread_area: get a yet unused TLS descriptor index.
*/
static int get_free_idx(void)
{
struct thread_struct *t = &current->thread;
int idx;
for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
return idx + GDT_ENTRY_TLS_MIN;
return -ESRCH;
}
/*
 * Set a given TLS descriptor in a thread's TLS array (32-bit compat path;
 * bases here are limited to 32 bits).
 * When you want addresses > 32bit use arch_prctl()
 */
int do_set_thread_area(struct thread_struct *t, struct user_desc *u_info)
{
	struct user_desc info;
	struct n_desc_struct *desc;
	int cpu, idx;

	/* Pull the whole descriptor request from user space up front. */
	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		/* Report the slot we picked back to the caller. */
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	if (LDT_empty(&info)) {
		/* An "empty" request clears the slot entirely. */
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	/* Only reload the live GDT when we modified the running thread. */
	if (t == &current->thread)
		load_TLS(t, cpu);

	put_cpu();
	return 0;
}
/* 32-bit compat syscall entry: set a TLS slot of the calling task. */
asmlinkage long sys32_set_thread_area(struct user_desc *u_info)
{
	struct thread_struct *t = &current->thread;

	return do_set_thread_area(t, u_info);
}
/*
 * Get the current Thread-Local Storage area:
 */

/*
 * Field extractors for the two 32-bit halves of a segment descriptor
 * (a = low dword, b = high dword).  Bit positions in b follow the IA-32
 * descriptor layout and mirror what LDT_entry_b() packs:
 * seg_32bit (D/B) is bit 22, limit_in_pages (G) is bit 23, long mode (L)
 * is bit 21, AVL/useable is bit 20, present is bit 15.
 */
#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b & 0xff000000)   )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	 ((desc)->b & 0xf0000) )

/* D/B flag lives in bit 22; bit 23 is the granularity bit (see below). */
#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
#define GET_LONGMODE(desc)	(((desc)->b >> 21) & 1)
/*
 * Read one TLS descriptor out of a thread's TLS array and translate it
 * back into the user_desc layout expected by user space.  The slot to
 * read is taken from u_info->entry_number.
 */
int do_get_thread_area(struct thread_struct *t, struct user_desc *u_info)
{
	struct user_desc info;
	struct n_desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;

	/* Zero first so padding and unset fields don't leak kernel stack. */
	memset(&info, 0, sizeof(struct user_desc));
	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	/* Descriptor stores "writable"; the ABI field is its negation. */
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);
	info.lm = GET_LONGMODE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
/* 32-bit compat syscall entry: read back a TLS slot of the calling task. */
asmlinkage long sys32_get_thread_area(struct user_desc *u_info)
{
	struct thread_struct *t = &current->thread;

	return do_get_thread_area(t, u_info);
}
/*
 * Install the TLS descriptor requested by a 32-bit CLONE_SETTLS clone.
 * The user_desc pointer is read from the child's %esi (childregs->rsi),
 * matching the i386 clone() register convention.
 * Writes p's TLS array directly with no get_cpu()/load_TLS --
 * NOTE(review): assumes the child is not running yet, so no GDT reload
 * is needed here; confirm against the copy_thread call site.
 */
int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs)
{
	struct n_desc_struct *desc;
	struct user_desc info, *cp;
	int idx;

	cp = (void *)childregs->rsi;
	if (copy_from_user(&info, cp, sizeof(info)))
		return -EFAULT;
	/* An empty descriptor makes no sense for SETTLS. */
	if (LDT_empty(&info))
		return -EINVAL;

	idx = info.entry_number;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
	desc->a = LDT_entry_a(&info);
	desc->b = LDT_entry_b(&info);
	return 0;
}
......@@ -981,9 +981,6 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
asmlinkage void smp_spurious_interrupt(void)
{
unsigned int v;
static unsigned long last_warning;
static unsigned long skipped;
irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
......@@ -994,8 +991,12 @@ asmlinkage void smp_spurious_interrupt(void)
if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
ack_APIC_irq();
#if 0
static unsigned long last_warning;
static unsigned long skipped;
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
if (last_warning+30*HZ < jiffies) {
if (time_before(last_warning+30*HZ,jiffies)) {
printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
smp_processor_id(), skipped);
last_warning = jiffies;
......@@ -1003,6 +1004,7 @@ asmlinkage void smp_spurious_interrupt(void)
} else {
skipped++;
}
#endif
irq_exit();
}
......
......@@ -79,6 +79,9 @@ void __init x86_64_start_kernel(char * real_mode_data)
clear_bss();
pda_init(0);
copy_bootdata(real_mode_data);
/* default console: */
if (!strstr(saved_command_line, "console="))
strcat(saved_command_line, " console=tty0");
s = strstr(saved_command_line, "earlyprintk=");
if (s != NULL)
setup_early_printk(s+12);
......
......@@ -1110,7 +1110,7 @@ static void __init setup_ioapic_ids_from_mpc (void)
*/
static int __init timer_irq_works(void)
{
unsigned int t1 = jiffies;
unsigned long t1 = jiffies;
local_irq_enable();
/* Let ten ticks pass... */
......@@ -1123,6 +1123,8 @@ static int __init timer_irq_works(void)
* might have cached one ExtINT interrupt. Finally, at
* least one tick may be lost due to delays.
*/
/* jiffies wrap? */
if (jiffies - t1 > 4)
return 1;
return 0;
......
......@@ -19,46 +19,37 @@
#define __KERNEL_SYSCALLS__
#include <stdarg.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/ptrace.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <linux/irq.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
asmlinkage extern void ret_from_fork(void);
int sys_arch_prctl(int code, unsigned long addr);
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
......@@ -245,10 +236,36 @@ void release_thread(struct task_struct *dead_task)
}
}
/*
 * Write a flat, present, 32-bit data descriptor with the given base into
 * one of the task's TLS slots (fast path for fs/gs bases that fit in
 * 32 bits -- see do_arch_prctl).
 */
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,	/* 1M pages -> 4GB limit */
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
struct desc_struct *desc = (void *)t->thread.tls_array;
desc += tls;
return desc->base0 |
(((u32)desc->base1) << 16) |
(((u32)desc->base2) << 24);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
int err;
struct pt_regs * childregs;
struct task_struct *me = current;
......@@ -265,7 +282,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
p->thread.rsp = (unsigned long) childregs;
p->thread.rsp0 = (unsigned long) (childregs+1);
p->thread.userrsp = current->thread.userrsp;
p->thread.userrsp = me->thread.userrsp;
p->thread.rip = (unsigned long) ret_from_fork;
......@@ -277,8 +294,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
asm("movl %%es,%0" : "=m" (p->thread.es));
asm("movl %%ds,%0" : "=m" (p->thread.ds));
unlazy_fpu(current);
p->thread.i387 = current->thread.i387;
unlazy_fpu(me);
p->thread.i387 = me->thread.i387;
if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
p->thread.io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE+1)*4, GFP_KERNEL);
......@@ -292,27 +309,20 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS) {
struct n_desc_struct *desc;
struct user_desc info;
int idx;
if (copy_from_user(&info, test_thread_flag(TIF_IA32) ?
(void *)childregs->rsi :
(void *)childregs->rdx, sizeof(info)))
return -EFAULT;
if (LDT_empty(&info))
return -EINVAL;
idx = info.entry_number;
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
desc = (struct n_desc_struct *)(p->thread.tls_array) + idx - GDT_ENTRY_TLS_MIN;
desc->a = LDT_entry_a(&info);
desc->b = LDT_entry_b(&info);
#ifdef CONFIG_IA32_EMULATION
if (test_thread_flag(TIF_IA32))
err = ia32_child_tls(p, childregs);
else
#endif
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r10);
if (err)
goto out;
}
return 0;
err = 0;
out:
if (err && p->thread.io_bitmap_ptr)
kfree(p->thread.io_bitmap_ptr);
return err;
}
/*
......@@ -422,7 +432,7 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
if (next->io_bitmap_ptr) {
/*
* 4 cachelines copy ... not good, but not that
* 2 cachelines copy ... not good, but not that
* bad either. Anyone got something better?
* This only affects processes which use ioperm().
*/
......@@ -537,19 +547,35 @@ unsigned long get_wchan(struct task_struct *p)
#undef last_sched
#undef first_sched
int sys_arch_prctl(int code, unsigned long addr)
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
int ret = 0;
int doit = task == current;
int cpu;
switch (code) {
case ARCH_SET_GS:
if (addr >= TASK_SIZE)
return -EPERM;
get_cpu();
cpu = get_cpu();
/* handle small bases via the GDT because that's faster to
switch. */
if (addr <= 0xffffffff) {
set_32bit_tls(task, GS_TLS, addr);
if (doit) {
load_TLS(&task->thread, cpu);
load_gs_index(GS_TLS_SEL);
}
task->thread.gsindex = GS_TLS_SEL;
task->thread.gs = 0;
} else {
task->thread.gsindex = 0;
task->thread.gs = addr;
if (doit) {
load_gs_index(0);
current->thread.gsindex = 0;
current->thread.gs = addr;
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
}
}
put_cpu();
break;
case ARCH_SET_FS:
......@@ -557,22 +583,51 @@ int sys_arch_prctl(int code, unsigned long addr)
with gs */
if (addr >= TASK_SIZE)
return -EPERM;
get_cpu();
cpu = get_cpu();
/* handle small bases via the GDT because that's faster to
switch. */
if (addr <= 0xffffffff) {
set_32bit_tls(task, FS_TLS, addr);
if (doit) {
load_TLS(&task->thread, cpu);
asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
}
task->thread.fsindex = FS_TLS_SEL;
task->thread.fs = 0;
} else {
task->thread.fsindex = 0;
task->thread.fs = addr;
if (doit) {
/* set the selector to 0 to not confuse
__switch_to */
asm volatile("movl %0,%%fs" :: "r" (0));
current->thread.fsindex = 0;
current->thread.fs = addr;
ret = checking_wrmsrl(MSR_FS_BASE, addr);
}
}
put_cpu();
break;
/* Returned value may not be correct when the user changed fs/gs */
case ARCH_GET_FS:
ret = put_user(current->thread.fs, (unsigned long *)addr);
case ARCH_GET_FS: {
unsigned long base;
if (task->thread.fsindex == FS_TLS_SEL)
base = read_32bit_tls(task, FS_TLS);
else if (doit) {
rdmsrl(MSR_FS_BASE, base);
} else
base = task->thread.fs;
ret = put_user(base, (unsigned long *)addr);
break;
case ARCH_GET_GS:
ret = put_user(current->thread.gs, (unsigned long *)addr);
}
case ARCH_GET_GS: {
unsigned long base;
if (task->thread.gsindex == GS_TLS_SEL)
base = read_32bit_tls(task, GS_TLS);
else if (doit) {
rdmsrl(MSR_KERNEL_GS_BASE, base);
} else
base = task->thread.gs;
ret = put_user(base, (unsigned long *)addr);
break;
}
default:
ret = -EINVAL;
......@@ -582,131 +637,9 @@ int sys_arch_prctl(int code, unsigned long addr)
return ret;
}
/*
* sys_alloc_thread_area: get a yet unused TLS descriptor index.
*/
static int get_free_idx(void)
{
struct thread_struct *t = &current->thread;
int idx;
for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
if (desc_empty((struct n_desc_struct *)(t->tls_array) + idx))
return idx + GDT_ENTRY_TLS_MIN;
return -ESRCH;
}
/*
* Set a given TLS descriptor:
* When you want addresses > 32bit use arch_prctl()
*/
int do_set_thread_area(struct thread_struct *t, struct user_desc *u_info)
{
struct user_desc info;
struct n_desc_struct *desc;
int cpu, idx;
if (copy_from_user(&info, u_info, sizeof(info)))
return -EFAULT;
idx = info.entry_number;
/*
* index -1 means the kernel should try to find and
* allocate an empty descriptor:
*/
if (idx == -1) {
idx = get_free_idx();
if (idx < 0)
return idx;
if (put_user(idx, &u_info->entry_number))
return -EFAULT;
}
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
/*
* We must not get preempted while modifying the TLS.
*/
cpu = get_cpu();
if (LDT_empty(&info)) {
desc->a = 0;
desc->b = 0;
} else {
desc->a = LDT_entry_a(&info);
desc->b = LDT_entry_b(&info);
}
if (t == &current->thread)
load_TLS(t, cpu);
put_cpu();
return 0;
}
asmlinkage int sys_set_thread_area(struct user_desc *u_info)
{
return do_set_thread_area(&current->thread, u_info);
}
/*
* Get the current Thread-Local Storage area:
*/
#define GET_BASE(desc) ( \
(((desc)->a >> 16) & 0x0000ffff) | \
(((desc)->b << 16) & 0x00ff0000) | \
( (desc)->b & 0xff000000) )
#define GET_LIMIT(desc) ( \
((desc)->a & 0x0ffff) | \
((desc)->b & 0xf0000) )
#define GET_32BIT(desc) (((desc)->b >> 23) & 1)
#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
#define GET_LONGMODE(desc) (((desc)->b >> 21) & 1)
int do_get_thread_area(struct thread_struct *t, struct user_desc *u_info)
{
struct user_desc info;
struct n_desc_struct *desc;
int idx;
if (get_user(idx, &u_info->entry_number))
return -EFAULT;
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;
memset(&info, 0, sizeof(struct user_desc));
info.entry_number = idx;
info.base_addr = GET_BASE(desc);
info.limit = GET_LIMIT(desc);
info.seg_32bit = GET_32BIT(desc);
info.contents = GET_CONTENTS(desc);
info.read_exec_only = !GET_WRITABLE(desc);
info.limit_in_pages = GET_LIMIT_PAGES(desc);
info.seg_not_present = !GET_PRESENT(desc);
info.useable = GET_USEABLE(desc);
info.lm = GET_LONGMODE(desc);
if (copy_to_user(u_info, &info, sizeof(info)))
return -EFAULT;
return 0;
}
asmlinkage int sys_get_thread_area(struct user_desc *u_info)
long sys_arch_prctl(int code, unsigned long addr)
{
return do_get_thread_area(&current->thread, u_info);
return do_arch_prctl(current, code, addr);
}
/*
......
......@@ -26,6 +26,7 @@
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
/*
* does not yet catch signals sent when the child dies.
......@@ -322,6 +323,13 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
wake_up_process(child);
ret = 0;
break;
}
#ifdef CONFIG_IA32_EMULATION
/* This makes only sense with 32bit programs. Allow a
64bit debugger to fully examine them too. Better
don't use it against 64bit processes, use
PTRACE_ARCH_PRCTL instead. */
case PTRACE_SET_THREAD_AREA: {
int old;
get_user(old, &((struct user_desc *)data)->entry_number);
......@@ -338,7 +346,13 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
put_user(old, &((struct user_desc *)data)->entry_number);
break;
}
}
#endif
/* normal 64bit interface to access TLS data.
Works just like arch_prctl, except that the arguments
are reversed. */
case PTRACE_ARCH_PRCTL:
ret = do_arch_prctl(child, data, addr);
break;
/*
* make the child exit. Best I can do is send it a sigkill.
......
......@@ -22,6 +22,7 @@
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/mtrr.h>
#include <asm/proto.h>
char x86_boot_params[2048] __initdata = {0,};
......@@ -29,9 +30,6 @@ unsigned long cpu_initialized __initdata = 0;
struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
extern void system_call(void);
extern void ia32_cstar_target(void);
extern struct task_struct init_task;
extern unsigned char __per_cpu_start[], __per_cpu_end[];
......@@ -130,6 +128,24 @@ void pda_init(int cpu)
#define EXCEPTION_STK_ORDER 0 /* >= N_EXCEPTION_STACKS*EXCEPTION_STKSZ */
char boot_exception_stacks[N_EXCEPTION_STACKS*EXCEPTION_STKSZ];
/*
 * Program the MSRs that control the 64-bit SYSCALL entry path for this
 * CPU: target segments (STAR), 64-bit entry point (LSTAR), the compat
 * entry point (CSTAR) and the rflags mask applied on entry.
 */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register. STAR allows to set CS/DS
	 * but only a 32bit target. LSTAR sets the 64bit rip.
	 */
	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
#ifdef CONFIG_IA32_EMULATION
	/* 32-bit processes enter via SYSCALL too on AMD; give them their own rip. */
	wrmsrl(MSR_CSTAR, ia32_cstar_target);
#endif
	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
}
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
......@@ -188,20 +204,7 @@ void __init cpu_init (void)
asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
/*
* LSTAR and STAR live in a bit strange symbiosis.
* They both write to the same internal register. STAR allows to set CS/DS
* but only a 32bit target. LSTAR sets the 64bit rip.
*/
wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
wrmsrl(MSR_LSTAR, system_call);
#ifdef CONFIG_IA32_EMULATION
wrmsrl(MSR_CSTAR, ia32_cstar_target);
#endif
/* Flags to clear on syscall */
wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
syscall_init();
wrmsrl(MSR_FS_BASE, 0);
wrmsrl(MSR_KERNEL_GS_BASE, 0);
......
......@@ -96,14 +96,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, unsigned long *p
#define COPY(x) err |= __get_user(regs->x, &sc->x)
{
unsigned int seg;
err |= __get_user(seg, &sc->gs);
load_gs_index(seg);
err |= __get_user(seg, &sc->fs);
loadsegment(fs,seg);
}
COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx);
COPY(rdx); COPY(rcx); COPY(rip);
COPY(r8);
......@@ -189,13 +181,10 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs)
static inline int
setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me)
{
int tmp, err = 0;
int err = 0;
tmp = 0;
__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
err |= __put_user(tmp, (unsigned int *)&sc->gs);
__asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
err |= __put_user(tmp, (unsigned int *)&sc->fs);
err |= __put_user(0, &sc->gs);
err |= __put_user(0, &sc->fs);
err |= __put_user(regs->rdi, &sc->rdi);
err |= __put_user(regs->rsi, &sc->rsi);
......
......@@ -26,6 +26,7 @@
#include <asm/acpi.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/proto.h>
static struct saved_context saved_context;
......@@ -58,10 +59,9 @@ void save_processor_state (void)
asm volatile ("movw %%gs, %0" : "=m" (saved_context.gs));
asm volatile ("movw %%ss, %0" : "=m" (saved_context.ss));
asm volatile ("swapgs");
rdmsrl(0xc0000100, saved_context.fs_base);
rdmsrl(0xc0000101, saved_context.gs_base);
asm volatile ("swapgs");
rdmsrl(MSR_FS_BASE, saved_context.fs_base);
rdmsrl(MSR_GS_BASE, saved_context.gs_base);
rdmsrl(MSR_KERNEL_GS_BASE, saved_context.gs_kernel_base);
/*
* control registers
......@@ -99,10 +99,9 @@ void restore_processor_state(void)
load_gs_index(saved_context.gs);
asm volatile ("movw %0, %%ss" :: "r" (saved_context.ss));
asm volatile ("swapgs");
wrmsrl(0xc0000100, saved_context.fs_base);
wrmsrl(0xc0000101, saved_context.gs_base);
asm volatile ("swapgs");
wrmsrl(MSR_FS_BASE, saved_context.fs_base);
wrmsrl(MSR_GS_BASE, saved_context.gs_base);
wrmsrl(MSR_KERNEL_GS_BASE, saved_context.gs_kernel_base);
/*
* now restore the descriptor tables to their proper values
......
......@@ -30,7 +30,7 @@
#include <asm/apic.h>
#endif
u64 jiffies_64;
u64 jiffies_64 = INITIAL_JIFFIES;
extern int using_apic_timer;
......@@ -47,8 +47,8 @@ int hpet_report_lost_ticks; /* command line option */
struct hpet_data __hpet __section_hpet; /* address, quotient, trigger, hz */
volatile unsigned long __jiffies __section_jiffies;
unsigned long __wall_jiffies __section_wall_jiffies;
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
......
......@@ -30,7 +30,7 @@
#include <asm/thread_info.h>
.text
.p2align
.p2align 4
.globl __get_user_1
__get_user_1:
GET_THREAD_INFO(%rbx)
......@@ -40,7 +40,7 @@ __get_user_1:
xorq %rax,%rax
ret
.p2align
.p2align 4
.globl __get_user_2
__get_user_2:
GET_THREAD_INFO(%rbx)
......@@ -52,7 +52,7 @@ __get_user_2:
xorq %rax,%rax
ret
.p2align
.p2align 4
.globl __get_user_4
__get_user_4:
GET_THREAD_INFO(%rbx)
......@@ -64,7 +64,7 @@ __get_user_4:
xorq %rax,%rax
ret
.p2align
.p2align 4
.globl __get_user_8
__get_user_8:
GET_THREAD_INFO(%rbx)
......
......@@ -53,7 +53,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot)
static void flush_kernel_map(void *address)
{
if (address && cpu_has_clflush) {
if (0 && address && cpu_has_clflush) {
/* is this worth it? */
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
......
......@@ -196,21 +196,35 @@ static struct pci_ops pci_direct_conf2 = {
static int __devinit pci_sanity_check(struct pci_ops *o)
{
u32 x = 0;
struct pci_bus bus; /* Fake bus and device */
struct pci_dev dev;
int retval = 0;
struct pci_bus *bus; /* Fake bus and device */
struct pci_dev *dev;
if (pci_probe & PCI_NO_CHECKS)
return 1;
bus.number = 0;
dev.bus = &bus;
for(dev.devfn=0; dev.devfn < 0x100; dev.devfn++)
if ((!o->read(&bus, dev.devfn, PCI_CLASS_DEVICE, 2, &x) &&
bus = kmalloc(sizeof(*bus), GFP_ATOMIC);
dev = kmalloc(sizeof(*dev), GFP_ATOMIC);
if (!bus || !dev) {
printk(KERN_ERR "Out of memory in %s\n", __FUNCTION__);
goto exit;
}
bus->number = 0;
dev->bus = bus;
for(dev->devfn=0; dev->devfn < 0x100; dev->devfn++)
if ((!o->read(bus, dev->devfn, PCI_CLASS_DEVICE, 2, &x) &&
(x == PCI_CLASS_BRIDGE_HOST || x == PCI_CLASS_DISPLAY_VGA)) ||
(!o->read(&bus, dev.devfn, PCI_VENDOR_ID, 2, &x) &&
(x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ)))
return 1;
(!o->read(bus, dev->devfn, PCI_VENDOR_ID, 2, &x) &&
(x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ))) {
retval = 1;
goto exit;
}
DBG("PCI: Sanity check failed\n");
return 0;
exit:
kfree(dev);
kfree(bus);
return retval;
}
static int __init pci_direct_init(void)
......@@ -218,7 +232,7 @@ static int __init pci_direct_init(void)
unsigned int tmp;
unsigned long flags;
local_save_flags(flags); local_irq_disable();
local_irq_save(flags);
/*
* Check if configuration type 1 works.
......@@ -261,7 +275,6 @@ static int __init pci_direct_init(void)
}
local_irq_restore(flags);
pci_root_ops = NULL;
return 0;
}
......
......@@ -109,7 +109,7 @@ static void __init pirq_peer_trick(void)
*/
if (busmap[i] && pci_scan_bus(i, pci_root_bus->ops, NULL))
printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
pcibios_last_bus = -1;
//pcibios_last_bus = -1;
}
/*
......@@ -291,14 +291,14 @@ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq
{
irq = read_config_nybble(router, 0x56, pirq - 1);
}
printk(KERN_INFO "AMD: dev %04x:%04x, router pirq : %d get irq : %2d\n",
printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
dev->vendor, dev->device, pirq, irq);
return irq;
}
static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
printk(KERN_INFO "AMD: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
dev->vendor, dev->device, pirq, irq);
if (pirq <= 4)
{
......
/*
* legacy.c - traditional, old school PCI bus probing
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pci.h>
#include "pci.h"
/*
......@@ -12,28 +12,39 @@
void __devinit pcibios_fixup_peer_bridges(void)
{
int n;
struct pci_bus bus;
struct pci_dev dev;
struct pci_bus *bus;
struct pci_dev *dev;
u16 l;
if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff)
if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
return;
DBG("PCI: Peer bridge fixup\n");
bus = kmalloc(sizeof(*bus), GFP_ATOMIC);
dev = kmalloc(sizeof(*dev), GFP_ATOMIC);
if (!bus || !dev) {
printk(KERN_ERR "Out of memory in %s\n", __FUNCTION__);
goto exit;
}
for (n=0; n <= pcibios_last_bus; n++) {
if (pci_bus_exists(&pci_root_buses, n))
continue;
bus.number = n;
bus.ops = pci_root_ops;
dev.bus = &bus;
for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
bus->number = n;
bus->ops = pci_root_ops;
dev->bus = bus;
for (dev->devfn=0; dev->devfn<256; dev->devfn += 8)
if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) &&
l != 0x0000 && l != 0xffff) {
DBG("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
DBG("Found device at %02x:%02x [%04x]\n", n, dev->devfn, l);
printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n);
pci_scan_bus(n, pci_root_ops, NULL);
break;
}
}
exit:
kfree(dev);
kfree(bus);
}
static int __init pci_legacy_init(void)
......
......@@ -67,7 +67,6 @@ extern unsigned int pcibios_irq_mask;
extern int pcibios_scanned;
extern spinlock_t pci_config_lock;
void pcibios_fixup_irqs(void);
int pirq_enable_irq(struct pci_dev *dev);
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
......
......@@ -138,9 +138,11 @@ static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
static inline void set_seg_base(unsigned cpu, int entry, void *base)
{
struct desc_struct *d = &cpu_gdt_table[cpu][entry];
d->base0 = PTR_LOW(base);
d->base1 = PTR_MIDDLE(base);
d->base2 = PTR_HIGH(base);
u32 addr = (u32)(u64)base;
BUG_ON((u64)base >> 32);
d->base0 = addr & 0xffff;
d->base1 = (addr >> 16) & 0xff;
d->base2 = (addr >> 24) & 0xff;
}
#define LDT_entry_a(info) \
......
......@@ -156,6 +156,13 @@ struct ustat32 {
#define IA32_PAGE_OFFSET 0xffffe000
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#ifdef __KERNEL__
struct user_desc;
int do_get_thread_area(struct thread_struct *t, struct user_desc *u_info);
int do_set_thread_area(struct thread_struct *t, struct user_desc *u_info);
int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
#endif
#endif /* !CONFIG_IA32_SUPPORT */
#endif
......@@ -57,11 +57,11 @@ extern unsigned long max_mapnr;
extern unsigned long end_pfn;
extern unsigned long table_start, table_end;
struct thread_struct;
struct user_desc;
extern void syscall_init(void);
int do_set_thread_area(struct thread_struct *t, struct user_desc *u_info);
int do_get_thread_area(struct thread_struct *t, struct user_desc *u_info);
struct pt_regs;
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
#define round_down(x,y) ((x) & ~((y)-1))
......
......@@ -78,9 +78,11 @@ struct pt_regs {
#define PTRACE_GETFPXREGS 18
#define PTRACE_SETFPXREGS 19
/* only useful for access 32bit programs */
#define PTRACE_GET_THREAD_AREA 25
#define PTRACE_SET_THREAD_AREA 26
#define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#define user_mode(regs) (!!((regs)->cs & 3))
......
......@@ -6,8 +6,6 @@
#define __KERNEL32_CS 0x38
#define __USER_LONGBASE ((GDT_ENTRY_LONGBASE * 8) | 3)
/*
* we cannot use the same code segment descriptor for user and kernel
* -- not even in the long flat mode, because of different DPL /kkeil
......@@ -31,6 +29,13 @@
#define GDT_ENTRY_TLS_ENTRIES 3
/* TLS indexes for 64bit - hardcoded in arch_prctl */
#define FS_TLS 0
#define GS_TLS 1
#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
#define IDT_ENTRIES 256
#define GDT_ENTRIES 16
#define GDT_SIZE (GDT_ENTRIES * 8)
......
......@@ -14,7 +14,7 @@ arch_prepare_suspend(void)
/* image of the saved processor state */
struct saved_context {
u16 ds, es, fs, gs, ss;
unsigned long gs_base, fs_base;
unsigned long gs_base, gs_kernel_base, fs_base;
unsigned long cr0, cr2, cr3, cr4;
u16 gdt_pad;
u16 gdt_limit;
......
......@@ -5,6 +5,7 @@
* User space memory access functions
*/
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
......@@ -47,7 +48,7 @@
#define access_ok(type,addr,size) (__range_not_ok(addr,size) == 0)
extern inline int verify_area(int type, const void * addr, unsigned long size)
extern __force_inline int verify_area(int type, const void * addr, unsigned long size)
{
return access_ok(type,addr,size) ? 0 : -EFAULT;
}
......@@ -237,7 +238,7 @@ extern unsigned long copy_user_generic(void *to, const void *from, unsigned len)
extern unsigned long copy_to_user(void *to, const void *from, unsigned len);
extern unsigned long copy_from_user(void *to, const void *from, unsigned len);
static inline int __copy_from_user(void *dst, const void *src, unsigned size)
static __force_inline int __copy_from_user(void *dst, const void *src, unsigned size)
{
if (!__builtin_constant_p(size))
return copy_user_generic(dst,src,size);
......@@ -266,7 +267,7 @@ static inline int __copy_from_user(void *dst, const void *src, unsigned size)
}
}
static inline int __copy_to_user(void *dst, const void *src, unsigned size)
static __force_inline int __copy_to_user(void *dst, const void *src, unsigned size)
{
if (!__builtin_constant_p(size))
return copy_user_generic(dst,src,size);
......
......@@ -469,7 +469,7 @@ __SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
#define __NR_sched_getaffinity 204
__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
#define __NR_set_thread_area 205
__SYSCALL(__NR_set_thread_area, sys_set_thread_area)
__SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */
#define __NR_io_setup 206
__SYSCALL(__NR_io_setup, sys_io_setup)
#define __NR_io_destroy 207
......@@ -481,7 +481,7 @@ __SYSCALL(__NR_io_submit, sys_io_submit)
#define __NR_io_cancel 210
__SYSCALL(__NR_io_cancel, sys_io_cancel)
#define __NR_get_thread_area 211
__SYSCALL(__NR_get_thread_area, sys_get_thread_area)
__SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */
#define __NR_lookup_dcookie 212
__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
#define __NR_epoll_create 213
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment