Commit d3806951 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
parents c60ef48a e87866ff
@@ -72,6 +72,12 @@ DF_MASK = 0x00000400
NT_MASK = 0x00004000
VM_MASK = 0x00020000
/*
* ESP0 is at offset 4. 0x100 is the size of the TSS, and
* also thus the top-of-stack pointer offset of SYSENTER_ESP
*/
TSS_ESP0_OFFSET = (4 - 0x100)
#ifdef CONFIG_PREEMPT
#define preempt_stop cli
#else
@@ -229,6 +235,8 @@ need_resched:
# sysenter call handler stub
ENTRY(sysenter_entry)
movl TSS_ESP0_OFFSET(%esp),%esp
sysenter_past_esp:
sti
pushl $(__USER_DS)
pushl %ebp
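The stub works because MSR_IA32_SYSENTER_ESP is now programmed with the address of the end of the per-CPU TSS (see the enable_sep_cpu() hunk below) rather than with the kernel stack itself: the first instruction executed after SYSENTER indirects through the TSS to fetch the real stack from its esp0 field. A minimal sketch of the arithmetic, assuming the 0x100-byte TSS described in the comment above (names here are illustrative, not from the patch):

    #define TSS_SIZE 0x100	/* per the comment above */

    /*
     * SYSENTER loads %esp with the address just past the TSS, so the
     * esp0 field (offset 4 from the TSS base) sits at 4 - TSS_SIZE
     * relative to that value -- exactly TSS_ESP0_OFFSET.
     */
    static unsigned long esp0_address(unsigned long sysenter_esp)
    {
    	return sysenter_esp + (4 - TSS_SIZE);	/* == tss_base + 4 */
    }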
@@ -458,12 +466,36 @@ device_not_available_emulate:
addl $4, %esp
jmp ret_from_exception
/*
* Debug traps and NMI can happen at the one SYSENTER instruction
* that sets up the real kernel stack. Check here, since we can't
* allow the wrong stack to be used.
*
* "TSS_ESP0_OFFSET+12" is because the NMI/debug handler will have
* already pushed 3 words if it hits on the sysenter instruction:
* eflags, cs and eip.
*
* We just load the right stack, and push the three (known) values
* by hand onto the new stack - while updating the return eip past
* the instruction that would have done it for sysenter.
*/
#define CHECK_SYSENTER_EIP \
cmpl $sysenter_entry,(%esp); \
jne 1f; \
movl TSS_ESP0_OFFSET+12(%esp),%esp; \
pushfl; \
pushl $__KERNEL_CS; \
pushl $sysenter_past_esp; \
1:
ENTRY(debug)
CHECK_SYSENTER_EIP
pushl $0
pushl $do_debug
jmp error_code
ENTRY(nmi)
CHECK_SYSENTER_EIP
pushl %eax
SAVE_ALL
movl %esp, %edx
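The "+12" in the macro accounts for the trap frame: if the debug exception or NMI fires on the one instruction that still runs on the interim end-of-TSS stack, the CPU has already pushed eflags, cs and eip (three 32-bit words, 12 bytes) onto it, so the esp0 field sits 12 bytes further from the trap-time %esp. The macro then loads the real stack and pushes an equivalent three-word frame by hand, with the saved eip advanced to sysenter_past_esp so the interrupted stack switch is not replayed on return. A sketch of that frame, for illustration only (not from the patch):

    /*
     * Layout the CPU pushes on a 32-bit trap taken without a privilege
     * change; CHECK_SYSENTER_EIP rebuilds exactly this on the real stack.
     */
    struct trap_frame {
    	unsigned long eip;	/* lowest address; here == sysenter_entry */
    	unsigned long cs;
    	unsigned long eflags;	/* pushed first, highest address */
    };				/* sizeof == 12, hence TSS_ESP0_OFFSET+12 */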
@@ -41,8 +41,9 @@ void enable_sep_cpu(void *info)
struct tss_struct *tss = init_tss + cpu;
tss->ss1 = __KERNEL_CS;
tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp0, 0);
wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
printk("Enabling SEP on CPU %d\n", cpu);
@@ -107,22 +107,22 @@
* Use slab allocator instead of kmalloc/kfree.
* Use generic list implementation from <linux/list.h>.
* Sped up posix_locks_deadlock by only considering blocked locks.
* Matthew Wilcox <willy@thepuffingroup.com>, March, 2000.
* Matthew Wilcox <willy@debian.org>, March, 2000.
*
* Leases and LOCK_MAND
* Matthew Wilcox <willy@linuxcare.com>, June, 2000.
* Matthew Wilcox <willy@debian.org>, June, 2000.
* Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
*/
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/time.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -519,19 +519,11 @@ static void locks_delete_lock(struct file_lock **thisfl_p)
*/
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
switch (caller_fl->fl_type) {
case F_RDLCK:
return (sys_fl->fl_type == F_WRLCK);
case F_WRLCK:
return (1);
default:
printk(KERN_ERR "locks_conflict(): impossible lock type - %d\n",
caller_fl->fl_type);
break;
}
return (0); /* This should never happen */
if (sys_fl->fl_type == F_WRLCK)
return 1;
if (caller_fl->fl_type == F_WRLCK)
return 1;
return 0;
}
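The rewritten locks_conflict() states the rule directly: two locks conflict exactly when at least one of them is a write lock. That makes the old switch above, and its unreachable printk for "impossible" lock types, redundant. As a truth table:

    /*
     *  caller_fl \ sys_fl |  F_RDLCK   F_WRLCK
     *  -------------------+-------------------
     *        F_RDLCK      |     0         1
     *        F_WRLCK      |     1         1
     *
     * Read locks share; any pairing that involves a write lock conflicts.
     */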
/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
@@ -1966,3 +1958,13 @@ static int __init filelock_init(void)
}
module_init(filelock_init)
EXPORT_SYMBOL(file_lock_list);
EXPORT_SYMBOL(locks_init_lock);
EXPORT_SYMBOL(locks_copy_lock);
EXPORT_SYMBOL(posix_lock_file);
EXPORT_SYMBOL(posix_test_lock);
EXPORT_SYMBOL(posix_block_lock);
EXPORT_SYMBOL(posix_unblock_lock);
EXPORT_SYMBOL(posix_locks_deadlock);
EXPORT_SYMBOL(locks_mandatory_area);
@@ -404,6 +404,8 @@ struct thread_struct {
#define INIT_TSS { \
.esp0 = sizeof(init_stack) + (long)&init_stack, \
.ss0 = __KERNEL_DS, \
.esp1 = sizeof(init_tss[0]) + (long)&init_tss[0], \
.ss1 = __KERNEL_CS, \
.ldt = GDT_ENTRY_LDT, \
.bitmap = INVALID_IO_BITMAP_OFFSET, \
.io_bitmap = { [ 0 ... IO_BITMAP_SIZE ] = ~0 }, \
@@ -412,12 +414,10 @@ struct thread_struct {
static inline void load_esp0(struct tss_struct *tss, unsigned long esp0)
{
tss->esp0 = esp0;
if (cpu_has_sep) {
if (tss->ss1 != __KERNEL_CS) {
tss->ss1 = __KERNEL_CS;
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
}
wrmsr(MSR_IA32_SYSENTER_ESP, esp0, 0);
/* This can only happen when SEP is enabled, no need to test "SEP"arately */
if (tss->ss1 != __KERNEL_CS) {
tss->ss1 = __KERNEL_CS;
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
}
}
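Because SYSENTER_ESP no longer tracks esp0, the per-switch wrmsr disappears, and the explicit cpu_has_sep test (the wrapper being removed above) goes with it: tss->ss1 starts out as __KERNEL_CS via the INIT_TSS hunk, and can only differ if some code deliberately disabled SYSENTER by clearing the MSR and ss1 together, which only ever happens on SEP-capable CPUs. That is what the "SEP"arately comment is getting at. A sketch of the invariant being relied on (illustrative, not from the patch):

    /*
     * tss->ss1 mirrors MSR_IA32_SYSENTER_CS: the two are always written
     * in step, so ss1 != __KERNEL_CS already implies SEP hardware and a
     * stale MSR that load_esp0() must rewrite.
     */
    static void set_sysenter_cs(struct tss_struct *tss, unsigned int cs)
    {
    	tss->ss1 = cs;
    	wrmsr(MSR_IA32_SYSENTER_CS, cs, 0);
    }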
@@ -18,22 +18,6 @@
* Copyright (C) 1998 Ingo Molnar.
*/
#define FPU_SAVE \
do { \
preempt_disable(); \
if (!test_thread_flag(TIF_USEDFPU)) \
__asm__ __volatile__ (" clts;\n"); \
__asm__ __volatile__ ("fsave %0; fwait": "=m"(fpu_save[0])); \
} while (0)
#define FPU_RESTORE \
do { \
__asm__ __volatile__ ("frstor %0": : "m"(fpu_save[0])); \
if (!test_thread_flag(TIF_USEDFPU)) \
stts(); \
preempt_enable(); \
} while (0)
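Every xor routine below gets the same two-line change: the local 108-byte fsave area and the FPU_SAVE/FPU_RESTORE macros above are dropped in favour of the generic kernel_fpu_begin()/kernel_fpu_end() helpers, which disable preemption, make the FPU usable without a device-not-available trap, and save the user's FPU context into the task's own save area only when that context is actually live. Roughly what the helpers amount to on i386 at this point in 2.5 -- a sketch from memory of <asm/i387.h>, not taken from this diff:

    static inline void kernel_fpu_begin(void)
    {
    	preempt_disable();
    	if (test_thread_flag(TIF_USEDFPU)) {
    		__save_init_fpu(current);	/* park live user state */
    		return;				/* CR0.TS already clear */
    	}
    	clts();		/* let the kernel use the FPU trap-free */
    }

    #define kernel_fpu_end()	do { stts(); preempt_enable(); } while (0)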
#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
@@ -46,9 +30,8 @@ static void
xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
unsigned long lines = bytes >> 7;
char fpu_save[108];
FPU_SAVE;
kernel_fpu_begin();
__asm__ __volatile__ (
#undef BLOCK
@@ -83,7 +66,7 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
:
: "memory");
FPU_RESTORE;
kernel_fpu_end();
}
static void
@@ -91,9 +74,8 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
unsigned long lines = bytes >> 7;
char fpu_save[108];
FPU_SAVE;
kernel_fpu_begin();
__asm__ __volatile__ (
#undef BLOCK
@@ -133,7 +115,7 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
:
: "memory");
FPU_RESTORE;
kernel_fpu_end();
}
static void
@@ -141,9 +123,8 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
unsigned long lines = bytes >> 7;
char fpu_save[108];
FPU_SAVE;
kernel_fpu_begin();
__asm__ __volatile__ (
#undef BLOCK
@@ -188,7 +169,7 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
:
: "memory");
FPU_RESTORE;
kernel_fpu_end();
}
@@ -197,9 +178,8 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
unsigned long lines = bytes >> 7;
char fpu_save[108];
FPU_SAVE;
kernel_fpu_begin();
/* need to save/restore p4/p5 manually otherwise gcc's 10 argument
limit gets exceeded (+ counts as two arguments) */
@@ -255,7 +235,7 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
: "r" (p4), "r" (p5)
: "memory");
FPU_RESTORE;
kernel_fpu_end();
}
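The p4/p5 juggling noted in the comment is about inline-asm operand counts: older gcc allowed at most 10 operands in a single asm statement, and a "+" (read-write) constraint counts as two. Judging by the constraints visible in this hunk, the line counter and the first three pointers are read-write operands (eight slots) and p4/p5 plain inputs (two more), which already hits the ceiling, so p4/p5 must be saved and restored by hand instead of being made read-write too. A tiny illustration of the counting rule (not from the patch):

    /* A "+" operand is both input and output, so it occupies two of
     * old gcc's 10 operand slots for one asm statement. */
    unsigned long n = 8;
    __asm__ __volatile__ ("decl %0" : "+r" (n));	/* two slots used */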
#undef LD
@@ -270,9 +250,8 @@ static void
xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
unsigned long lines = bytes >> 6;
char fpu_save[108];
FPU_SAVE;
kernel_fpu_begin();
__asm__ __volatile__ (
" .align 32 ;\n"
@@ -311,7 +290,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
:
: "memory");
FPU_RESTORE;
kernel_fpu_end();
}
static void
@@ -319,9 +298,8 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3)
{
unsigned long lines = bytes >> 6;
char fpu_save[108];
FPU_SAVE;
kernel_fpu_begin();
__asm__ __volatile__ (
" .align 32,0x90 ;\n"
@@ -369,7 +347,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
:
: "memory" );
FPU_RESTORE;
kernel_fpu_end();
}
static void
@@ -377,9 +355,8 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4)
{
unsigned long lines = bytes >> 6;
char fpu_save[108];
FPU_SAVE;
kernel_fpu_begin();
__asm__ __volatile__ (
" .align 32,0x90 ;\n"
@@ -436,7 +413,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
:
: "memory");
FPU_RESTORE;
kernel_fpu_end();
}
static void
@@ -444,9 +421,8 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
unsigned long lines = bytes >> 6;
char fpu_save[108];
FPU_SAVE;
kernel_fpu_begin();
/* need to save p4/p5 manually to not exceed gcc's 10 argument limit */
__asm__ __volatile__ (
@@ -517,7 +493,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
: "r" (p4), "r" (p5)
: "memory");
FPU_RESTORE;
kernel_fpu_end();
}
static struct xor_block_template xor_block_pII_mmx = {
@@ -536,9 +512,6 @@ static struct xor_block_template xor_block_p5_mmx = {
.do_5 = xor_p5_mmx_5,
};
#undef FPU_SAVE
#undef FPU_RESTORE
/*
* Cache avoiding checksumming functions utilizing KNI instructions
* Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
@@ -235,15 +235,6 @@ EXPORT_SYMBOL(generic_file_write_nolock);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
EXPORT_SYMBOL(generic_ro_fops);
EXPORT_SYMBOL(file_lock_list);
EXPORT_SYMBOL(locks_init_lock);
EXPORT_SYMBOL(locks_copy_lock);
EXPORT_SYMBOL(posix_lock_file);
EXPORT_SYMBOL(posix_test_lock);
EXPORT_SYMBOL(posix_block_lock);
EXPORT_SYMBOL(posix_unblock_lock);
EXPORT_SYMBOL(posix_locks_deadlock);
EXPORT_SYMBOL(locks_mandatory_area);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(d_find_alias);