Commit ab92dab4 authored by Linus Torvalds

Import 2.1.109pre1

parent 7eaba1c7
-ncpfs is a filesystem which understands the NCP protocol, designed by the
-Novell Corporation for their NetWare(tm) product. NCP is functionally
-similar to the NFS used in the tcp/ip community.
-To mount a Netware-Filesystem, you need a special mount program, which
-can be found in ncpfs package. Homesite for ncpfs is
+The ncpfs filesystem understands the NCP protocol, designed by the
+Novell Corporation for their NetWare(tm) product. NCP is functionally
+similar to the NFS used in the TCP/IP community.
+To mount a NetWare filesystem, you need a special mount program, which
+can be found in the ncpfs package. The home site for ncpfs is
 ftp.gwdg.de/pub/linux/misc/ncpfs, but sunsite and its many mirrors
 will have it as well.
 Related products are linware and mars_nwe, which will give Linux partial
-NetWare Server functionality.
-Linware's home site is: klokan.sh.cvut.cz/pub/linux/linware,
-Mars_nwe can be found on ftp.gwdg.de/pub/linux/misc/ncpfs.
+NetWare server functionality. Linware's home site is
+klokan.sh.cvut.cz/pub/linux/linware; mars_nwe can be found on
+ftp.gwdg.de/pub/linux/misc/ncpfs.
VERSION = 2
PATCHLEVEL = 1
-SUBLEVEL = 108
+SUBLEVEL = 109
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
......
@@ -94,18 +94,31 @@ ENOSYS = 38
movl %dx,%ds; \
movl %dx,%es;
#define RESTORE_ALL \
popl %ebx; \
popl %ecx; \
popl %edx; \
popl %esi; \
popl %edi; \
popl %ebp; \
popl %eax; \
popl %ds; \
popl %es; \
addl $4,%esp; \
iret
#define RESTORE_ALL \
popl %ebx; \
popl %ecx; \
popl %edx; \
popl %esi; \
popl %edi; \
popl %ebp; \
popl %eax; \
1: popl %ds; \
2: popl %es; \
3: addl $4,%esp; \
iret; \
.section fixup,"ax"; \
4: pushl $0; \
popl %ds; \
jmp 2b; \
5: pushl $0; \
popl %es; \
jmp 3b; \
.previous; \
.section __ex_table,"a";\
.align 4; \
.long 1b,4b; \
.long 2b,5b; \
.previous
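The fixup/__ex_table pair added above is the kernel's generic exception-table pattern: each .long pair records the address of an instruction that may fault and the address to resume at if it does, and the trap handlers patch EIP accordingly (see the gp_in_kernel path further down). Below is a minimal self-contained sketch of the same pattern in C inline assembly, assuming this era's section names as shown above (fixup and __ex_table); try_load_byte and the literal -14 (standing in for -EFAULT) are illustrative, not from the patch.

static inline int try_load_byte(const char *addr, unsigned char *val)
{
	int err;
	unsigned char c = 0;

	__asm__ __volatile__(
		"1:	movb (%2),%b1\n"	/* may fault on a bad address */
		"	xorl %0,%0\n"		/* success: err = 0 */
		"2:\n"
		".section fixup,\"ax\"\n"
		"3:	movl $-14,%0\n"		/* fault: err = -EFAULT, c stays 0 */
		"	jmp 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 1b,3b\n"		/* faulting insn -> fixup address */
		".previous"
		: "=r" (err), "+q" (c)
		: "r" (addr));
	*val = c;
	return err;
}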
#define GET_CURRENT(reg) \
movl %esp, reg; \
......
@@ -35,21 +35,29 @@ static int read_ldt(void * ptr, unsigned long bytecount)
static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
{
struct mm_struct * mm = current->mm;
void * ldt;
__u32 entry_1, entry_2, *lp;
__u16 selector, reg_fs, reg_gs;
int error;
struct modify_ldt_ldt_s ldt_info;
unsigned long *lp;
struct mm_struct * mm;
int error, i;
error = -EINVAL;
if (bytecount != sizeof(ldt_info))
return -EINVAL;
error = copy_from_user(&ldt_info, ptr, sizeof(ldt_info));
if (error)
return -EFAULT;
if ((ldt_info.contents == 3 && (oldmode || ldt_info.seg_not_present == 0)) || ldt_info.entry_number >= LDT_ENTRIES)
return -EINVAL;
goto out;
error = -EFAULT;
if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
goto out;
mm = current->mm;
error = -EINVAL;
if (ldt_info.entry_number >= LDT_ENTRIES)
goto out;
if (ldt_info.contents == 3) {
if (oldmode)
goto out;
if (ldt_info.seg_not_present == 0)
goto out;
}
/*
* Horrible dependencies! Try to get rid of this. This is wrong,
@@ -62,60 +70,97 @@ static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
* For no good reason except historical, the GDT index of the LDT
* is chosen to follow the index number in the task[] array.
*/
if (!mm->segments) {
for (i=1 ; i<NR_TASKS ; i++) {
if (task[i] == current) {
if (!(mm->segments = (void *) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE)))
return -ENOMEM;
memset(mm->segments, 0, LDT_ENTRIES*LDT_ENTRY_SIZE);
set_ldt_desc(gdt+(i<<1)+FIRST_LDT_ENTRY, mm->segments, LDT_ENTRIES);
load_ldt(i);
}
ldt = mm->segments;
if (!ldt) {
error = -ENOMEM;
ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
if (!ldt)
goto out;
memset(ldt, 0, LDT_ENTRIES*LDT_ENTRY_SIZE);
/*
* Make sure someone else hasn't allocated it for us ...
*/
if (!mm->segments) {
int i = current->tarray_ptr - &task[0];
mm->segments = ldt;
set_ldt_desc(gdt+(i<<1)+FIRST_LDT_ENTRY, ldt, LDT_ENTRIES);
load_ldt(i);
if (mm->count > 1)
printk(KERN_WARNING
"LDT allocated for cloned task!\n");
} else {
vfree(ldt);
}
}
lp = (unsigned long *) (LDT_ENTRY_SIZE * ldt_info.entry_number + (unsigned long) mm->segments);
/*
* Check whether the entry to be changed is currently in use.
* If it is, we may need extra validation checks in case the
* kernel is forced to save and restore the selector.
*
* Note: we check the fs and gs values as well, as these are
* loaded by the signal code and during a task switch.
*/
selector = (ldt_info.entry_number << 3) | 4;
__asm__("movw %%fs,%0" : "=r"(reg_fs));
__asm__("movw %%gs,%0" : "=r"(reg_gs));
lp = (__u32 *) ((selector & ~7) + (char *) ldt);
/* Allow LDTs to be cleared by the user. */
if (ldt_info.base_addr == 0 && ldt_info.limit == 0
&& (oldmode ||
( ldt_info.contents == 0
&& ldt_info.read_exec_only == 1
&& ldt_info.seg_32bit == 0
&& ldt_info.limit_in_pages == 0
&& ldt_info.seg_not_present == 1
&& ldt_info.useable == 0 )) ) {
*lp = 0;
*(lp+1) = 0;
return 0;
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
if (oldmode ||
(ldt_info.contents == 0 &&
ldt_info.read_exec_only == 1 &&
ldt_info.seg_32bit == 0 &&
ldt_info.limit_in_pages == 0 &&
ldt_info.seg_not_present == 1 &&
ldt_info.useable == 0 )) {
entry_1 = 0;
entry_2 = 0;
goto out_check;
}
}
*lp = ((ldt_info.base_addr & 0x0000ffff) << 16) |
entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
(ldt_info.limit & 0x0ffff);
*(lp+1) = (ldt_info.base_addr & 0xff000000) |
((ldt_info.base_addr & 0x00ff0000)>>16) |
entry_2 = (ldt_info.base_addr & 0xff000000) |
((ldt_info.base_addr & 0x00ff0000) >> 16) |
(ldt_info.limit & 0xf0000) |
(ldt_info.contents << 10) |
((ldt_info.read_exec_only ^ 1) << 9) |
(ldt_info.contents << 10) |
((ldt_info.seg_not_present ^ 1) << 15) |
(ldt_info.seg_32bit << 22) |
(ldt_info.limit_in_pages << 23) |
((ldt_info.seg_not_present ^1) << 15) |
0x7000;
if (!oldmode) *(lp+1) |= (ldt_info.useable << 20);
return 0;
if (!oldmode)
entry_2 |= (ldt_info.useable << 20);
out_check:
/* OK to change the entry ... */
*lp = entry_1;
*(lp+1) = entry_2;
error = 0;
out:
return error;
}
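To make the descriptor packing above concrete, here is a small worked example (plain userspace C, illustrative only) that encodes a present 32-bit data segment with base 0x12345678 and a byte-granular limit of 0xffff using exactly the shifts and masks write_ldt() applies; it prints entry_1=5678ffff entry_2=1240f234.

#include <stdio.h>

int main(void)
{
	unsigned int base = 0x12345678, limit = 0xffff;
	unsigned int contents = 0, read_exec_only = 0, seg_32bit = 1;
	unsigned int limit_in_pages = 0, seg_not_present = 0, useable = 0;
	unsigned int entry_1, entry_2;

	entry_1 = ((base & 0x0000ffff) << 16) | (limit & 0x0ffff);
	entry_2 = (base & 0xff000000) |		/* base 31..24 */
		  ((base & 0x00ff0000) >> 16) |	/* base 23..16 */
		  (limit & 0xf0000) |		/* limit 19..16 */
		  (contents << 10) |
		  ((read_exec_only ^ 1) << 9) |	/* writable bit */
		  ((seg_not_present ^ 1) << 15) |	/* present bit */
		  (seg_32bit << 22) |
		  (limit_in_pages << 23) |
		  0x7000 |			/* DPL 3, non-system segment */
		  (useable << 20);		/* AVL bit (new mode only) */

	printf("entry_1=%08x entry_2=%08x\n", entry_1, entry_2);
	return 0;
}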
asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
{
-int ret;
+int ret = -ENOSYS;
 lock_kernel();
-if (func == 0)
+switch (func) {
+case 0:
 ret = read_ldt(ptr, bytecount);
-else if (func == 1)
+break;
+case 1:
 ret = write_ldt(ptr, bytecount, 1);
-else if (func == 0x11)
+break;
+case 0x11:
 ret = write_ldt(ptr, bytecount, 0);
-else
-ret = -ENOSYS;
+break;
+}
unlock_kernel();
return ret;
}
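For completeness, a hypothetical userspace caller of this syscall could look like the sketch below. The local struct mirrors the modify_ldt_ldt_s fields used above, func 0x11 selects the new-mode write path (oldmode == 0), and the resulting selector ors in the table-indicator bit (4) plus RPL 3; the syscall() invocation assumes a libc that defines SYS_modify_ldt.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

struct ldt_entry {			/* mirrors struct modify_ldt_ldt_s */
	unsigned int entry_number;
	unsigned long base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};

static char buf[4096];

int main(void)
{
	struct ldt_entry ldt;

	memset(&ldt, 0, sizeof(ldt));
	ldt.entry_number = 0;
	ldt.base_addr = (unsigned long) buf;
	ldt.limit = sizeof(buf) - 1;
	ldt.seg_32bit = 1;		/* contents = 0: data segment */

	/* func 0x11: write_ldt() with oldmode == 0 */
	if (syscall(SYS_modify_ldt, 0x11, &ldt, sizeof(ldt)) < 0) {
		perror("modify_ldt");
		return 1;
	}
	printf("installed LDT selector 0x%x\n",
	       (ldt.entry_number << 3) | 4 | 3);	/* TI=1, RPL=3 */
	return 0;
}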
@@ -48,12 +48,10 @@
spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED;
struct task_struct *last_task_used_math = NULL;
#ifdef __SMP__
-asmlinkage void ret_from_smpfork(void) __asm__("ret_from_smpfork");
+asmlinkage void ret_from_fork(void) __asm__("ret_from_smpfork");
 #else
-asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
+asmlinkage void ret_from_fork(void) __asm__("ret_from_sys_call");
#endif
#ifdef CONFIG_APM
@@ -427,15 +425,20 @@ void show_regs(struct pt_regs * regs)
void release_segments(struct mm_struct *mm)
{
void * ldt;
void * ldt = mm->segments;
int nr;
/* forget local segments */
__asm__ __volatile__("movl %w0,%%fs ; movl %w0,%%gs ; lldt %w0"
: /* no outputs */
: "r" (0));
current->tss.ldt = 0;
/*
* Set the GDT entry back to the default.
*/
nr = current->tarray_ptr - &task[0];
set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY, &default_ldt, 1);
ldt = mm->segments;
if (ldt) {
mm->segments = NULL;
vfree(ldt);
@@ -447,9 +450,7 @@ void release_segments(struct mm_struct *mm)
*/
void exit_thread(void)
{
/* forget lazy i387 state */
if (last_task_used_math == current)
last_task_used_math = NULL;
/* nothing to do ... */
}
void flush_thread(void)
@@ -462,71 +463,103 @@ void flush_thread(void)
/*
* Forget coprocessor state..
*/
#ifdef __SMP__
if (current->flags & PF_USEDFPU) {
current->flags &= ~PF_USEDFPU;
stts();
}
#else
if (last_task_used_math == current) {
last_task_used_math = NULL;
stts();
}
#endif
current->used_math = 0;
current->flags &= ~PF_USEDFPU;
}
void release_thread(struct task_struct *dead_task)
{
}
static inline void unlazy_fpu(struct task_struct *tsk)
{
if (tsk->flags & PF_USEDFPU) {
tsk->flags &= ~PF_USEDFPU;
__asm__("fnsave %0":"=m" (tsk->tss.i387));
stts();
}
}
/*
* If new_mm is NULL, we're being called to set up the LDT descriptor
* for a clone task. Each clone must have a separate entry in the GDT.
*/
void copy_segments(int nr, struct task_struct *p, struct mm_struct *new_mm)
{
int ldt_size = 1;
void * ldt = &default_ldt;
struct mm_struct * old_mm = current->mm;
void * old_ldt = old_mm->segments, * ldt = old_ldt;
int ldt_size = LDT_ENTRIES;
p->tss.ldt = _LDT(nr);
if (old_mm->segments) {
new_mm->segments = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
if (new_mm->segments) {
ldt = new_mm->segments;
ldt_size = LDT_ENTRIES;
memcpy(ldt, old_mm->segments, LDT_ENTRIES*LDT_ENTRY_SIZE);
if (old_ldt) {
if (new_mm) {
ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
new_mm->segments = ldt;
if (!ldt) {
printk(KERN_WARNING "ldt allocation failed\n");
goto no_ldt;
}
memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
}
} else {
no_ldt:
ldt = &default_ldt;
ldt_size = 1;
}
set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY, ldt, ldt_size);
}
/*
* Save a segment.
*/
#define savesegment(seg,value) \
asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
/*
* Load a segment. Fall back on loading the zero
* segment if something goes wrong..
*/
#define loadsegment(seg,value) \
asm volatile("\n" \
"1:\t" \
"movl %0,%%" #seg "\n" \
"2:\n" \
".section fixup,\"ax\"\n" \
"3:\t" \
"pushl $0\n\t" \
"popl %%" #seg "\n\t" \
"jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".long 1b,3b\n" \
".previous" \
: :"m" (*(unsigned int *)&(value)))
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
struct task_struct * p, struct pt_regs * regs)
{
struct pt_regs * childregs;
p->tss.tr = _TSS(nr);
p->tss.es = __KERNEL_DS;
p->tss.cs = __KERNEL_CS;
p->tss.ss = __KERNEL_DS;
p->tss.ds = __KERNEL_DS;
p->tss.fs = __USER_DS;
p->tss.gs = __USER_DS;
set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
p->tss.ss0 = __KERNEL_DS;
p->tss.esp0 = 2*PAGE_SIZE + (unsigned long) p;
childregs = ((struct pt_regs *) (p->tss.esp0)) - 1;
p->tss.esp = (unsigned long) childregs;
#ifdef __SMP__
p->tss.eip = (unsigned long) ret_from_smpfork;
p->tss.eflags = regs->eflags & 0xffffcdff; /* iopl always 0 for a new process */
#else
p->tss.eip = (unsigned long) ret_from_sys_call;
p->tss.eflags = regs->eflags & 0xffffcfff; /* iopl always 0 for a new process */
#endif
p->tss.ebx = (unsigned long) p;
*childregs = *regs;
childregs->eax = 0;
childregs->esp = esp;
p->tss.back_link = 0;
set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
childregs->eflags = regs->eflags & 0xffffcfff; /* iopl always 0 for a new process */
p->tss.esp = (unsigned long) childregs;
p->tss.eip = (unsigned long) ret_from_fork;
savesegment(fs,p->tss.fs);
savesegment(gs,p->tss.gs);
/*
* a bitmap offset pointing outside of the TSS limit causes a nicely
@@ -535,12 +568,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
*/
p->tss.bitmap = sizeof(struct thread_struct);
#ifdef __SMP__
if (current->flags & PF_USEDFPU)
#else
if (last_task_used_math == current)
#endif
__asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
unlazy_fpu(current);
asm volatile("fwait");
p->tss.i387 = current->tss.i387;
return 0;
}
@@ -552,16 +582,11 @@ int dump_fpu (struct pt_regs * regs, struct user_i387_struct* fpu)
{
int fpvalid;
if ((fpvalid = current->used_math) != 0) {
if (boot_cpu_data.hard_math) {
if (last_task_used_math == current) {
__asm__("clts ; fsave %0; fwait": :"m" (*fpu));
}
else
memcpy(fpu,&current->tss.i387.hard,sizeof(*fpu));
} else {
memcpy(fpu,&current->tss.i387.hard,sizeof(*fpu));
}
fpvalid = current->used_math;
if (fpvalid) {
unlazy_fpu(current);
asm volatile("fwait");
memcpy(fpu,&current->tss.i387.hard,sizeof(*fpu));
}
return fpvalid;
@@ -597,8 +622,8 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
dump->regs.eax = regs->eax;
dump->regs.ds = regs->xds;
dump->regs.es = regs->xes;
__asm__("movl %%fs,%0":"=r" (dump->regs.fs));
__asm__("movl %%gs,%0":"=r" (dump->regs.gs));
savesegment(fs,dump->regs.fs);
savesegment(gs,dump->regs.gs);
dump->regs.orig_eax = regs->orig_eax;
dump->regs.eip = regs->eip;
dump->regs.cs = regs->xcs;
@@ -609,6 +634,89 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
/*
* This special macro can be used to load a debugging register
*/
#define loaddebug(tsk,register) \
__asm__("movl %0,%%db" #register \
: /* no output */ \
:"r" (tsk->debugreg[register]))
/*
* switch_to(x,y) should switch tasks from x to y.
*
* We fsave/fwait so that an exception goes off at the right time
* (as a call from the fsave or fwait in effect) rather than to
* the wrong process. Lazy FP saving no longer makes any sense
* with modern CPUs, and this simplifies a lot of things (SMP
* and UP become the same).
*
* NOTE! We used to use the x86 hardware context switching. The
* reason for not using it any more becomes apparent when you
* try to recover gracefully from saved state that is no longer
* valid (stale segment register values in particular). With the
* hardware task-switch, there is no way to fix up bad state in
* a reasonable manner.
*
* The fact that Intel documents the hardware task-switching to
* be slow is a fairly red herring - this code is not noticeably
* faster. However, there _is_ some room for improvement here,
* so the performance issues may eventually be a valid point.
* More important, however, is the fact that this allows us much
* more flexibility.
*/
void __switch_to(struct task_struct *prev, struct task_struct *next)
{
/* Do the FPU save and set TS if it wasn't set before.. */
unlazy_fpu(prev);
/*
* Reload TR, LDT and the page table pointers..
*
* We need TR for the IO permission bitmask (and
* the vm86 bitmasks in case we ever use enhanced
* v86 mode properly).
*
* We could do LDT things lazily if this turns out
* to be a win. Most processes will have the default
* LDT.
*
* We want to get rid of the TR register some day,
* and copy the bitmaps around by hand. Oh, well.
* In the meantime we have to clear the busy bit
* in the TSS entry, ugh.
*/
gdt_table[next->tss.tr >> 3].b &= 0xfffffdff;
asm volatile("ltr %0": :"g" (*(unsigned short *)&next->tss.tr));
asm volatile("lldt %0": :"g" (*(unsigned short *)&next->tss.ldt));
if (next->tss.cr3 != prev->tss.cr3)
asm volatile("movl %0,%%cr3": :"r" (next->tss.cr3));
/*
* Save away %fs and %gs. No need to save %es and %ds, as
* those are always kernel segments while inside the kernel.
* Restore the new values.
*/
asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->tss.fs));
asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->tss.gs));
loadsegment(fs,next->tss.fs);
loadsegment(gs,next->tss.gs);
/*
* Now maybe reload the debug registers
*/
if (next->debugreg[7]){
loaddebug(next,0);
loaddebug(next,1);
loaddebug(next,2);
loaddebug(next,3);
loaddebug(next,6);
loaddebug(next,7);
}
}
asmlinkage int sys_fork(struct pt_regs regs)
{
int ret;
......
@@ -624,14 +624,8 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
#ifdef CONFIG_MATH_EMULATION
if ( boot_cpu_data.hard_math ) {
#endif
if (last_task_used_math == child) {
clts();
__asm__("fnsave %0; fwait":"=m" (child->tss.i387.hard));
last_task_used_math = NULL;
stts();
}
__copy_to_user((void *)data, &child->tss.i387.hard,
sizeof(struct user_i387_struct));
__copy_to_user((void *)data, &child->tss.i387.hard,
sizeof(struct user_i387_struct));
#ifdef CONFIG_MATH_EMULATION
} else {
save_i387_soft(&child->tss.i387.soft,
@@ -652,13 +646,10 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
#ifdef CONFIG_MATH_EMULATION
if ( boot_cpu_data.hard_math ) {
#endif
if (last_task_used_math == child) {
/* Discard the state of the FPU */
last_task_used_math = NULL;
}
__copy_from_user(&child->tss.i387.hard, (void *)data,
sizeof(struct user_i387_struct));
child->flags &= ~PF_USEDFPU;
stts();
#ifdef CONFIG_MATH_EMULATION
} else {
restore_i387_soft(&child->tss.i387.soft,
......
@@ -153,17 +153,10 @@ struct rt_sigframe
static inline int restore_i387_hard(struct _fpstate *buf)
{
#ifdef __SMP__
if (current->flags & PF_USEDFPU) {
current->flags &= ~PF_USEDFPU;
stts();
}
#else
if (current == last_task_used_math) {
last_task_used_math = NULL;
stts();
}
#endif
current->flags &= ~PF_USEDFPU;
return __copy_from_user(&current->tss.i387.hard, buf, sizeof(*buf));
}
@@ -315,20 +308,12 @@ asmlinkage int sys_rt_sigreturn(unsigned long __unused)
static inline int save_i387_hard(struct _fpstate * buf)
{
#ifdef __SMP__
if (current->flags & PF_USEDFPU) {
__asm__ __volatile__("fnsave %0":"=m"(current->tss.i387.hard));
stts();
current->flags &= ~PF_USEDFPU;
}
#else
if (current == last_task_used_math) {
__asm__ __volatile__("fnsave %0":"=m"(current->tss.i387.hard));
last_task_used_math = NULL;
__asm__ __volatile__("fwait"); /* not needed on 486+ */
stts();
}
#endif
asm volatile("fwait");
current->tss.i387.hard.status = current->tss.i387.hard.swd;
if (__copy_to_user(buf, &current->tss.i387.hard, sizeof(*buf)))
return -1;
......
@@ -66,23 +66,6 @@ out: \
unlock_kernel(); \
}
#define get_seg_byte(seg,addr) ({ \
register unsigned char __res; \
__asm__("pushl %%fs;movl %%ax,%%fs;movb %%fs:%2,%%al;popl %%fs" \
:"=a" (__res):"0" (seg),"m" (*(addr))); \
__res;})
#define get_seg_long(seg,addr) ({ \
register unsigned long __res; \
__asm__("pushl %%fs;movl %%ax,%%fs;movl %%fs:%2,%%eax;popl %%fs" \
:"=a" (__res):"0" (seg),"m" (*(addr))); \
__res;})
#define _fs() ({ \
register unsigned short __res; \
__asm__("movl %%fs,%%ax":"=a" (__res):); \
__res;})
void page_exception(void);
asmlinkage void divide_error(void);
@@ -118,6 +101,7 @@ int kstack_depth_to_print = 24;
static void show_registers(struct pt_regs *regs)
{
int i;
int in_kernel = 1;
unsigned long esp;
unsigned short ss;
unsigned long *stack, addr, module_start, module_end;
@@ -126,6 +110,7 @@ static void show_registers(struct pt_regs *regs)
esp = (unsigned long) &regs->esp;
ss = __KERNEL_DS;
if (regs->xcs & 3) {
in_kernel = 0;
esp = regs->esp;
ss = regs->xss & 0xffff;
}
@@ -138,53 +123,59 @@ static void show_registers(struct pt_regs *regs)
printk("ds: %04x es: %04x ss: %04x\n",
regs->xds & 0xffff, regs->xes & 0xffff, ss);
store_TR(i);
printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)\nStack: ",
printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
current->comm, current->pid, 0xffff & i, 4096+(unsigned long)current);
stack = (unsigned long *) esp;
for(i=0; i < kstack_depth_to_print; i++) {
if (((long) stack & 4095) == 0)
break;
if (i && ((i % 8) == 0))
printk("\n ");
printk("%08lx ", get_seg_long(ss,stack++));
}
printk("\nCall Trace: ");
stack = (unsigned long *) esp;
i = 1;
module_start = PAGE_OFFSET + (max_mapnr << PAGE_SHIFT);
module_start = ((module_start + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
module_end = module_start + MODULE_RANGE;
while (((long) stack & 4095) != 0) {
addr = get_seg_long(ss, stack++);
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
* memory, it *may* be the address of a calling
* routine; if so, print it so that someone tracing
* down the cause of the crash will be able to figure
* out the call path that was taken.
*/
if (((addr >= (unsigned long) &_stext) &&
(addr <= (unsigned long) &_etext)) ||
((addr >= module_start) && (addr <= module_end))) {
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
*/
if (in_kernel) {
printk("\nStack: ");
stack = (unsigned long *) esp;
for(i=0; i < kstack_depth_to_print; i++) {
if (((long) stack & 4095) == 0)
break;
if (i && ((i % 8) == 0))
printk("\n ");
printk("[<%08lx>] ", addr);
i++;
printk("%08lx ", *stack++);
}
printk("\nCall Trace: ");
stack = (unsigned long *) esp;
i = 1;
module_start = PAGE_OFFSET + (max_mapnr << PAGE_SHIFT);
module_start = ((module_start + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
module_end = module_start + MODULE_RANGE;
while (((long) stack & 4095) != 0) {
addr = *stack++;
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
* memory, it *may* be the address of a calling
* routine; if so, print it so that someone tracing
* down the cause of the crash will be able to figure
* out the call path that was taken.
*/
if (((addr >= (unsigned long) &_stext) &&
(addr <= (unsigned long) &_etext)) ||
((addr >= module_start) && (addr <= module_end))) {
if (i && ((i % 8) == 0))
printk("\n ");
printk("[<%08lx>] ", addr);
i++;
}
}
printk("\nCode: ");
for(i=0;i<20;i++)
printk("%02x ", ((unsigned char *)regs->eip)[i]);
printk("\n");
}
printk("\nCode: ");
for(i=0;i<20;i++)
printk("%02x ",0xff & get_seg_byte(regs->xcs & 0xffff,(i+(char *)regs->eip)));
printk("\n");
}
spinlock_t die_lock;
void die_if_kernel(const char * str, struct pt_regs * regs, long err)
void die(const char * str, struct pt_regs * regs, long err)
{
if ((regs->eflags & VM_MASK) || (3 & regs->xcs) == 3)
return;
console_verbose();
spin_lock_irq(&die_lock);
printk("%s: %04lx\n", str, err & 0xffff);
@@ -193,6 +184,12 @@ void die_if_kernel(const char * str, struct pt_regs * regs, long err)
do_exit(SIGSEGV);
}
static void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
die(str, regs, err);
}
DO_VM86_ERROR( 0, SIGFPE, "divide error", divide_error, current)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3, current)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow, current)
@@ -200,7 +197,7 @@ DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds, current)
DO_ERROR( 6, SIGILL, "invalid operand", invalid_op, current)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available, current)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault, current)
-DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun, last_task_used_math)
+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun, current)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS, current)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present, current)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment, current)
@@ -224,17 +221,34 @@ asmlinkage void cache_flush_denied(struct pt_regs * regs, long error_code)
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
if (regs->eflags & VM_MASK)
goto gp_in_vm86;
if (!(regs->xcs & 3))
goto gp_in_kernel;
lock_kernel();
if (regs->eflags & VM_MASK) {
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
goto out;
}
die_if_kernel("general protection",regs,error_code);
current->tss.error_code = error_code;
current->tss.trap_no = 13;
force_sig(SIGSEGV, current);
out:
return;
gp_in_vm86:
lock_kernel();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
unlock_kernel();
return;
gp_in_kernel:
{
unsigned long fixup;
fixup = search_exception_table(regs->eip);
if (fixup) {
regs->eip = fixup;
return;
}
die("general protection fault", regs, error_code);
}
}
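The gp_in_kernel path is the consumer side of the __ex_table entries emitted by RESTORE_ALL and loadsegment above: search_exception_table() maps a faulting kernel EIP to its fixup address, and the handler simply resumes there. A simplified sketch of such a lookup, assuming the conventional linker-generated section bounds (the real routine lives elsewhere in the tree and may search more cleverly):

struct exception_table_entry {
	unsigned long insn, fixup;
};

/* start/end of the __ex_table section, provided by the linker */
extern const struct exception_table_entry __start___ex_table[];
extern const struct exception_table_entry __stop___ex_table[];

static unsigned long search_exception_table_sketch(unsigned long addr)
{
	const struct exception_table_entry *p;

	for (p = __start___ex_table; p < __stop___ex_table; p++)
		if (p->insn == addr)
			return p->fixup;
	return 0;	/* no fixup: this is a genuine kernel fault */
}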
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
@@ -295,9 +309,7 @@ asmlinkage void do_debug(struct pt_regs * regs, long error_code)
__asm__("movl %0,%%db7"
: /* no output */
: "r" (0));
goto out;
}
die_if_kernel("debug",regs,error_code);
out:
unlock_kernel();
}
@@ -313,16 +325,7 @@ void math_error(void)
lock_kernel();
clts();
#ifdef __SMP__
task = current;
#else
task = last_task_used_math;
last_task_used_math = NULL;
if (!task) {
__asm__("fnclex");
goto out;
}
#endif
/*
* Save the info for the exception handler
*/
@@ -333,9 +336,6 @@ void math_error(void)
force_sig(SIGFPE, task);
task->tss.trap_no = 16;
task->tss.error_code = 0;
#ifndef __SMP__
out:
#endif
unlock_kernel();
}
@@ -373,15 +373,6 @@ asmlinkage void math_state_restore(void)
* case we swap processors. We also don't use the coprocessor
* timer - IRQ 13 mode isn't used with SMP machines (thank god).
*/
#ifndef __SMP__
if (last_task_used_math == current)
return;
if (last_task_used_math)
__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
else
__asm__("fnclex");
last_task_used_math = current;
#endif
if(current->used_math)
__asm__("frstor %0": :"m" (current->tss.i387));
......
@@ -22,7 +22,7 @@
#include <asm/pgtable.h>
#include <asm/hardirq.h>
-extern void die_if_kernel(const char *,struct pt_regs *,long);
+extern void die(const char *,struct pt_regs *,long);
/*
* Ugly, ugly, but the goto's result in better assembly..
@@ -101,7 +101,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
__asm__("movl %%cr2,%0":"=r" (address));
if (local_irq_count[smp_processor_id()])
die_if_kernel("page fault from irq handler",regs,error_code);
die("page fault from irq handler",regs,error_code);
tsk = current;
mm = tsk->mm;
@@ -235,7 +235,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
printk(KERN_ALERT "*pte = %08lx\n", page);
}
lock_kernel();
die_if_kernel("Oops", regs, error_code);
die("Oops", regs, error_code);
do_exit(SIGKILL);
unlock_kernel();
}
@@ -27,7 +27,6 @@
#include <asm/pgtable.h>
#include <asm/dma.h>
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
void __bad_pte_kernel(pmd_t *pmd)
......
@@ -2316,7 +2316,6 @@ __initfunc(unsigned long con_init(unsigned long kmem_start))
{
const char *display_desc = NULL;
unsigned int currcons = 0;
char q[2] = { 0, 1 };
if (conswitchp)
kmem_start = conswitchp->con_startup(kmem_start,
@@ -2397,11 +2396,15 @@ __initfunc(unsigned long con_init(unsigned long kmem_start))
#if 0
/* The logo is too ugly to live */
{
char q[2] = { 0, 1 };
if (console_show_logo)
q[1] += console_show_logo();
conswitchp->con_putcs(vc_cons[fg_console].d, linux_logo_banner,
sizeof(linux_logo_banner)-1, q[1]-1, q[0]);
putconsxy(0, q);
}
#endif
sw->con_cursor(vc_cons[currcons].d, CM_DRAW);
printk("Console: %s %s %ldx%ld",
......
@@ -811,7 +811,7 @@
** Media / mode state machine definitions
** User selectable:
*/
-#define TP 0x0001 /* 10Base-T */
+#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */
#define TP_NW 0x0002 /* 10Base-T with Nway */
#define BNC 0x0004 /* Thinwire */
#define AUI 0x0008 /* Thickwire */
......
#
-# Makefile for the linux ncp-filesystem routines.
+# Makefile for the linux ncp filesystem routines.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
+# unless it's something special (not a .c file).
#
-# Note 2! The CFLAGS definitions are now in the main makefile...
+# Note 2! The CFLAGS definitions are now in the main makefile.
O_TARGET := ncpfs.o
O_OBJS := dir.o file.o inode.o ioctl.o mmap.o ncplib_kernel.o sock.o \
......
@@ -41,7 +41,7 @@ __initfunc(static void copro_timeout(void))
timer_table[COPRO_TIMER].expires = jiffies+100;
timer_active |= 1<<COPRO_TIMER;
printk(KERN_ERR "387 failed: trying to reset\n");
-send_sig(SIGFPE, last_task_used_math, 1);
+send_sig(SIGFPE, current, 1);
outb_p(0,0xf1);
outb_p(0,0xf0);
}
@@ -156,7 +156,7 @@ __initfunc(static void check_popad(void))
* misexecution of code under Linux. Owners of such processors should
* contact AMD for precise details and a CPU swap.
*
-* See http://www.chorus.com/~poulot/k6bug.html
+* See http://www.mygale.com/~poulot/k6bug.html
* http://www.amd.com/K6/k6docs/revgd.html
*
* The following test is erm.. interesting. AMD neglected to up
@@ -202,7 +202,7 @@ __initfunc(static void check_amd_k6(void))
printk("system stability may be impaired when more than 32 MB are used.\n");
else
printk("probably OK (after B9730xxxx).\n");
printk(KERN_INFO "Please see http://www.chorus.com/poulot/k6bug.html\n");
printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
}
}
......
@@ -74,9 +74,6 @@ extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
/*
* User space process size: 3GB (default).
*/
@@ -166,33 +163,34 @@ struct thread_struct {
#define INIT_MMAP \
{ &init_mm, 0, 0, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, NULL, &init_mm.mmap }
#define INIT_TSS { \
0,0, \
sizeof(init_stack) + (long) &init_stack, \
__KERNEL_DS, 0, \
0,0,0,0,0,0, \
(long) &swapper_pg_dir - PAGE_OFFSET, \
0,0,0,0,0,0,0,0,0,0, \
__USER_DS,0,__USER_DS,0,__USER_DS,0, \
__USER_DS,0,__USER_DS,0,__USER_DS,0, \
_LDT(0),0, \
0, 0x8000, \
{~0, }, /* ioperm */ \
_TSS(0), 0, 0, 0, (mm_segment_t) { 0 } /* obsolete */ , \
{ { 0, }, }, /* 387 state */ \
NULL, 0, 0, 0, 0, 0 /* vm86_info */, \
#define INIT_TSS { \
0,0, /* back_link, __blh */ \
sizeof(init_stack) + (long) &init_stack, /* esp0 */ \
__KERNEL_DS, 0, /* ss0 */ \
0,0,0,0,0,0, /* stack1, stack2 */ \
(long) &swapper_pg_dir - PAGE_OFFSET, /* cr3 */ \
0,0, /* eip,eflags */ \
0,0,0,0, /* eax,ecx,edx,ebx */ \
0,0,0,0, /* esp,ebp,esi,edi */ \
0,0,0,0,0,0, /* es,cs,ss */ \
0,0,0,0,0,0, /* ds,fs,gs */ \
_LDT(0),0, /* ldt */ \
0, 0x8000, /* trace, bitmap */ \
{~0, }, /* ioperm */ \
_TSS(0), 0, 0, 0, (mm_segment_t) { 0 }, /* obsolete */ \
{ { 0, }, }, /* 387 state */ \
NULL, 0, 0, 0, 0, 0, /* vm86_info */ \
}
#define start_thread(regs, new_eip, new_esp) do {\
unsigned long seg = __USER_DS; \
__asm__("movl %w0,%%fs ; movl %w0,%%gs":"=r" (seg) :"0" (seg)); \
set_fs(USER_DS); \
regs->xds = seg; \
regs->xes = seg; \
regs->xss = seg; \
regs->xcs = __USER_CS; \
regs->eip = new_eip; \
regs->esp = new_esp; \
#define start_thread(regs, new_eip, new_esp) do { \
__asm__("movl %w0,%%fs ; movl %w0,%%gs": :"r" (0)); \
set_fs(USER_DS); \
regs->xds = __USER_DS; \
regs->xes = __USER_DS; \
regs->xss = __USER_DS; \
regs->xcs = __USER_CS; \
regs->eip = new_eip; \
regs->esp = new_esp; \
} while (0)
/* Forward declaration, a strange C thing */
......
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H
#include <linux/kernel.h>
#include <asm/segment.h>
/*
@@ -35,84 +36,35 @@ __asm__("str %%ax\n\t" \
:"=a" (n) \
:"0" (0),"i" (FIRST_TSS_ENTRY<<3))
/* This special macro can be used to load a debugging register */
#define loaddebug(tsk,register) \
__asm__("movl %0,%%db" #register \
: /* no output */ \
:"r" (tsk->debugreg[register]))
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
/*
* switch_to(n) should switch tasks to task nr n, first
* checking that n isn't the current task, in which case it does nothing.
* This also clears the TS-flag if the task we switched to has used
* the math co-processor latest.
*
* It also reloads the debug regs if necessary..
* We do most of the task switching in C, but we need
* to do the EIP/ESP switch in assembly..
*/
#ifdef __SMP__
/*
* Keep the lock depth straight. If we switch on an interrupt from
* kernel->user task we need to lose a depth, and if we switch the
* other way we need to gain a depth. Same layer switches come out
* the same.
*
* We spot a switch in user mode because the kernel counter is the
* same as the interrupt counter depth. (We never switch during the
* message/invalidate IPI).
*
* We fsave/fwait so that an exception goes off at the right time
* (as a call from the fsave or fwait in effect) rather than to
* the wrong process.
*/
#define switch_to(prev,next) do { \
if(prev->flags&PF_USEDFPU) \
{ \
__asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
__asm__ __volatile__("fwait"); \
prev->flags&=~PF_USEDFPU; \
} \
__asm__("ljmp %0\n\t" \
: /* no output */ \
:"m" (*(((char *)&next->tss.tr)-4)), \
"c" (next)); \
/* Now maybe reload the debug registers */ \
if(prev->debugreg[7]){ \
loaddebug(prev,0); \
loaddebug(prev,1); \
loaddebug(prev,2); \
loaddebug(prev,3); \
loaddebug(prev,6); \
loaddebug(prev,7); \
} \
#define switch_to(prev,next) do { \
unsigned long eax, edx, ecx; \
asm volatile("pushl %%edi\n\t" \
"pushl %%esi\n\t" \
"pushl %%ebp\n\t" \
"pushl %%ebx\n\t" \
"movl %%esp,%0\n\t" /* save ESP */ \
"movl %5,%%esp\n\t" /* restore ESP */ \
"movl $1f,%1\n\t" /* save EIP */ \
"pushl %6\n\t" /* restore EIP */ \
"jmp __switch_to\n" \
"1:\t" \
"popl %%ebx\n\t" \
"popl %%ebp\n\t" \
"popl %%esi\n\t" \
"popl %%edi" \
:"=m" (prev->tss.esp),"=m" (prev->tss.eip), \
"=a" (eax), "=d" (edx), "=c" (ecx) \
:"m" (next->tss.esp),"m" (next->tss.eip), \
"a" (prev), "d" (next)); \
} while (0)
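Read together with __switch_to() in process.c above: the macro saves the outgoing task's ESP and stores label 1 as its resume EIP, loads the incoming task's saved ESP, pushes the incoming task's saved EIP, and jumps to __switch_to(); the ret from that function then lands either at ret_from_fork (for a task fresh out of copy_thread()) or back at label 1, where the callee-saved registers are popped. The "a" (prev) and "d" (next) constraints match the register-argument (FASTCALL) convention __switch_to() is declared with.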
#else
#define switch_to(prev,next) do { \
__asm__("ljmp %0\n\t" \
"cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
"jne 1f\n\t" \
"clts\n" \
"1:" \
: /* no outputs */ \
:"m" (*(((char *)&next->tss.tr)-4)), \
"r" (prev), "r" (next)); \
/* Now maybe reload the debug registers */ \
if(prev->debugreg[7]){ \
loaddebug(prev,0); \
loaddebug(prev,1); \
loaddebug(prev,2); \
loaddebug(prev,3); \
loaddebug(prev,6); \
loaddebug(prev,7); \
} \
} while (0)
#endif
#define _set_base(addr,base) \
__asm__("movw %%dx,%0\n\t" \
"rorl $16,%%edx\n\t" \
......
@@ -308,6 +308,10 @@ static inline int copy_mm(int nr, unsigned long clone_flags, struct task_struct
if (clone_flags & CLONE_VM) {
mmget(current->mm);
/*
* Set up the LDT descriptor for the clone task.
*/
copy_segments(nr, tsk, NULL);
SET_PAGE_DIR(tsk, current->mm->pgd);
return 0;
}
......
@@ -14,8 +14,6 @@
#include <stdarg.h>
#include <asm/system.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -27,6 +25,7 @@
#include <linux/console.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#define LOG_BUF_LEN 8192
......
@@ -851,7 +851,7 @@ static int file_send_actor(read_descriptor_t * desc, const char *area, unsigned
return written;
}
-asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, size_t count)
+asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count)
{
ssize_t retval;
struct file * in_file, * out_file;
@@ -900,16 +900,27 @@ asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, size_t count)
 retval = 0;
 if (count) {
 read_descriptor_t desc;
+loff_t pos = 0, *ppos;
+retval = -EFAULT;
+ppos = &in_file->f_pos;
+if (offset) {
+if (get_user(pos, offset))
+goto fput_out;
+ppos = &pos;
+}
 desc.written = 0;
 desc.count = count;
 desc.buf = (char *) out_file;
 desc.error = 0;
-do_generic_file_read(in_file, &in_file->f_pos, &desc, file_send_actor);
+do_generic_file_read(in_file, ppos, &desc, file_send_actor);
 retval = desc.written;
 if (!retval)
 retval = desc.error;
+if (offset)
+put_user(pos, offset);
 }
......
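The new offset argument makes the read position explicit: when it is non-NULL the kernel reads the starting offset from user space, leaves in_file->f_pos untouched, and writes the final position back on return. A hypothetical userspace caller is sketched below; it goes through syscall() directly, and assumes __NR_sendfile is available (e.g. from <asm/unistd.h>), since no libc wrapper can be taken for granted at this point.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <asm/unistd.h>		/* assumed to provide __NR_sendfile */

int main(int argc, char **argv)
{
	off_t offset = 0;	/* kernel reads and updates this */
	long sent;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s file\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* copy up to 64k from fd to stdout starting at offset 0;
	   fd's own file position is left untouched because a
	   non-NULL offset pointer was passed */
	sent = syscall(__NR_sendfile, STDOUT_FILENO, fd, &offset, 65536);
	if (sent < 0) {
		perror("sendfile");
		return 1;
	}
	fprintf(stderr, "sent %ld bytes, offset now %ld\n",
		sent, (long) offset);
	return 0;
}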