Commit c0454a9f authored by Linus Torvalds

Merge http://ppc.bkbits.net/for-linus-ppc64

into home.transmeta.com:/home/torvalds/v2.5/linux
parents d0f733e4 0288cf54
@@ -144,7 +144,6 @@ EXPORT_SYMBOL(pci_dac_dma_to_offset);
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(hwrpb);
EXPORT_SYMBOL(wrusp);
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(alpha_read_fp_reg);
EXPORT_SYMBOL(alpha_read_fp_reg_s);
@@ -20,11 +20,22 @@ void foo(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
BLANK();
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
DEFINE(TASK_UID, offsetof(struct task_struct, uid));
DEFINE(TASK_EUID, offsetof(struct task_struct, euid));
DEFINE(TASK_GID, offsetof(struct task_struct, gid));
DEFINE(TASK_EGID, offsetof(struct task_struct, egid));
DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
BLANK();
DEFINE(PT_PTRACED, PT_PTRACED);
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
DEFINE(SIGCHLD, SIGCHLD);
BLANK();
DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
}
/*
* alpha/entry.S
* arch/alpha/kernel/entry.S
*
* kernel entry-points
* Kernel entry-points.
*/
#include <linux/config.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <asm/asm_offsets.h>
#include <asm/thread_info.h>
#include <asm/pal.h>
#include <asm/errno.h>
#include <asm/unistd.h>
/*
* stack offsets
*/
.text
.set noat
/* Stack offsets. */
#define SP_OFF 184
#define SWITCH_STACK_SIZE 320
@@ -28,145 +29,578 @@
*/
#define SAVE_ALL \
subq $30,SP_OFF,$30; \
stq $0,0($30); \
stq $1,8($30); \
stq $2,16($30); \
stq $3,24($30); \
stq $4,32($30); \
stq $28,144($30); \
lda $2,alpha_mv; \
stq $5,40($30); \
stq $6,48($30); \
stq $7,56($30); \
stq $8,64($30); \
stq $19,72($30); \
stq $20,80($30); \
stq $21,88($30); \
ldq $2,HAE_CACHE($2); \
stq $22,96($30); \
stq $23,104($30); \
stq $24,112($30); \
stq $25,120($30); \
stq $26,128($30); \
stq $27,136($30); \
stq $2,152($30); \
stq $16,160($30); \
stq $17,168($30); \
stq $18,176($30)
subq $sp, SP_OFF, $sp; \
stq $0, 0($sp); \
stq $1, 8($sp); \
stq $2, 16($sp); \
stq $3, 24($sp); \
stq $4, 32($sp); \
stq $28, 144($sp); \
lda $2, alpha_mv; \
stq $5, 40($sp); \
stq $6, 48($sp); \
stq $7, 56($sp); \
stq $8, 64($sp); \
stq $19, 72($sp); \
stq $20, 80($sp); \
stq $21, 88($sp); \
ldq $2, HAE_CACHE($2); \
stq $22, 96($sp); \
stq $23, 104($sp); \
stq $24, 112($sp); \
stq $25, 120($sp); \
stq $26, 128($sp); \
stq $27, 136($sp); \
stq $2, 152($sp); \
stq $16, 160($sp); \
stq $17, 168($sp); \
stq $18, 176($sp)
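For reference, the frame that SAVE_ALL lays out is the kernel's struct pt_regs; a minimal host-side sketch (assuming the 2.5-era Alpha layout, with a stand-in name) whose offsets line up with the stq slots above:
/* pt_regs as SAVE_ALL builds it: hae at 152, a0-a2 at 160-176,
   ps at SP_OFF == 184. A sketch, not the kernel header itself. */
#include <stddef.h>
#include <stdio.h>
struct pt_regs_sketch {
	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8;
	unsigned long r19, r20, r21, r22, r23, r24, r25, r26, r27, r28;
	unsigned long hae;			/* 152($sp) */
	unsigned long trap_a0, trap_a1, trap_a2;/* a0-a2; not PAL-saved */
	unsigned long ps;			/* SP_OFF($sp) */
	unsigned long pc, gp;
	unsigned long r16, r17, r18;		/* saved by PALcode */
};
int main(void)
{
	/* prints 152 184 208 on an LP64 host; 208 == SP_OFF+24,
	   the slot entSys uses for $16 */
	printf("%zu %zu %zu\n",
	       offsetof(struct pt_regs_sketch, hae),
	       offsetof(struct pt_regs_sketch, ps),
	       offsetof(struct pt_regs_sketch, r16));
	return 0;
}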
#define RESTORE_ALL \
lda $19,alpha_mv; \
ldq $0,0($30); \
ldq $1,8($30); \
ldq $2,16($30); \
ldq $3,24($30); \
ldq $21,152($30); \
ldq $20,HAE_CACHE($19); \
ldq $4,32($30); \
ldq $5,40($30); \
ldq $6,48($30); \
ldq $7,56($30); \
subq $20,$21,$20; \
ldq $8,64($30); \
beq $20,99f; \
ldq $20,HAE_REG($19); \
stq $21,HAE_CACHE($19); \
stq $21,0($20); \
ldq $0,0($30); \
ldq $1,8($30); \
lda $19, alpha_mv; \
ldq $0, 0($sp); \
ldq $1, 8($sp); \
ldq $2, 16($sp); \
ldq $3, 24($sp); \
ldq $21, 152($sp); \
ldq $20, HAE_CACHE($19); \
ldq $4, 32($sp); \
ldq $5, 40($sp); \
ldq $6, 48($sp); \
ldq $7, 56($sp); \
subq $20, $21, $20; \
ldq $8, 64($sp); \
beq $20, 99f; \
ldq $20, HAE_REG($19); \
stq $21, HAE_CACHE($19); \
stq $21, 0($20); \
ldq $0, 0($sp); \
ldq $1, 8($sp); \
99:; \
ldq $19,72($30); \
ldq $20,80($30); \
ldq $21,88($30); \
ldq $22,96($30); \
ldq $23,104($30); \
ldq $24,112($30); \
ldq $25,120($30); \
ldq $26,128($30); \
ldq $27,136($30); \
ldq $28,144($30); \
addq $30,SP_OFF,$30
.text
.set noat
.align 3
.globl entInt
.ent entInt
ldq $19, 72($sp); \
ldq $20, 80($sp); \
ldq $21, 88($sp); \
ldq $22, 96($sp); \
ldq $23, 104($sp); \
ldq $24, 112($sp); \
ldq $25, 120($sp); \
ldq $26, 128($sp); \
ldq $27, 136($sp); \
ldq $28, 144($sp); \
addq $sp, SP_OFF, $sp
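In C terms, the HAE sequence folded into RESTORE_ALL is a compare against the cached value with a conditional write-back; a hedged rendering using the alpha_machine_vector fields named in the asm_offsets hunk above, not a drop-in replacement:
/* sketch of the branch to 99f above */
if (alpha_mv.hae_cache != regs->hae) {
	alpha_mv.hae_cache = regs->hae;		/* stq $21, HAE_CACHE($19) */
	*alpha_mv.hae_register = regs->hae;	/* stq $21, 0($20) */
}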
/*
* Non-syscall kernel entry points.
*/
.align 4
.globl entInt
.ent entInt
entInt:
SAVE_ALL
lda $8,0x3fff
lda $26,ret_from_sys_call
bic $30,$8,$8
jsr $31,do_entInt
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $19
jsr $31, do_entInt
.end entInt
.align 3
.globl entMM
.ent entMM
.align 4
.globl entArith
.ent entArith
entArith:
SAVE_ALL
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $18
jsr $31, do_entArith
.end entArith
.align 4
.globl entMM
.ent entMM
entMM:
SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them. */
subq $30,56,$30
stq $9,0($30)
stq $10,8($30)
stq $11,16($30)
stq $12,24($30)
stq $13,32($30)
stq $14,40($30)
stq $15,48($30)
addq $30,56,$19
subq $sp, 56, $sp
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
stq $12, 24($sp)
stq $13, 32($sp)
stq $14, 40($sp)
stq $15, 48($sp)
addq $sp, 56, $19
/* handle the fault */
lda $8,0x3fff
bic $30,$8,$8
jsr $26,do_page_fault
lda $8, 0x3fff
bic $sp, $8, $8
jsr $26, do_page_fault
/* reload the registers after the exception code played. */
ldq $9,0($30)
ldq $10,8($30)
ldq $11,16($30)
ldq $12,24($30)
ldq $13,32($30)
ldq $14,40($30)
ldq $15,48($30)
addq $30,56,$30
ldq $9, 0($sp)
ldq $10, 8($sp)
ldq $11, 16($sp)
ldq $12, 24($sp)
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
addq $sp, 56, $sp
/* finish up the syscall as normal. */
br ret_from_sys_call
.end entMM
.align 3
.globl entArith
.ent entArith
entArith:
SAVE_ALL
lda $8,0x3fff
lda $26,ret_from_sys_call
bic $30,$8,$8
jsr $31,do_entArith
.end entArith
.align 3
.globl entIF
.ent entIF
.align 4
.globl entIF
.ent entIF
entIF:
SAVE_ALL
lda $8,0x3fff
lda $26,ret_from_sys_call
bic $30,$8,$8
jsr $31,do_entIF
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $17
jsr $31, do_entIF
.end entIF
.align 3
.globl entDbg
.ent entDbg
.align 4
.globl entUna
.ent entUna
entUna:
lda $sp, -256($sp)
stq $0, 0($sp)
ldq $0, 256($sp) /* get PS */
stq $1, 8($sp)
stq $2, 16($sp)
stq $3, 24($sp)
and $0, 8, $0 /* user mode? */
stq $4, 32($sp)
bne $0, entUnaUser /* yup -> do user-level unaligned fault */
stq $5, 40($sp)
stq $6, 48($sp)
stq $7, 56($sp)
stq $8, 64($sp)
stq $9, 72($sp)
stq $10, 80($sp)
stq $11, 88($sp)
stq $12, 96($sp)
stq $13, 104($sp)
stq $14, 112($sp)
stq $15, 120($sp)
/* 16-18 PAL-saved */
stq $19, 152($sp)
stq $20, 160($sp)
stq $21, 168($sp)
stq $22, 176($sp)
stq $23, 184($sp)
stq $24, 192($sp)
stq $25, 200($sp)
stq $26, 208($sp)
stq $27, 216($sp)
stq $28, 224($sp)
stq $gp, 232($sp)
lda $8, 0x3fff
stq $31, 248($sp)
bic $sp, $8, $8
jsr $26, do_entUna
ldq $0, 0($sp)
ldq $1, 8($sp)
ldq $2, 16($sp)
ldq $3, 24($sp)
ldq $4, 32($sp)
ldq $5, 40($sp)
ldq $6, 48($sp)
ldq $7, 56($sp)
ldq $8, 64($sp)
ldq $9, 72($sp)
ldq $10, 80($sp)
ldq $11, 88($sp)
ldq $12, 96($sp)
ldq $13, 104($sp)
ldq $14, 112($sp)
ldq $15, 120($sp)
/* 16-18 PAL-saved */
ldq $19, 152($sp)
ldq $20, 160($sp)
ldq $21, 168($sp)
ldq $22, 176($sp)
ldq $23, 184($sp)
ldq $24, 192($sp)
ldq $25, 200($sp)
ldq $26, 208($sp)
ldq $27, 216($sp)
ldq $28, 224($sp)
ldq $gp, 232($sp)
lda $sp, 256($sp)
call_pal PAL_rti
.end entUna
.align 4
.ent entUnaUser
entUnaUser:
ldq $0, 0($sp) /* restore original $0 */
lda $sp, 256($sp) /* pop entUna's stack frame */
SAVE_ALL /* setup normal kernel stack */
lda $sp, -56($sp)
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
stq $12, 24($sp)
stq $13, 32($sp)
stq $14, 40($sp)
stq $15, 48($sp)
lda $8, 0x3fff
addq $sp, 56, $19
bic $sp, $8, $8
jsr $26, do_entUnaUser
ldq $9, 0($sp)
ldq $10, 8($sp)
ldq $11, 16($sp)
ldq $12, 24($sp)
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
lda $sp, 56($sp)
br ret_from_sys_call
.end entUnaUser
.align 4
.globl entDbg
.ent entDbg
entDbg:
SAVE_ALL
lda $8,0x3fff
lda $26,ret_from_sys_call
bic $30,$8,$8
jsr $31,do_entDbg
lda $8, 0x3fff
lda $26, ret_from_sys_call
bic $sp, $8, $8
mov $sp, $16
jsr $31, do_entDbg
.end entDbg
/*
* The system call entry point is special. Most importantly, it looks
* like a function call to userspace as far as clobbered registers. We
* do preserve the argument registers (for syscall restarts) and $26
* (for leaf syscall functions).
*
* So much for theory. We don't take advantage of this yet.
*
* Note that a0-a2 are not saved by PALcode as with the other entry points.
*/
.align 4
.globl entSys
.globl ret_from_sys_call
.ent entSys
entSys:
SAVE_ALL
lda $8, 0x3fff
bic $sp, $8, $8
lda $4, NR_SYSCALLS($31)
stq $16, SP_OFF+24($sp)
lda $5, sys_call_table
lda $27, sys_ni_syscall
cmpult $0, $4, $4
ldl $3, TI_FLAGS($8)
stq $17, SP_OFF+32($sp)
s8addq $0, $5, $5
stq $18, SP_OFF+40($sp)
blbs $3, strace
beq $4, 1f
ldq $27, 0($5)
1: jsr $26, ($27), alpha_ni_syscall
ldgp $gp, 0($26)
blt $0, $syscall_error /* the call failed */
stq $0, 0($sp)
stq $31, 72($sp) /* a3=0 => no error */
.align 4
ret_from_sys_call:
cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
ldq $0, SP_OFF($sp)
and $0, 8, $0
beq $0, restore_all
ret_from_reschedule:
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
lda $16, 7
call_pal PAL_swpipl
ldl $5, TI_FLAGS($8)
and $5, _TIF_WORK_MASK, $2
bne $5, work_pending
restore_all:
RESTORE_ALL
call_pal PAL_rti
.align 3
$syscall_error:
/*
* Some system calls (e.g., ptrace) can return arbitrary
* values which might normally be mistaken as error numbers.
* Those functions must zero $0 (v0) directly in the stack
* frame to indicate that a negative return value wasn't an
* error number..
*/
ldq $19, 0($sp) /* old syscall nr (zero if success) */
beq $19, $ret_success
ldq $20, 72($sp) /* .. and this a3 */
subq $31, $0, $0 /* with error in v0 */
addq $31, 1, $1 /* set a3 for errno return */
stq $0, 0($sp)
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
stq $1, 72($sp) /* a3 for return */
br ret_from_sys_call
$ret_success:
stq $0, 0($sp)
stq $31, 72($sp) /* a3=0 => no error */
br ret_from_sys_call
.end entSys
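The v0/a3 protocol that $syscall_error and $ret_success maintain is what a user-level stub decodes after the trap returns; a minimal sketch with a hypothetical helper name (this is not libc code):
#include <errno.h>
/* v0 is the saved $0; a3 is the 72($sp) slot written above */
static long syscall_decode(long v0, long a3)
{
	if (a3 != 0) {		/* kernel stored 1: v0 holds a positive errno */
		errno = (int) v0;
		return -1;
	}
	return v0;		/* a3 == 0: v0 is the result, even if negative */
}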
/*
* Do all cleanup when returning from all interrupts and system calls.
*
* Arguments:
* $5: TI_FLAGS.
* $8: current.
* $19: The old syscall number, or zero if this is not a return
* from a syscall that errored and is possibly restartable.
* $20: Error indication.
*/
.align 4
.ent work_pending
work_pending:
and $5, _TIF_NEED_RESCHED, $2
beq $2, $work_notifysig
$work_resched:
subq $sp, 16, $sp
stq $19, 0($sp) /* save syscall nr */
stq $20, 8($sp) /* and error indication (a3) */
jsr $26, schedule
ldq $19, 0($sp)
ldq $20, 8($sp)
addq $sp, 16, $sp
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
lda $16, 7
call_pal PAL_swpipl
ldl $5, TI_FLAGS($8)
and $5, _TIF_WORK_MASK, $2
beq $2, restore_all
and $5, _TIF_NEED_RESCHED, $2
bne $2, $work_resched
$work_notifysig:
mov $sp, $17
br $1, do_switch_stack
mov $5, $21
mov $sp, $18
mov $31, $16
jsr $26, do_notify_resume
bsr $1, undo_switch_stack
br restore_all
.end work_pending
/*
* PTRACE syscall handler
*/
.align 4
.ent strace
strace:
/* set up signal stack, call syscall_trace */
bsr $1, do_switch_stack
jsr $26, syscall_trace
bsr $1, undo_switch_stack
/* get the system call number and the arguments back.. */
ldq $0, 0($sp)
ldq $16, SP_OFF+24($sp)
ldq $17, SP_OFF+32($sp)
ldq $18, SP_OFF+40($sp)
ldq $19, 72($sp)
ldq $20, 80($sp)
ldq $21, 88($sp)
/* get the system call pointer.. */
lda $1, NR_SYSCALLS($31)
lda $2, sys_call_table
lda $27, alpha_ni_syscall
cmpult $0, $1, $1
s8addq $0, $2, $2
beq $1, 1f
ldq $27, 0($2)
1: jsr $26, ($27), sys_gettimeofday
ldgp $gp, 0($26)
/* check return.. */
blt $0, $strace_error /* the call failed */
stq $31, 72($sp) /* a3=0 => no error */
$strace_success:
stq $0, 0($sp) /* save return value */
bsr $1, do_switch_stack
jsr $26, syscall_trace
bsr $1, undo_switch_stack
br $31, ret_from_sys_call
.align 3
$strace_error:
ldq $19, 0($sp) /* old syscall nr (zero if success) */
beq $19, $strace_success
ldq $20, 72($sp) /* .. and this a3 */
subq $31, $0, $0 /* with error in v0 */
addq $31, 1, $1 /* set a3 for errno return */
stq $0, 0($sp)
stq $1, 72($sp) /* a3 for return */
bsr $1, do_switch_stack
mov $19, $9 /* save old syscall number */
mov $20, $10 /* save old a3 */
jsr $26, syscall_trace
mov $9, $19
mov $10, $20
bsr $1, undo_switch_stack
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
br ret_from_sys_call
.end strace
/*
* Save and restore the switch stack -- aka the balance of the user context.
*/
.align 4
.ent do_switch_stack
do_switch_stack:
lda $sp, -SWITCH_STACK_SIZE($sp)
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
stq $12, 24($sp)
stq $13, 32($sp)
stq $14, 40($sp)
stq $15, 48($sp)
stq $26, 56($sp)
stt $f0, 64($sp)
stt $f1, 72($sp)
stt $f2, 80($sp)
stt $f3, 88($sp)
stt $f4, 96($sp)
stt $f5, 104($sp)
stt $f6, 112($sp)
stt $f7, 120($sp)
stt $f8, 128($sp)
stt $f9, 136($sp)
stt $f10, 144($sp)
stt $f11, 152($sp)
stt $f12, 160($sp)
stt $f13, 168($sp)
stt $f14, 176($sp)
stt $f15, 184($sp)
stt $f16, 192($sp)
stt $f17, 200($sp)
stt $f18, 208($sp)
stt $f19, 216($sp)
stt $f20, 224($sp)
stt $f21, 232($sp)
stt $f22, 240($sp)
stt $f23, 248($sp)
stt $f24, 256($sp)
stt $f25, 264($sp)
stt $f26, 272($sp)
stt $f27, 280($sp)
mf_fpcr $f0 # get fpcr
stt $f28, 288($sp)
stt $f29, 296($sp)
stt $f30, 304($sp)
stt $f0, 312($sp) # save fpcr in slot of $f31
ldt $f0, 64($sp) # don't let "do_switch_stack" change fp state.
ret $31, ($1), 1
.end do_switch_stack
.align 4
.ent undo_switch_stack
undo_switch_stack:
ldq $9, 0($sp)
ldq $10, 8($sp)
ldq $11, 16($sp)
ldq $12, 24($sp)
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
ldq $26, 56($sp)
ldt $f30, 312($sp) # get saved fpcr
ldt $f0, 64($sp)
ldt $f1, 72($sp)
ldt $f2, 80($sp)
ldt $f3, 88($sp)
mt_fpcr $f30 # install saved fpcr
ldt $f4, 96($sp)
ldt $f5, 104($sp)
ldt $f6, 112($sp)
ldt $f7, 120($sp)
ldt $f8, 128($sp)
ldt $f9, 136($sp)
ldt $f10, 144($sp)
ldt $f11, 152($sp)
ldt $f12, 160($sp)
ldt $f13, 168($sp)
ldt $f14, 176($sp)
ldt $f15, 184($sp)
ldt $f16, 192($sp)
ldt $f17, 200($sp)
ldt $f18, 208($sp)
ldt $f19, 216($sp)
ldt $f20, 224($sp)
ldt $f21, 232($sp)
ldt $f22, 240($sp)
ldt $f23, 248($sp)
ldt $f24, 256($sp)
ldt $f25, 264($sp)
ldt $f26, 272($sp)
ldt $f27, 280($sp)
ldt $f28, 288($sp)
ldt $f29, 296($sp)
ldt $f30, 304($sp)
lda $sp, SWITCH_STACK_SIZE($sp)
ret $31, ($1), 1
.end undo_switch_stack
/*
* The meat of the context switch code.
*/
.align 4
.globl alpha_switch_to
.ent alpha_switch_to
alpha_switch_to:
.prologue 0
bsr $1, do_switch_stack
call_pal PAL_swpctx
lda $8, 0x3fff
bsr $1, undo_switch_stack
bic $sp, $8, $8
ret
.end alpha_switch_to
/*
* New processes begin life here.
*/
.globl ret_from_fork
#if CONFIG_SMP || CONFIG_PREEMPT
.align 4
.ent ret_from_fork
ret_from_fork:
lda $26, ret_from_sys_call
mov $17, $16
jmp $31, schedule_tail
.end ret_from_fork
#else
ret_from_fork = ret_from_sys_call
#endif
/*
* kernel_thread(fn, arg, clone_flags)
@@ -175,9 +609,9 @@ entDbg:
.globl kernel_thread
.ent kernel_thread
kernel_thread:
ldgp $29,0($27) /* we can be called from a module */
ldgp $gp, 0($27) /* we can be called from a module */
.prologue 1
subq $30,SP_OFF+6*8,$30
subq $sp, SP_OFF+6*8, $sp
br $1, 2f /* load start address */
/* We've now "returned" from a fake system call. */
@@ -186,9 +620,9 @@ kernel_thread:
ldi $1, 0x3fff
beq $20, 1f /* parent or child? */
bic $30, $1, $8 /* in child. */
bic $sp, $1, $8 /* in child. */
jsr $26, ($27)
ldgp $29, 0($26)
ldgp $gp, 0($26)
mov $0, $16
mov $31, $26
jmp $31, sys_exit
@@ -197,19 +631,19 @@ kernel_thread:
.align 4
2: /* Fake a system call stack frame, as we can't do system calls
from kernel space. Note that we store FN and ARG as they
from kernel space. Note that we store FN and ARG as they
need to be set up in the child for the call. Also store $8
and $26 for use in the parent. */
stq $31, SP_OFF($30) /* ps */
stq $1, SP_OFF+8($30) /* pc */
stq $29, SP_OFF+16($30) /* gp */
stq $16, 136($30) /* $27; FN for child */
stq $17, SP_OFF+24($30) /* $16; ARG for child */
stq $8, 64($30) /* $8 */
stq $26, 128($30) /* $26 */
stq $31, SP_OFF($sp) /* ps */
stq $1, SP_OFF+8($sp) /* pc */
stq $gp, SP_OFF+16($sp) /* gp */
stq $16, 136($sp) /* $27; FN for child */
stq $17, SP_OFF+24($sp) /* $16; ARG for child */
stq $8, 64($sp) /* $8 */
stq $26, 128($sp) /* $26 */
/* Avoid the HAE being gratuitously wrong, to avoid restoring it. */
ldq $2, alpha_mv+HAE_CACHE
stq $2, 152($30) /* HAE */
stq $2, 152($sp) /* HAE */
/* Shuffle FLAGS to the front; add CLONE_VM. */
ldi $1, CLONE_VM|CLONE_UNTRACED
@@ -218,515 +652,269 @@ kernel_thread:
/* We don't actually care for a3 success widgetry in the kernel.
Not for positive errno values. */
stq $0, 0($30) /* $0 */
stq $0, 0($sp) /* $0 */
br restore_all
.end kernel_thread
.end kernel_thread
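On the child side of the fake frame, the restored $27 holds FN and $16 holds ARG, so the fake "syscall return" falls into a call of fn(arg) followed by sys_exit() with its result; a hedged C rendering of that branch (the sys_exit prototype is a stand-in):
extern void sys_exit(int error_code);	/* stand-in prototype */
/* child branch above: jsr $26, ($27); mov $0, $16; jmp sys_exit */
static void kernel_thread_child(int (*fn)(void *), void *arg)
{
	sys_exit(fn(arg));	/* never returns */
}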
/*
* __kernel_execve(path, argv, envp, regs)
*/
.align 3
.globl __kernel_execve
.ent __kernel_execve
.align 4
.globl __kernel_execve
.ent __kernel_execve
__kernel_execve:
ldgp $29,0($27) /* we can be called from modules. */
subq $30,16,$30
.frame $30,16,$26,0
stq $26,0($30)
stq $19,8($30)
ldgp $gp, 0($27) /* we can be called from modules. */
subq $sp, 16, $sp
.frame $sp, 16, $26, 0
stq $26, 0($sp)
stq $19, 8($sp)
.prologue 1
jsr $26,do_execve
bne $0,1f /* error! */
ldq $30,8($30)
br $31,ret_from_sys_call
1: ldq $26,0($30)
addq $30,16,$30
jsr $26, do_execve
bne $0, 1f /* error! */
ldq $sp, 8($sp)
br $31, ret_from_sys_call
1: ldq $26, 0($sp)
addq $sp, 16, $sp
ret
.end __kernel_execve
.align 3
.ent do_switch_stack
do_switch_stack:
lda $30,-SWITCH_STACK_SIZE($30)
stq $9,0($30)
stq $10,8($30)
stq $11,16($30)
stq $12,24($30)
stq $13,32($30)
stq $14,40($30)
stq $15,48($30)
stq $26,56($30)
stt $f0,64($30)
stt $f1,72($30)
stt $f2,80($30)
stt $f3,88($30)
stt $f4,96($30)
stt $f5,104($30)
stt $f6,112($30)
stt $f7,120($30)
stt $f8,128($30)
stt $f9,136($30)
stt $f10,144($30)
stt $f11,152($30)
stt $f12,160($30)
stt $f13,168($30)
stt $f14,176($30)
stt $f15,184($30)
stt $f16,192($30)
stt $f17,200($30)
stt $f18,208($30)
stt $f19,216($30)
stt $f20,224($30)
stt $f21,232($30)
stt $f22,240($30)
stt $f23,248($30)
stt $f24,256($30)
stt $f25,264($30)
stt $f26,272($30)
stt $f27,280($30)
mf_fpcr $f0 # get fpcr
stt $f28,288($30)
stt $f29,296($30)
stt $f30,304($30)
stt $f0,312($30) # save fpcr in slot of $f31
ldt $f0,64($30) # don't let "do_switch_stack" change fp state.
ret $31,($1),1
.end do_switch_stack
.align 3
.ent undo_switch_stack
undo_switch_stack:
ldq $9,0($30)
ldq $10,8($30)
ldq $11,16($30)
ldq $12,24($30)
ldq $13,32($30)
ldq $14,40($30)
ldq $15,48($30)
ldq $26,56($30)
ldt $f30,312($30) # get saved fpcr
ldt $f0,64($30)
ldt $f1,72($30)
ldt $f2,80($30)
ldt $f3,88($30)
mt_fpcr $f30 # install saved fpcr
ldt $f4,96($30)
ldt $f5,104($30)
ldt $f6,112($30)
ldt $f7,120($30)
ldt $f8,128($30)
ldt $f9,136($30)
ldt $f10,144($30)
ldt $f11,152($30)
ldt $f12,160($30)
ldt $f13,168($30)
ldt $f14,176($30)
ldt $f15,184($30)
ldt $f16,192($30)
ldt $f17,200($30)
ldt $f18,208($30)
ldt $f19,216($30)
ldt $f20,224($30)
ldt $f21,232($30)
ldt $f22,240($30)
ldt $f23,248($30)
ldt $f24,256($30)
ldt $f25,264($30)
ldt $f26,272($30)
ldt $f27,280($30)
ldt $f28,288($30)
ldt $f29,296($30)
ldt $f30,304($30)
lda $30,SWITCH_STACK_SIZE($30)
ret $31,($1),1
.end undo_switch_stack
.align 3
.globl entUna
.ent entUna
entUna:
lda $30,-256($30)
stq $0,0($30)
ldq $0,256($30) /* get PS */
stq $1,8($30)
stq $2,16($30)
stq $3,24($30)
and $0,8,$0 /* user mode? */
stq $4,32($30)
bne $0,entUnaUser /* yup -> do user-level unaligned fault */
stq $5,40($30)
stq $6,48($30)
stq $7,56($30)
stq $8,64($30)
stq $9,72($30)
stq $10,80($30)
stq $11,88($30)
stq $12,96($30)
stq $13,104($30)
stq $14,112($30)
stq $15,120($30)
/* 16-18 PAL-saved */
stq $19,152($30)
stq $20,160($30)
stq $21,168($30)
stq $22,176($30)
stq $23,184($30)
stq $24,192($30)
stq $25,200($30)
stq $26,208($30)
stq $27,216($30)
stq $28,224($30)
stq $29,232($30)
lda $8,0x3fff
stq $31,248($30)
bic $30,$8,$8
jsr $26,do_entUna
ldq $0,0($30)
ldq $1,8($30)
ldq $2,16($30)
ldq $3,24($30)
ldq $4,32($30)
ldq $5,40($30)
ldq $6,48($30)
ldq $7,56($30)
ldq $8,64($30)
ldq $9,72($30)
ldq $10,80($30)
ldq $11,88($30)
ldq $12,96($30)
ldq $13,104($30)
ldq $14,112($30)
ldq $15,120($30)
/* 16-18 PAL-saved */
ldq $19,152($30)
ldq $20,160($30)
ldq $21,168($30)
ldq $22,176($30)
ldq $23,184($30)
ldq $24,192($30)
ldq $25,200($30)
ldq $26,208($30)
ldq $27,216($30)
ldq $28,224($30)
ldq $29,232($30)
lda $30,256($30)
call_pal PAL_rti
.end entUna
.align 3
.ent entUnaUser
entUnaUser:
ldq $0,0($30) /* restore original $0 */
lda $30,256($30) /* pop entUna's stack frame */
SAVE_ALL /* setup normal kernel stack */
lda $30,-56($30)
stq $9,0($30)
stq $10,8($30)
stq $11,16($30)
stq $12,24($30)
stq $13,32($30)
stq $14,40($30)
stq $15,48($30)
lda $8,0x3fff
addq $30,56,$19
bic $30,$8,$8
jsr $26,do_entUnaUser
ldq $9,0($30)
ldq $10,8($30)
ldq $11,16($30)
ldq $12,24($30)
ldq $13,32($30)
ldq $14,40($30)
ldq $15,48($30)
lda $30,56($30)
br ret_from_sys_call
.end entUnaUser
.end __kernel_execve
/*
* A fork is the same as clone(SIGCHLD, 0);
* Special system calls. Most of these are special in that they either
* have to play switch_stack games or in some way use the pt_regs struct.
*/
.align 3
.globl sys_fork
.ent sys_fork
.align 4
.globl sys_fork
.ent sys_fork
sys_fork:
bsr $1,do_switch_stack
bis $31,SIGCHLD,$16
mov $31,$17
mov $31,$18
mov $30,$19
jsr $26,alpha_clone
bsr $1,undo_switch_stack
ret $31,($26),1
.end sys_fork
.align 3
.globl sys_clone
.ent sys_clone
.prologue 0
mov $sp, $19
bsr $1, do_switch_stack
/* A fork is the same as clone(SIGCHLD, 0); */
bis $31, SIGCHLD, $16
mov $31, $17
mov $31, $18
jsr $26, alpha_clone
bsr $1, undo_switch_stack
ret
.end sys_fork
.align 4
.globl sys_clone
.ent sys_clone
sys_clone:
bsr $1,do_switch_stack
.prologue 0
mov $sp, $19
bsr $1, do_switch_stack
/* $16, $17, $18, $19 come from the user; $19 is used later
via pt_regs->r19. */
mov $30,$19
jsr $26,alpha_clone
bsr $1,undo_switch_stack
ret $31,($26),1
.end sys_clone
.align 3
.globl sys_vfork
.ent sys_vfork
jsr $26, alpha_clone
bsr $1, undo_switch_stack
ret
.end sys_clone
.align 4
.globl sys_vfork
.ent sys_vfork
sys_vfork:
bsr $1,do_switch_stack
mov $30,$16
jsr $26,alpha_vfork
bsr $1,undo_switch_stack
ret $31,($26),1
.end sys_vfork
.align 3
.globl alpha_switch_to
.ent alpha_switch_to
alpha_switch_to:
.prologue 0
bsr $1,do_switch_stack
call_pal PAL_swpctx
lda $8,0x3fff
bsr $1,undo_switch_stack
bic $30,$8,$8
ret $31,($26),1
.end alpha_switch_to
bsr $1, do_switch_stack
mov $sp, $16
jsr $26, alpha_vfork
bsr $1, undo_switch_stack
ret
.end sys_vfork
#if CONFIG_SMP || CONFIG_PREEMPT
.globl ret_from_fork
.align 3
.ent ret_from_fork
ret_from_fork:
lda $26,ret_from_sys_call
mov $17,$16
jmp $31,schedule_tail
.end ret_from_fork
#endif
.align 4
.globl sys_sigreturn
.ent sys_sigreturn
sys_sigreturn:
.prologue 0
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_sigreturn
br $1, undo_switch_stack
br ret_from_sys_call
.end sys_sigreturn
/*
* Oh, well.. Disassembling OSF/1 binaries to find out how the
* system calls work isn't much fun.
*
* entSys is special in that the PAL-code doesn't save a0-a2, so
* we start off by doing that by hand.
*/
.align 3
.globl entSys
.globl ret_from_sys_call
.ent entSys
entSys:
SAVE_ALL
lda $8,0x3fff
bic $30,$8,$8
lda $4,NR_SYSCALLS($31)
stq $16,SP_OFF+24($30)
lda $5,sys_call_table
lda $27,sys_ni_syscall
cmpult $0,$4,$4
ldl $3,TI_FLAGS($8)
stq $17,SP_OFF+32($30)
s8addq $0,$5,$5
stq $18,SP_OFF+40($30)
blbs $3,strace
beq $4,1f
ldq $27,0($5)
1: jsr $26,($27),alpha_ni_syscall
ldgp $29,0($26)
blt $0,syscall_error /* the call failed */
stq $0,0($30)
stq $31,72($30) /* a3=0 => no error */
.align 3
ret_from_sys_call:
cmovne $26,0,$19 /* $19 = 0 => non-restartable */
ldq $0,SP_OFF($30)
and $0,8,$0
beq $0,restore_all
ret_from_reschedule:
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
lda $16,7
call_pal PAL_swpipl
ldl $5,TI_FLAGS($8)
and $5,_TIF_WORK_MASK,$2
bne $5,work_pending
restore_all:
RESTORE_ALL
call_pal PAL_rti
.align 4
.globl sys_rt_sigreturn
.ent sys_rt_sigreturn
sys_rt_sigreturn:
.prologue 0
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_rt_sigreturn
br $1, undo_switch_stack
br ret_from_sys_call
.end sys_rt_sigreturn
work_pending:
and $5,_TIF_NEED_RESCHED,$2
beq $2,work_notifysig
work_resched:
subq $30,16,$30
stq $19,0($30) /* save syscall nr */
stq $20,8($30) /* and error indication (a3) */
jsr $26,schedule
ldq $19,0($30)
ldq $20,8($30)
addq $30,16,$30
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
lda $16,7
call_pal PAL_swpipl
ldl $5,TI_FLAGS($8)
and $5,_TIF_WORK_MASK,$2
beq $2,restore_all
and $5,_TIF_NEED_RESCHED,$2
bne $2,work_resched
work_notifysig:
mov $30,$17
br $1,do_switch_stack
mov $5,$21
mov $30,$18
mov $31,$16
jsr $26,do_notify_resume
bsr $1,undo_switch_stack
br restore_all
.align 4
.globl sys_sigsuspend
.ent sys_sigsuspend
sys_sigsuspend:
.prologue 0
mov $sp, $17
br $1, do_switch_stack
mov $sp, $18
subq $sp, 16, $sp
stq $26, 0($sp)
jsr $26, do_sigsuspend
ldq $26, 0($sp)
lda $sp, SWITCH_STACK_SIZE+16($sp)
ret
.end sys_sigsuspend
/* PTRACE syscall handler */
.align 3
strace:
/* set up signal stack, call syscall_trace */
bsr $1,do_switch_stack
jsr $26,syscall_trace
bsr $1,undo_switch_stack
.align 4
.globl sys_rt_sigsuspend
.ent sys_rt_sigsuspend
sys_rt_sigsuspend:
.prologue 0
mov $sp, $18
br $1, do_switch_stack
mov $sp, $19
subq $sp, 16, $sp
stq $26, 0($sp)
jsr $26, do_rt_sigsuspend
ldq $26, 0($sp)
lda $sp, SWITCH_STACK_SIZE+16($sp)
ret
.end sys_rt_sigsuspend
/* get the system call number and the arguments back.. */
ldq $0,0($30)
ldq $16,SP_OFF+24($30)
ldq $17,SP_OFF+32($30)
ldq $18,SP_OFF+40($30)
ldq $19,72($30)
ldq $20,80($30)
ldq $21,88($30)
.align 4
.globl sys_sethae
.ent sys_sethae
sys_sethae:
.prologue 0
stq $16, 152($sp)
ret
.end sys_sethae
.align 4
.globl osf_getpriority
.ent osf_getpriority
osf_getpriority:
lda $sp, -16($sp)
stq $26, 0($sp)
.prologue 0
/* get the system call pointer.. */
lda $1,NR_SYSCALLS($31)
lda $2,sys_call_table
lda $27,alpha_ni_syscall
cmpult $0,$1,$1
s8addq $0,$2,$2
beq $1,1f
ldq $27,0($2)
1: jsr $26,($27),sys_gettimeofday
ldgp $29,0($26)
jsr $26, sys_getpriority
/* check return.. */
blt $0,strace_error /* the call failed */
stq $31,72($30) /* a3=0 => no error */
strace_success:
stq $0,0($30) /* save return value */
ldq $26, 0($sp)
blt $0, 1f
bsr $1,do_switch_stack
jsr $26,syscall_trace
bsr $1,undo_switch_stack
br $31,ret_from_sys_call
/* Return value is the unbiased priority, i.e. 20 - prio.
This does result in negative return values, so signal
no error by writing into the R0 slot. */
lda $1, 20
stq $31, 16($sp)
subl $1, $0, $0
unop
.align 3
strace_error:
ldq $19,0($30) /* old syscall nr (zero if success) */
beq $19,strace_success
ldq $20,72($30) /* .. and this a3 */
subq $31,$0,$0 /* with error in v0 */
addq $31,1,$1 /* set a3 for errno return */
stq $0,0($30)
stq $1,72($30) /* a3 for return */
bsr $1,do_switch_stack
mov $19,$9 /* save old syscall number */
mov $20,$10 /* save old a3 */
jsr $26,syscall_trace
mov $9,$19
mov $10,$20
bsr $1,undo_switch_stack
mov $31,$26 /* tell "ret_from_sys_call" we can restart */
br ret_from_sys_call
1: lda $sp, 16($sp)
ret
.end osf_getpriority
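Arithmetically, the subl above undoes the bias that sys_getpriority() applies internally (it returns 20 - prio, i.e. 1..40, to keep its own result non-negative); the OSF value can therefore be negative, which is why $31 is stored into the saved-$0 slot. A one-line sketch of the mapping:
/* hedged sketch: biased kernel value back to the real priority */
static long osf_unbias(long biased)
{
	return 20 - biased;	/* 40 -> -20, 1 -> 19 */
}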
.align 3
syscall_error:
/*
* Some system calls (e.g., ptrace) can return arbitrary
* values which might normally be mistaken as error numbers.
* Those functions must zero $0 (v0) directly in the stack
* frame to indicate that a negative return value wasn't an
* error number..
*/
ldq $19,0($30) /* old syscall nr (zero if success) */
beq $19,ret_success
ldq $20,72($30) /* .. and this a3 */
subq $31,$0,$0 /* with error in v0 */
addq $31,1,$1 /* set a3 for errno return */
stq $0,0($30)
mov $31,$26 /* tell "ret_from_sys_call" we can restart */
stq $1,72($30) /* a3 for return */
br ret_from_sys_call
.align 4
.globl sys_getxuid
.ent sys_getxuid
sys_getxuid:
.prologue 0
ldq $2, TI_TASK($8)
ldl $0, TASK_UID($2)
ldl $1, TASK_EUID($2)
stq $1, 80($sp)
ret
.end sys_getxuid
ret_success:
stq $0,0($30)
stq $31,72($30) /* a3=0 => no error */
br ret_from_sys_call
.end entSys
.align 4
.globl sys_getxgid
.ent sys_getxgid
sys_getxgid:
.prologue 0
ldq $2, TI_TASK($8)
ldl $0, TASK_GID($2)
ldl $1, TASK_EGID($2)
stq $1, 80($sp)
ret
.end sys_getxgid
.align 3
.globl sys_sigreturn
.ent sys_sigreturn
sys_sigreturn:
mov $30,$17
lda $18,-SWITCH_STACK_SIZE($30)
lda $30,-SWITCH_STACK_SIZE($30)
jsr $26,do_sigreturn
br $1,undo_switch_stack
br ret_from_sys_call
.end sys_sigreturn
.align 4
.globl sys_getxpid
.ent sys_getxpid
sys_getxpid:
.prologue 0
ldq $2, TI_TASK($8)
/* See linux/kernel/timer.c sys_getppid for discussion
about this loop. */
ldq $3, TASK_REAL_PARENT($2)
1: ldl $1, TASK_TGID($3)
#if CONFIG_SMP
mov $3, $4
mb
ldq $3, TASK_REAL_PARENT($2)
cmpeq $3, $4, $4
beq $4, 1b
#endif
stq $1, 80($sp)
ldl $0, TASK_TGID($2)
ret
.end sys_getxpid
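The SMP loop in sys_getxpid re-reads real_parent until two reads agree, so a concurrent reparenting cannot hand back a stale tgid; a self-contained C sketch of the same shape (task and rmb() here are stand-ins, not the kernel's types):
struct task { struct task *real_parent; int tgid; };
#define rmb() __asm__ __volatile__("" ::: "memory")	/* stand-in barrier */
static int parent_tgid(struct task *tsk)
{
	struct task *parent;
	int tgid;
	do {
		parent = tsk->real_parent;	/* ldq TASK_REAL_PARENT */
		tgid = parent->tgid;		/* ldl TASK_TGID($3) */
		rmb();				/* mb in the asm above */
	} while (tsk->real_parent != parent);	/* retry if reparented */
	return tgid;
}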
.align 4
.globl sys_pipe
.ent sys_pipe
sys_pipe:
lda $sp, -16($sp)
stq $26, 0($sp)
.prologue 0
.align 3
.globl sys_rt_sigreturn
.ent sys_rt_sigreturn
sys_rt_sigreturn:
mov $30,$17
lda $18,-SWITCH_STACK_SIZE($30)
lda $30,-SWITCH_STACK_SIZE($30)
jsr $26,do_rt_sigreturn
br $1,undo_switch_stack
br ret_from_sys_call
.end sys_rt_sigreturn
lda $16, 8($sp)
jsr $26, do_pipe
.align 3
.globl sys_sigsuspend
.ent sys_sigsuspend
sys_sigsuspend:
mov $30,$17
br $1,do_switch_stack
mov $30,$18
subq $30,16,$30
stq $26,0($30)
jsr $26,do_sigsuspend
ldq $26,0($30)
lda $30,SWITCH_STACK_SIZE+16($30)
ret $31,($26),1
.end sys_sigsuspend
ldq $26, 0($sp)
bne $0, 1f
.align 3
.globl sys_rt_sigsuspend
.ent sys_rt_sigsuspend
sys_rt_sigsuspend:
mov $30,$18
br $1,do_switch_stack
mov $30,$19
subq $30,16,$30
stq $26,0($30)
jsr $26,do_rt_sigsuspend
ldq $26,0($30)
lda $30,SWITCH_STACK_SIZE+16($30)
ret $31,($26),1
.end sys_rt_sigsuspend
/* The return values are in $0 and $20. */
ldl $1, 12($sp)
ldl $0, 8($sp)
stq $1, 80+16($sp)
1: lda $sp, 16($sp)
ret
.end sys_pipe
.align 4
.globl alpha_create_module
.ent alpha_create_module
alpha_create_module:
.prologue 0
mov $sp, $18
jmp $31, do_alpha_create_module
.end alpha_create_module
.align 4
.globl sys_ptrace
.ent sys_ptrace
sys_ptrace:
.prologue 0
mov $sp, $20
jmp $31, do_sys_ptrace
.end sys_ptrace
.align 4
.globl alpha_ni_syscall
.ent alpha_ni_syscall
alpha_ni_syscall:
.prologue 0
/* Special because it also implements overflow handling via
syscall number 0. And if you recall, zero is a special
trigger for "not an error". Store large non-zero there. */
lda $0, -ENOSYS
unop
stq $0, 0($sp)
ret
.end alpha_ni_syscall
@@ -37,14 +37,13 @@ void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
*/
asmlinkage void
do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs regs)
do_entInt(unsigned long type, unsigned long vector,
unsigned long la_ptr, struct pt_regs *regs)
{
switch (type) {
case 0:
#ifdef CONFIG_SMP
handle_ipi(&regs);
handle_ipi(regs);
return;
#else
irq_err_count++;
@@ -56,32 +55,32 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
#ifdef CONFIG_SMP
{
long cpu;
smp_percpu_timer_interrupt(&regs);
smp_percpu_timer_interrupt(regs);
cpu = smp_processor_id();
if (cpu != boot_cpuid) {
kstat_cpu(cpu).irqs[RTC_IRQ]++;
} else {
handle_irq(RTC_IRQ, &regs);
handle_irq(RTC_IRQ, regs);
}
}
#else
handle_irq(RTC_IRQ, &regs);
handle_irq(RTC_IRQ, regs);
#endif
return;
case 2:
alpha_mv.machine_check(vector, la_ptr, &regs);
alpha_mv.machine_check(vector, la_ptr, regs);
return;
case 3:
alpha_mv.device_interrupt(vector, &regs);
alpha_mv.device_interrupt(vector, regs);
return;
case 4:
perf_irq(vector, &regs);
perf_irq(vector, regs);
return;
default:
printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n",
type, vector);
}
printk("PC = %016lx PS=%04lx\n", regs.pc, regs.ps);
printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
}
void __init
@@ -96,10 +95,8 @@ common_init_isa_dma(void)
void __init
init_IRQ(void)
{
/* Uh, this really MUST come first, just in case
* the platform init_irq() causes interrupts/mchecks
* (as is the case with RAWHIDE, at least).
*/
/* Just in case the platform init_irq() causes interrupts/mchecks
(as is the case with RAWHIDE, at least). */
wrent(entInt, 0);
alpha_mv.init_irq();
@@ -45,7 +45,6 @@
extern int do_pipe(int *);
extern asmlinkage unsigned long sys_brk(unsigned long);
extern int sys_getpriority(int, int);
extern asmlinkage unsigned long sys_create_module(char *, unsigned long);
/*
@@ -172,68 +171,9 @@ osf_getdirentries(unsigned int fd, struct osf_dirent *dirent,
#undef ROUND_UP
#undef NAME_OFFSET
/*
* Alpha syscall convention has no problem returning negative
* values:
*/
asmlinkage int
osf_getpriority(int which, int who,
int a2, int a3, int a4, int a5, struct pt_regs regs)
{
extern int sys_getpriority(int, int);
int prio;
/*
* We don't need to acquire the kernel lock here, because
* all of these operations are local. sys_getpriority
* will get the lock as required..
*/
prio = sys_getpriority(which, who);
if (prio >= 0) {
regs.r0 = 0; /* special return: no errors */
prio = 20 - prio;
}
return prio;
}
/*
* No need to acquire the kernel lock, we're local..
*/
asmlinkage unsigned long
sys_getxuid(int a0, int a1, int a2, int a3, int a4, int a5, struct pt_regs regs)
{
struct task_struct * tsk = current;
(&regs)->r20 = tsk->euid;
return tsk->uid;
}
asmlinkage unsigned long
sys_getxgid(int a0, int a1, int a2, int a3, int a4, int a5, struct pt_regs regs)
{
struct task_struct * tsk = current;
(&regs)->r20 = tsk->egid;
return tsk->gid;
}
asmlinkage unsigned long
sys_getxpid(int a0, int a1, int a2, int a3, int a4, int a5, struct pt_regs regs)
{
struct task_struct *tsk = current;
/*
* This isn't strictly "local" any more and we should actually
* acquire the kernel lock. The "p_opptr" pointer might change
* if the parent goes away (or due to ptrace). But any race
* isn't actually going to matter, as if the parent happens
* to change we can happily return either of the pids.
*/
(&regs)->r20 = tsk->real_parent->tgid;
return tsk->tgid;
}
asmlinkage unsigned long
osf_mmap(unsigned long addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd, unsigned long off)
unsigned long flags, unsigned long fd, unsigned long off)
{
struct file *file = NULL;
unsigned long ret = -EBADF;
@@ -502,19 +442,6 @@ sys_getdtablesize(void)
return NR_OPEN;
}
asmlinkage int
sys_pipe(int a0, int a1, int a2, int a3, int a4, int a5, struct pt_regs regs)
{
int fd[2], error;
error = do_pipe(fd);
if (!error) {
regs.r20 = fd[1];
error = fd[0];
}
return error;
}
/*
* For compatibility with OSF/1 only. Use utsname(2) instead.
*/
@@ -723,8 +650,8 @@ osf_sigstack(struct sigstack *uss, struct sigstack *uoss)
*/
asmlinkage unsigned long
alpha_create_module(char *module_name, unsigned long size,
int a3, int a4, int a5, int a6, struct pt_regs regs)
do_alpha_create_module(char *module_name, unsigned long size,
struct pt_regs *regs)
{
long retval;
@@ -735,7 +662,7 @@ alpha_create_module(char *module_name, unsigned long size,
the error number is a small negative number, while the address
is always negative but much larger. */
if (retval + 1000 < 0)
regs.r0 = 0;
regs->r0 = 0;
unlock_kernel();
return retval;
@@ -42,18 +42,6 @@
#include "proto.h"
#include "pci_impl.h"
/*
* No need to acquire the kernel lock, we're entirely local..
*/
asmlinkage int
sys_sethae(unsigned long hae, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs regs)
{
(&regs)->hae = hae;
return 0;
}
void default_idle(void)
{
barrier();
......@@ -227,6 +215,9 @@ flush_thread(void)
with respect to the FPU. This is all exceptions disabled. */
current_thread_info()->ieee_state = 0;
wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));
/* Clean slate for TLS. */
current_thread_info()->pcb.unique = 0;
}
void
@@ -244,16 +235,15 @@ release_thread(struct task_struct *dead_task)
* with parameters (SIGCHLD, 0).
*/
int
alpha_clone(unsigned long clone_flags, unsigned long usp,
int *user_tid, struct switch_stack * swstack)
alpha_clone(unsigned long clone_flags, unsigned long usp, int *user_tid,
struct pt_regs *regs)
{
struct task_struct *p;
struct pt_regs *u_regs = (struct pt_regs *) (swstack+1);
if (!usp)
usp = rdusp();
p = do_fork(clone_flags & ~CLONE_IDLETASK, usp, u_regs, 0, user_tid);
p = do_fork(clone_flags & ~CLONE_IDLETASK, usp, regs, 0, user_tid);
return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
@@ -282,7 +272,6 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
extern void ret_from_sys_call(void);
extern void ret_from_fork(void);
struct thread_info *childti = p->thread_info;
@@ -304,11 +293,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
stack = ((struct switch_stack *) regs) - 1;
childstack = ((struct switch_stack *) childregs) - 1;
*childstack = *stack;
#ifdef CONFIG_SMP
childstack->r26 = (unsigned long) ret_from_fork;
#else
childstack->r26 = (unsigned long) ret_from_sys_call;
#endif
childti->pcb.usp = usp;
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
@@ -249,8 +249,8 @@ void ptrace_disable(struct task_struct *child)
}
asmlinkage long
sys_ptrace(long request, long pid, long addr, long data,
int a4, int a5, struct pt_regs regs)
do_sys_ptrace(long request, long pid, long addr, long data,
struct pt_regs *regs)
{
struct task_struct *child;
long ret;
@@ -307,14 +307,14 @@ sys_ptrace(long request, long pid, long addr, long data,
if (copied != sizeof(tmp))
goto out;
regs.r0 = 0; /* special return: no errors */
regs->r0 = 0; /* special return: no errors */
ret = tmp;
goto out;
}
/* Read register number ADDR. */
case PTRACE_PEEKUSR:
regs.r0 = 0; /* special return: no errors */
regs->r0 = 0; /* special return: no errors */
ret = get_reg(child, addr);
DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
goto out;
@@ -210,8 +210,7 @@ long alpha_fp_emul (unsigned long pc);
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, struct pt_regs regs)
struct pt_regs *regs)
{
long si_code = FPE_FLTINV;
siginfo_t info;
@@ -221,23 +220,23 @@ do_entArith(unsigned long summary, unsigned long write_mask,
emulate the instruction. If the processor supports
precise exceptions, we don't have to search. */
if (!amask(AMASK_PRECISE_TRAP))
si_code = alpha_fp_emul(regs.pc - 4);
si_code = alpha_fp_emul(regs->pc - 4);
else
si_code = alpha_fp_emul_imprecise(&regs, write_mask);
si_code = alpha_fp_emul_imprecise(regs, write_mask);
if (si_code == 0)
return;
}
die_if_kernel("Arithmetic fault", &regs, 0, 0);
die_if_kernel("Arithmetic fault", regs, 0, 0);
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
send_sig_info(SIGFPE, &info, current);
}
asmlinkage void
do_entIF(unsigned long type, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, struct pt_regs regs)
do_entIF(unsigned long type, struct pt_regs *regs)
{
siginfo_t info;
int signo, code;
@@ -245,13 +244,13 @@ do_entIF(unsigned long type, unsigned long a1,
if (!opDEC_testing || type != 4) {
if (type == 1) {
const unsigned int *data
= (const unsigned int *) regs.pc;
= (const unsigned int *) regs->pc;
printk("Kernel bug at %s:%d\n",
(const char *)(data[1] | (long)data[2] << 32),
data[0]);
}
die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
&regs, type, 0);
regs, type, 0);
}
switch (type) {
@@ -260,10 +259,10 @@ do_entIF(unsigned long type, unsigned long a1,
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_trapno = 0;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
if (ptrace_cancel_bpt(current)) {
regs.pc -= 4; /* make pc point to former bpt */
regs->pc -= 4; /* make pc point to former bpt */
}
send_sig_info(SIGTRAP, &info, current);
@@ -273,15 +272,15 @@ do_entIF(unsigned long type, unsigned long a1,
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = __SI_FAULT;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
info.si_trapno = 0;
send_sig_info(SIGTRAP, &info, current);
return;
case 2: /* gentrap */
info.si_addr = (void *) regs.pc;
info.si_trapno = regs.r16;
switch ((long) regs.r16) {
info.si_addr = (void *) regs->pc;
info.si_trapno = regs->r16;
switch ((long) regs->r16) {
case GEN_INTOVF:
signo = SIGFPE;
code = FPE_INTOVF;
@@ -341,7 +340,7 @@ do_entIF(unsigned long type, unsigned long a1,
info.si_signo = signo;
info.si_errno = 0;
info.si_code = code;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
send_sig_info(signo, &info, current);
return;
@@ -358,26 +357,26 @@ do_entIF(unsigned long type, unsigned long a1,
we get the correct PC. If not, we set a flag
to correct it every time through. */
if (opDEC_testing) {
if (regs.pc == opDEC_test_pc) {
if (regs->pc == opDEC_test_pc) {
opDEC_fix = 4;
regs.pc += 4;
regs->pc += 4;
printk("opDEC fixup enabled.\n");
}
return;
}
regs.pc += opDEC_fix;
regs->pc += opDEC_fix;
/* EV4 does not implement anything except normal
rounding. Everything else will come here as
an illegal instruction. Emulate them. */
si_code = alpha_fp_emul(regs.pc - 4);
si_code = alpha_fp_emul(regs->pc - 4);
if (si_code == 0)
return;
if (si_code > 0) {
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
send_sig_info(SIGFPE, &info, current);
return;
}
@@ -406,7 +405,7 @@ do_entIF(unsigned long type, unsigned long a1,
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = regs.pc;
info.si_addr = (void *) regs->pc;
send_sig_info(SIGILL, &info, current);
}
@@ -418,18 +417,16 @@ do_entIF(unsigned long type, unsigned long a1,
and if we don't put something on the entry point we'll oops. */
asmlinkage void
do_entDbg(unsigned long type, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, struct pt_regs regs)
do_entDbg(struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("Instruction fault", &regs, type, 0);
die_if_kernel("Instruction fault", regs, 0, 0);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = regs.pc;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGILL, &info, current);
}
@@ -1083,22 +1080,6 @@ do_entUnaUser(void * va, unsigned long opcode,
return;
}
/*
* Unimplemented system calls.
*/
asmlinkage long
alpha_ni_syscall(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs regs)
{
/* We only get here for OSF system calls, minus #112;
the rest go to sys_ni_syscall. */
#if 0
printk("<sc %ld(%lx,%lx,%lx)>", regs.r0, a0, a1, a2);
#endif
return -ENOSYS;
}
void
trap_init(void)
{
@@ -1114,9 +1095,7 @@ trap_init(void)
wrent(entDbg, 6);
/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
* a bug in the handling of the opDEC fault. Fix it up if so.
*/
if (implver() == IMPLVER_EV4) {
a bug in the handling of the opDEC fault. Fix it up if so. */
if (implver() == IMPLVER_EV4)
opDEC_check();
}
}
@@ -223,12 +223,12 @@ alpha_fp_emul (unsigned long pc)
FP_CONV(S,D,1,1,SR,DB);
goto pack_s;
} else {
/* CVTST need do nothing else but copy the
bits and repack. */
vb = alpha_read_fp_reg_s(fb);
FP_UNPACK_SP(SB, &vb);
DR_c = DB_c;
DR_s = DB_s;
DR_e = DB_e;
DR_f = DB_f;
DR_f = SB_f << (52 - 23);
goto pack_d;
}
@@ -140,7 +140,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#elif defined(__mc68000__) && defined(CONFIG_MMU)
#ifdef SUN3_PAGE_NOCACHE
if (MMU_IS_SUN3)
prot |= SUN3_PAGE_NOCACHE;
@@ -68,7 +68,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
/* FIXME: soem day we shouldnt need to look in here! */
/* FIXME: some day we shouldnt need to look in here! */
#include "legacy/pdc4030.h"
@@ -590,7 +590,7 @@ int proc_ide_read_driver
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
ide_drive_t *drive = (ide_drive_t *) data;
ide_driver_t *driver = (ide_driver_t *) drive->driver;
ide_driver_t *driver = drive->driver;
int len;
if (!driver)
@@ -720,7 +720,6 @@ void recreate_proc_ide_device(ide_hwif_t *hwif, ide_drive_t *drive)
struct proc_dir_entry *ent;
struct proc_dir_entry *parent = hwif->proc;
char name[64];
// ide_driver_t *driver = drive->driver;
if (drive->present && !drive->proc) {
drive->proc = proc_mkdir(drive->name, parent);
@@ -153,6 +153,7 @@
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
@@ -162,7 +163,6 @@
#include "ide_modes.h"
#include <linux/kmod.h>
/* default maximum number of failures */
#define IDE_DEFAULT_MAX_FAILURES 1
@@ -22,7 +22,6 @@ ifneq ($(CONFIG_MAC),y)
endif
obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o
obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
obj-$(CONFIG_PPC_RTC) += rtc.o
obj-$(CONFIG_ANSLCD) += ans-lcd.o
obj-$(CONFIG_ADB_PMU) += via-pmu.o
@@ -30,7 +29,6 @@ obj-$(CONFIG_ADB_CUDA) += via-cuda.o
obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o
obj-$(CONFIG_ADB) += adb.o
obj-$(CONFIG_ADB_KEYBOARD) += mac_keyb.o
obj-$(CONFIG_ADB_MACII) += via-macii.o
obj-$(CONFIG_ADB_MACIISI) += via-maciisi.o
obj-$(CONFIG_ADB_IOP) += adb-iop.o
@@ -34,8 +34,10 @@
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/hydra.h>
@@ -75,8 +77,8 @@ static struct adb_driver *adb_driver_list[] = {
struct adb_driver *adb_controller;
struct notifier_block *adb_client_list = NULL;
static int adb_got_sleep = 0;
static int adb_inited = 0;
static int adb_got_sleep;
static int adb_inited;
static pid_t adb_probe_task_pid;
static DECLARE_MUTEX(adb_probe_mutex);
static struct completion adb_probe_task_comp;
@@ -94,7 +96,7 @@ static struct pmu_sleep_notifier adb_sleep_notifier = {
static int adb_scan_bus(void);
static int do_adb_reset_bus(void);
static void adbdev_init(void);
static int try_handler_change(int, int);
static struct adb_handler {
void (*handler)(unsigned char *, int, struct pt_regs *, int);
@@ -102,6 +104,18 @@ static struct adb_handler {
int handler_id;
} adb_handler[16];
/*
* The adb_handler_sem mutex protects all accesses to the original_address
* and handler_id fields of adb_handler[i] for all i, and changes to the
* handler field.
* Accesses to the handler field are protected by the adb_handler_lock
* rwlock. It is held across all calls to any handler, so that by the
* time adb_unregister returns, we know that the old handler isn't being
* called.
*/
static DECLARE_MUTEX(adb_handler_sem);
static rwlock_t adb_handler_lock = RW_LOCK_UNLOCKED;
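Condensed from the hunks below, the protocol those two declarations implement looks like this; a sketch stitched together from the patch, not a verbatim excerpt:
/* writer (adb_register/adb_unregister): serialize on the semaphore,
   take the rwlock only around the pointer update */
down(&adb_handler_sem);
write_lock_irq(&adb_handler_lock);
adb_handler[i].handler = handler;	/* or 0 to unregister */
write_unlock_irq(&adb_handler_lock);
up(&adb_handler_sem);
/* reader (adb_input, interrupt path): the read lock is held across
   the indirect call, so adb_unregister cannot complete while an old
   handler is still running */
read_lock(&adb_handler_lock);
handler = adb_handler[id].handler;
if (handler != 0)
	(*handler)(buf, nb, regs, autopoll);
read_unlock(&adb_handler_lock);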
#if 0
static void printADBreply(struct adb_request *req)
{
@@ -254,25 +268,18 @@ __adb_probe_task(void *data)
SIGCHLD | CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
}
static DECLARE_WORK(adb_reset_work, __adb_probe_task, NULL);
int
adb_reset_bus(void)
{
static struct tq_struct tqs = {
routine: __adb_probe_task,
};
if (__adb_probe_sync) {
do_adb_reset_bus();
return 0;
}
down(&adb_probe_mutex);
/* Create probe thread as a child of keventd */
if (current_is_keventd())
__adb_probe_task(NULL);
else
schedule_task(&tqs);
schedule_work(&adb_reset_work);
return 0;
}
@@ -372,7 +379,6 @@ static int
do_adb_reset_bus(void)
{
int ret, nret, devs;
unsigned long flags;
if (adb_controller == NULL)
return -ENXIO;
@@ -391,11 +397,11 @@ do_adb_reset_bus(void)
/* Let the trackpad settle down */
adb_wait_ms(500);
}
save_flags(flags);
cli();
down(&adb_handler_sem);
write_lock_irq(&adb_handler_lock);
memset(adb_handler, 0, sizeof(adb_handler));
restore_flags(flags);
write_unlock_irq(&adb_handler_lock);
/* That one is still a bit synchronous, oh well... */
if (adb_controller->reset_bus)
@@ -413,6 +419,7 @@ do_adb_reset_bus(void)
if (adb_controller->autopoll)
adb_controller->autopoll(devs);
}
up(&adb_handler_sem);
nret = notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL);
if (nret & NOTIFY_STOP_MASK)
@@ -512,30 +519,41 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids,
{
int i;
down(&adb_handler_sem);
ids->nids = 0;
for (i = 1; i < 16; i++) {
if ((adb_handler[i].original_address == default_id) &&
(!handler_id || (handler_id == adb_handler[i].handler_id) ||
adb_try_handler_change(i, handler_id))) {
try_handler_change(i, handler_id))) {
if (adb_handler[i].handler != 0) {
printk(KERN_ERR
"Two handlers for ADB device %d\n",
default_id);
continue;
}
write_lock_irq(&adb_handler_lock);
adb_handler[i].handler = handler;
write_unlock_irq(&adb_handler_lock);
ids->id[ids->nids++] = i;
}
}
up(&adb_handler_sem);
return ids->nids;
}
int
adb_unregister(int index)
{
if (!adb_handler[index].handler)
return -ENODEV;
adb_handler[index].handler = 0;
int ret = -ENODEV;
down(&adb_handler_sem);
write_lock_irq(&adb_handler_lock);
if (adb_handler[index].handler) {
ret = 0;
adb_handler[index].handler = 0;
}
write_unlock_irq(&adb_handler_lock);
up(&adb_handler_sem);
return 0;
}
@@ -544,6 +562,7 @@ adb_input(unsigned char *buf, int nb, struct pt_regs *regs, int autopoll)
{
int i, id;
static int dump_adb_input = 0;
void (*handler)(unsigned char *, int, struct pt_regs *, int);
/* We skip keystrokes and mouse moves when the sleep process
* has been started. We stop autopoll, but this is another security
@@ -558,14 +577,15 @@ adb_input(unsigned char *buf, int nb, struct pt_regs *regs, int autopoll)
printk(" %x", buf[i]);
printk(", id = %d\n", id);
}
if (adb_handler[id].handler != 0) {
(*adb_handler[id].handler)(buf, nb, regs, autopoll);
}
read_lock(&adb_handler_lock);
handler = adb_handler[id].handler;
if (handler != 0)
(*handler)(buf, nb, regs, autopoll);
read_unlock(&adb_handler_lock);
}
/* Try to change handler to new_id. Will return 1 if successful */
int
adb_try_handler_change(int address, int new_id)
/* Try to change handler to new_id. Will return 1 if successful. */
static int try_handler_change(int address, int new_id)
{
struct adb_request req;
@@ -584,12 +604,25 @@ adb_try_handler_change(int address, int new_id)
return 1;
}
int
adb_try_handler_change(int address, int new_id)
{
int ret;
down(&adb_handler_sem);
ret = try_handler_change(address, new_id);
up(&adb_handler_sem);
return ret;
}
int
adb_get_infos(int address, int *original_address, int *handler_id)
{
down(&adb_handler_sem);
*original_address = adb_handler[address].original_address;
*handler_id = adb_handler[address].handler_id;
up(&adb_handler_sem);
return (*original_address != 0);
}
/*
* drivers/char/mac_keyb.c
*
* Keyboard driver for Power Macintosh computers.
*
* Adapted from drivers/char/keyboard.c by Paul Mackerras
* (see that file for its authors and contributors).
*
* Copyright (C) 1996 Paul Mackerras.
*
* Adapted to ADB changes and support for more devices by
* Benjamin Herrenschmidt. Adapted from code in MkLinux
* and reworked.
*
* Supported devices:
*
* - Standard 1 button mouse
* - All standard Apple Extended protocol (handler ID 4)
* - mouseman and trackman mice & trackballs
* - PowerBook Trackpad (default setup: enable tapping)
* - MicroSpeed mouse & trackball (needs testing)
* - CH Products Trackball Pro (needs testing)
* - Contour Design (Contour Mouse)
* - Hunter digital (NoHandsMouse)
* - Kensington TurboMouse 5 (needs testing)
* - Mouse Systems A3 mice and trackballs <aidan@kublai.com>
* - MacAlly 2-buttons mouse (needs testing) <pochini@denise.shiny.it>
*
* To do:
*
* Improve Kensington support, add MacX support as a dynamic
* option (not a compile-time option).
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/tty_flip.h>
#include <linux/config.h>
#include <linux/notifier.h>
#include <asm/bitops.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_ll.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#define KEYB_KEYREG 0 /* register # for key up/down data */
#define KEYB_LEDREG 2 /* register # for leds on ADB keyboard */
#define MOUSE_DATAREG 0 /* reg# for movement/button codes from mouse */
static int adb_message_handler(struct notifier_block *, unsigned long, void *);
static struct notifier_block mackeyb_adb_notifier = {
adb_message_handler,
NULL,
0
};
/* this map indicates which keys shouldn't autorepeat. */
static unsigned char dont_repeat[128] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* esc...option */
0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* fn, num lock */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, /* scroll lock */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, /* R modifiers */
};
/* Simple translation table for the SysRq keys */
#ifdef CONFIG_MAGIC_SYSRQ
unsigned char mackbd_sysrq_xlate[128] =
"asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */
"yt123465=97-80o]" /* 0x10 - 0x1f */
"u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */
"\t `\177\000\033\000\000\000\000\000\000\000\000\000\000"
/* 0x30 - 0x3f */
"\000\000\000*\000+\000\000\000\000\000/\r\000-\000"
/* 0x40 - 0x4f */
"\000\0000123456789\000\000\000" /* 0x50 - 0x5f */
"\205\206\207\203\210\211\000\213\000\215\000\000\000\000\000\212\000\214";
/* 0x60 - 0x6f */
#endif
static u_short macplain_map[NR_KEYS] __initdata = {
0xfb61, 0xfb73, 0xfb64, 0xfb66, 0xfb68, 0xfb67, 0xfb7a, 0xfb78,
0xfb63, 0xfb76, 0xf200, 0xfb62, 0xfb71, 0xfb77, 0xfb65, 0xfb72,
0xfb79, 0xfb74, 0xf031, 0xf032, 0xf033, 0xf034, 0xf036, 0xf035,
0xf03d, 0xf039, 0xf037, 0xf02d, 0xf038, 0xf030, 0xf05d, 0xfb6f,
0xfb75, 0xf05b, 0xfb69, 0xfb70, 0xf201, 0xfb6c, 0xfb6a, 0xf027,
0xfb6b, 0xf03b, 0xf05c, 0xf02c, 0xf02f, 0xfb6e, 0xfb6d, 0xf02e,
0xf009, 0xf020, 0xf060, 0xf07f, 0xf200, 0xf01b, 0xf702, 0xf703,
0xf700, 0xf207, 0xf701, 0xf601, 0xf602, 0xf600, 0xf603, 0xf200,
0xf200, 0xf310, 0xf200, 0xf30c, 0xf200, 0xf30a, 0xf200, 0xf208,
0xf200, 0xf200, 0xf200, 0xf30d, 0xf30e, 0xf200, 0xf30b, 0xf200,
0xf200, 0xf200, 0xf300, 0xf301, 0xf302, 0xf303, 0xf304, 0xf305,
0xf306, 0xf307, 0xf200, 0xf308, 0xf309, 0xf200, 0xf200, 0xf200,
0xf104, 0xf105, 0xf106, 0xf102, 0xf107, 0xf108, 0xf200, 0xf10a,
0xf200, 0xf10c, 0xf200, 0xf209, 0xf200, 0xf109, 0xf200, 0xf10b,
0xf200, 0xf11d, 0xf115, 0xf114, 0xf118, 0xf116, 0xf103, 0xf117,
0xf101, 0xf119, 0xf100, 0xf700, 0xf701, 0xf702, 0xf200, 0xf200,
};
static u_short macshift_map[NR_KEYS] __initdata = {
0xfb41, 0xfb53, 0xfb44, 0xfb46, 0xfb48, 0xfb47, 0xfb5a, 0xfb58,
0xfb43, 0xfb56, 0xf200, 0xfb42, 0xfb51, 0xfb57, 0xfb45, 0xfb52,
0xfb59, 0xfb54, 0xf021, 0xf040, 0xf023, 0xf024, 0xf05e, 0xf025,
0xf02b, 0xf028, 0xf026, 0xf05f, 0xf02a, 0xf029, 0xf07d, 0xfb4f,
0xfb55, 0xf07b, 0xfb49, 0xfb50, 0xf201, 0xfb4c, 0xfb4a, 0xf022,
0xfb4b, 0xf03a, 0xf07c, 0xf03c, 0xf03f, 0xfb4e, 0xfb4d, 0xf03e,
0xf009, 0xf020, 0xf07e, 0xf07f, 0xf200, 0xf01b, 0xf702, 0xf703,
0xf700, 0xf207, 0xf701, 0xf601, 0xf602, 0xf600, 0xf603, 0xf200,
0xf200, 0xf310, 0xf200, 0xf30c, 0xf200, 0xf30a, 0xf200, 0xf208,
0xf200, 0xf200, 0xf200, 0xf30d, 0xf30e, 0xf200, 0xf30b, 0xf200,
0xf200, 0xf200, 0xf300, 0xf301, 0xf302, 0xf303, 0xf304, 0xf305,
0xf306, 0xf307, 0xf200, 0xf308, 0xf309, 0xf200, 0xf200, 0xf200,
0xf10e, 0xf10f, 0xf110, 0xf10c, 0xf111, 0xf112, 0xf200, 0xf10a,
0xf200, 0xf10c, 0xf200, 0xf203, 0xf200, 0xf113, 0xf200, 0xf10b,
0xf200, 0xf11d, 0xf115, 0xf114, 0xf20b, 0xf116, 0xf10d, 0xf117,
0xf10b, 0xf20a, 0xf10a, 0xf700, 0xf701, 0xf702, 0xf200, 0xf200,
};
static u_short macaltgr_map[NR_KEYS] __initdata = {
0xf914, 0xfb73, 0xf917, 0xf919, 0xfb68, 0xfb67, 0xfb7a, 0xfb78,
0xf916, 0xfb76, 0xf200, 0xf915, 0xfb71, 0xfb77, 0xf918, 0xfb72,
0xfb79, 0xfb74, 0xf200, 0xf040, 0xf200, 0xf024, 0xf200, 0xf200,
0xf200, 0xf05d, 0xf07b, 0xf05c, 0xf05b, 0xf07d, 0xf07e, 0xfb6f,
0xfb75, 0xf200, 0xfb69, 0xfb70, 0xf201, 0xfb6c, 0xfb6a, 0xf200,
0xfb6b, 0xf200, 0xf200, 0xf200, 0xf200, 0xfb6e, 0xfb6d, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf702, 0xf703,
0xf700, 0xf207, 0xf701, 0xf601, 0xf602, 0xf600, 0xf603, 0xf200,
0xf200, 0xf310, 0xf200, 0xf30c, 0xf200, 0xf30a, 0xf200, 0xf208,
0xf200, 0xf200, 0xf200, 0xf30d, 0xf30e, 0xf200, 0xf30b, 0xf200,
0xf200, 0xf200, 0xf90a, 0xf90b, 0xf90c, 0xf90d, 0xf90e, 0xf90f,
0xf910, 0xf911, 0xf200, 0xf912, 0xf913, 0xf200, 0xf200, 0xf200,
0xf510, 0xf511, 0xf512, 0xf50e, 0xf513, 0xf514, 0xf200, 0xf516,
0xf200, 0xf10c, 0xf200, 0xf202, 0xf200, 0xf515, 0xf200, 0xf517,
0xf200, 0xf11d, 0xf115, 0xf114, 0xf118, 0xf116, 0xf50f, 0xf117,
0xf50d, 0xf119, 0xf50c, 0xf700, 0xf701, 0xf702, 0xf200, 0xf200,
};
static u_short macctrl_map[NR_KEYS] __initdata = {
0xf001, 0xf013, 0xf004, 0xf006, 0xf008, 0xf007, 0xf01a, 0xf018,
0xf003, 0xf016, 0xf200, 0xf002, 0xf011, 0xf017, 0xf005, 0xf012,
0xf019, 0xf014, 0xf200, 0xf000, 0xf01b, 0xf01c, 0xf01e, 0xf01d,
0xf200, 0xf200, 0xf01f, 0xf01f, 0xf07f, 0xf200, 0xf01d, 0xf00f,
0xf015, 0xf01b, 0xf009, 0xf010, 0xf201, 0xf00c, 0xf00a, 0xf007,
0xf00b, 0xf200, 0xf01c, 0xf200, 0xf07f, 0xf00e, 0xf00d, 0xf20e,
0xf200, 0xf000, 0xf000, 0xf008, 0xf200, 0xf200, 0xf702, 0xf703,
0xf700, 0xf207, 0xf701, 0xf601, 0xf602, 0xf600, 0xf603, 0xf200,
0xf200, 0xf310, 0xf200, 0xf30c, 0xf200, 0xf30a, 0xf200, 0xf208,
0xf200, 0xf200, 0xf200, 0xf30d, 0xf30e, 0xf200, 0xf30b, 0xf200,
0xf200, 0xf200, 0xf300, 0xf301, 0xf302, 0xf303, 0xf304, 0xf305,
0xf306, 0xf307, 0xf200, 0xf308, 0xf309, 0xf200, 0xf200, 0xf200,
0xf104, 0xf105, 0xf106, 0xf102, 0xf107, 0xf108, 0xf200, 0xf10a,
0xf200, 0xf10c, 0xf200, 0xf204, 0xf200, 0xf109, 0xf200, 0xf10b,
0xf200, 0xf11d, 0xf115, 0xf114, 0xf118, 0xf116, 0xf103, 0xf117,
0xf101, 0xf119, 0xf100, 0xf700, 0xf701, 0xf702, 0xf200, 0xf200,
};
static u_short macshift_ctrl_map[NR_KEYS] __initdata = {
0xf001, 0xf013, 0xf004, 0xf006, 0xf008, 0xf007, 0xf01a, 0xf018,
0xf003, 0xf016, 0xf200, 0xf002, 0xf011, 0xf017, 0xf005, 0xf012,
0xf019, 0xf014, 0xf200, 0xf000, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf01f, 0xf200, 0xf200, 0xf200, 0xf00f,
0xf015, 0xf200, 0xf009, 0xf010, 0xf201, 0xf00c, 0xf00a, 0xf200,
0xf00b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf00e, 0xf00d, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf702, 0xf703,
0xf700, 0xf207, 0xf701, 0xf601, 0xf602, 0xf600, 0xf603, 0xf200,
0xf200, 0xf310, 0xf200, 0xf30c, 0xf200, 0xf30a, 0xf200, 0xf208,
0xf200, 0xf200, 0xf200, 0xf30d, 0xf30e, 0xf200, 0xf30b, 0xf200,
0xf200, 0xf200, 0xf300, 0xf301, 0xf302, 0xf303, 0xf304, 0xf305,
0xf306, 0xf307, 0xf200, 0xf308, 0xf309, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf10c, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf11d, 0xf115, 0xf114, 0xf118, 0xf116, 0xf200, 0xf117,
0xf200, 0xf119, 0xf200, 0xf700, 0xf701, 0xf702, 0xf200, 0xf20c,
};
static u_short macalt_map[NR_KEYS] __initdata = {
0xf861, 0xf873, 0xf864, 0xf866, 0xf868, 0xf867, 0xf87a, 0xf878,
0xf863, 0xf876, 0xf200, 0xf862, 0xf871, 0xf877, 0xf865, 0xf872,
0xf879, 0xf874, 0xf831, 0xf832, 0xf833, 0xf834, 0xf836, 0xf835,
0xf83d, 0xf839, 0xf837, 0xf82d, 0xf838, 0xf830, 0xf85d, 0xf86f,
0xf875, 0xf85b, 0xf869, 0xf870, 0xf80d, 0xf86c, 0xf86a, 0xf827,
0xf86b, 0xf83b, 0xf85c, 0xf82c, 0xf82f, 0xf86e, 0xf86d, 0xf82e,
0xf809, 0xf820, 0xf860, 0xf87f, 0xf200, 0xf81b, 0xf702, 0xf703,
0xf700, 0xf207, 0xf701, 0xf210, 0xf211, 0xf600, 0xf603, 0xf200,
0xf200, 0xf310, 0xf200, 0xf30c, 0xf200, 0xf30a, 0xf200, 0xf208,
0xf200, 0xf200, 0xf200, 0xf30d, 0xf30e, 0xf200, 0xf30b, 0xf200,
0xf200, 0xf200, 0xf900, 0xf901, 0xf902, 0xf903, 0xf904, 0xf905,
0xf906, 0xf907, 0xf200, 0xf908, 0xf909, 0xf200, 0xf200, 0xf200,
0xf504, 0xf505, 0xf506, 0xf502, 0xf507, 0xf508, 0xf200, 0xf50a,
0xf200, 0xf10c, 0xf200, 0xf209, 0xf200, 0xf509, 0xf200, 0xf50b,
0xf200, 0xf11d, 0xf115, 0xf114, 0xf118, 0xf116, 0xf503, 0xf117,
0xf501, 0xf119, 0xf500, 0xf700, 0xf701, 0xf702, 0xf200, 0xf200,
};
static u_short macctrl_alt_map[NR_KEYS] __initdata = {
0xf801, 0xf813, 0xf804, 0xf806, 0xf808, 0xf807, 0xf81a, 0xf818,
0xf803, 0xf816, 0xf200, 0xf802, 0xf811, 0xf817, 0xf805, 0xf812,
0xf819, 0xf814, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf80f,
0xf815, 0xf200, 0xf809, 0xf810, 0xf201, 0xf80c, 0xf80a, 0xf200,
0xf80b, 0xf200, 0xf200, 0xf200, 0xf200, 0xf80e, 0xf80d, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf702, 0xf703,
0xf700, 0xf207, 0xf701, 0xf601, 0xf602, 0xf600, 0xf603, 0xf200,
0xf200, 0xf310, 0xf200, 0xf30c, 0xf200, 0xf30a, 0xf200, 0xf208,
0xf200, 0xf200, 0xf200, 0xf30d, 0xf30e, 0xf200, 0xf30b, 0xf200,
0xf200, 0xf200, 0xf300, 0xf301, 0xf302, 0xf303, 0xf304, 0xf305,
0xf306, 0xf307, 0xf200, 0xf308, 0xf309, 0xf200, 0xf200, 0xf200,
0xf504, 0xf505, 0xf506, 0xf502, 0xf507, 0xf508, 0xf200, 0xf50a,
0xf200, 0xf10c, 0xf200, 0xf200, 0xf200, 0xf509, 0xf200, 0xf50b,
0xf200, 0xf11d, 0xf115, 0xf114, 0xf118, 0xf116, 0xf503, 0xf117,
0xf501, 0xf119, 0xf500, 0xf700, 0xf701, 0xf702, 0xf200, 0xf200,
};
static void kbd_repeat(unsigned long);
static struct timer_list repeat_timer = TIMER_INITIALIZER(kbd_repeat, 0, 0);
static int last_keycode;
static void mackeyb_probe(void);
static void keyboard_input(unsigned char *, int, struct pt_regs *, int);
static void input_keycode(int, int);
static void leds_done(struct adb_request *);
static void mac_put_queue(int);
static void buttons_input(unsigned char *, int, struct pt_regs *, int);
static void init_trackpad(int id);
static void init_trackball(int id);
static void init_turbomouse(int id);
static void init_microspeed(int id);
static void init_ms_a3(int id);
extern struct kbd_struct kbd_table[];
extern void handle_scancode(unsigned char, int);
static struct adb_ids keyboard_ids;
static struct adb_ids mouse_ids;
static struct adb_ids buttons_ids;
/* Kind of mouse */
#define ADBMOUSE_STANDARD_100 0 /* Standard 100cpi mouse (handler 1) */
#define ADBMOUSE_STANDARD_200 1 /* Standard 200cpi mouse (handler 2) */
#define ADBMOUSE_EXTENDED 2 /* Apple Extended mouse (handler 4) */
#define ADBMOUSE_TRACKBALL 3 /* TrackBall (handler 4) */
#define ADBMOUSE_TRACKPAD 4 /* Apple's PowerBook trackpad (handler 4) */
#define ADBMOUSE_TURBOMOUSE5 5 /* Turbomouse 5 (previously req. mousehack) */
#define ADBMOUSE_MICROSPEED 6 /* Microspeed mouse (&trackball ?), MacPoint */
#define ADBMOUSE_TRACKBALLPRO 7 /* Trackball Pro (special buttons) */
#define ADBMOUSE_MS_A3 8 /* Mouse systems A3 trackball (handler 3) */
#define ADBMOUSE_MACALLY2 9 /* MacAlly 2-button mouse */
static int adb_mouse_kinds[16];
int mackbd_setkeycode(unsigned int scancode, unsigned int keycode)
{
return -EINVAL;
}
int mackbd_getkeycode(unsigned int scancode)
{
return -EINVAL;
}
int mackbd_translate(unsigned char keycode, unsigned char *keycodep,
char raw_mode)
{
if (!raw_mode) {
/*
* Convert R-shift/control/option to L version.
*/
switch (keycode) {
case 0x7b: keycode = 0x38; break; /* R-shift */
case 0x7c: keycode = 0x3a; break; /* R-option */
case 0x7d: keycode = 0x36; break; /* R-control */
}
}
*keycodep = keycode;
return 1;
}
char mackbd_unexpected_up(unsigned char keycode)
{
return 0x80;
}
static void
keyboard_input(unsigned char *data, int nb, struct pt_regs *regs, int apoll)
{
/* first check this is from register 0 */
if (nb != 3 || (data[0] & 3) != KEYB_KEYREG)
return; /* ignore it */
kbd_pt_regs = regs;
input_keycode(data[1], 0);
if (!(data[2] == 0xff || (data[2] == 0x7f && data[1] == 0x7f)))
input_keycode(data[2], 0);
}
static void
input_keycode(int keycode, int repeat)
{
struct kbd_struct *kbd;
int up_flag;
kbd = kbd_table + fg_console;
up_flag = (keycode & 0x80);
keycode &= 0x7f;
/* on the powerbook 3400, the power key gives code 0x7e */
if (keycode == 0x7e)
keycode = 0x7f;
/* remap the "Fn" key of the PowerBook G3 Series to 0x48
to avoid conflict with button emulation */
if (keycode == 0x3f)
keycode = 0x48;
if (!repeat)
del_timer(&repeat_timer);
if (kbd->kbdmode != VC_RAW) {
if (!up_flag && !dont_repeat[keycode]) {
last_keycode = keycode;
repeat_timer.expires = jiffies + (repeat? HZ/15: HZ/2);
add_timer(&repeat_timer);
}
/*
 * adb kludge!! Imitate PC caps-lock behaviour by
 * generating an up/down event each time caps is
 * pressed/released. Also makes sure that the
 * LEDs are handled. atong@uiuc.edu
 */
switch (keycode) {
/*case 0xb9:*/
case 0x39:
handle_scancode(0x39, 1);
handle_scancode(0x39, 0);
tasklet_schedule(&keyboard_tasklet);
return;
case 0x47:
/*case 0xc7:*/
tasklet_schedule(&keyboard_tasklet);
break;
}
}
handle_scancode(keycode, !up_flag);
tasklet_schedule(&keyboard_tasklet);
}
static void
kbd_repeat(unsigned long xxx)
{
unsigned long flags;
save_flags(flags);
cli();
input_keycode(last_keycode, 1);
restore_flags(flags);
}
static void mac_put_queue(int ch)
{
extern struct tty_driver console_driver;
struct tty_struct *tty;
tty = console_driver.table? console_driver.table[fg_console]: NULL;
if (tty) {
tty_insert_flip_char(tty, ch, 0);
con_schedule_flip(tty);
}
}
static void
buttons_input(unsigned char *data, int nb, struct pt_regs *regs, int autopoll)
{
#ifdef CONFIG_PMAC_BACKLIGHT
int backlight = get_backlight_level();
/*
* XXX: Where is the contrast control for the passive?
* -- Cort
*/
/* Ignore data from register other than 0 */
if ((data[0] & 0x3) || (nb < 2))
return;
switch (data[1]) {
case 0x8: /* mute */
break;
case 0x7: /* contrast decrease */
break;
case 0x6: /* contrast increase */
break;
case 0xa: /* brightness decrease */
if (backlight < 0)
break;
if (backlight > BACKLIGHT_OFF)
set_backlight_level(backlight-1);
else
set_backlight_level(BACKLIGHT_OFF);
break;
case 0x9: /* brightness increase */
if (backlight < 0)
break;
if (backlight < BACKLIGHT_MAX)
set_backlight_level(backlight+1);
else
set_backlight_level(BACKLIGHT_MAX);
break;
}
#endif /* CONFIG_PMAC_BACKLIGHT */
}
/* Map led flags as defined in kbd_kern.h to bits for Apple keyboard. */
static unsigned char mac_ledmap[8] = {
0, /* none */
4, /* scroll lock */
1, /* num lock */
5, /* scroll + num lock */
2, /* caps lock */
6, /* caps + scroll lock */
3, /* caps + num lock */
7, /* caps + num + scroll lock */
};
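/*
 * The table above is indexed by the kernel's LED bitmask (LED_SCR = 0x01,
 * LED_NUM = 0x02, LED_CAP = 0x04 in kbd_kern.h); the stored value is the
 * Apple bit pattern, and real_mackbd_leds() below sends its complement to
 * the keyboard's LED register. A minimal sketch of the lookup, assuming
 * those standard LED bit definitions:
 */
#if 0	/* illustrative sketch, not part of the driver */
static unsigned char apple_led_bits(unsigned char leds)
{
	/* leds is a 3-bit mask; the actual ADB write inverts the result */
	return mac_ledmap[leds & 7];
}
#endif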
static struct adb_request led_request;
static int leds_pending[16];
static int pending_devs[16];
static int pending_led_start=0;
static int pending_led_end=0;
static void real_mackbd_leds(unsigned char leds, int device)
{
if (led_request.complete) {
adb_request(&led_request, leds_done, 0, 3,
ADB_WRITEREG(device, KEYB_LEDREG), 0xff,
~mac_ledmap[leds]);
} else {
if (!(leds_pending[device] & 0x100)) {
pending_devs[pending_led_end] = device;
pending_led_end++;
pending_led_end = (pending_led_end < 16) ? pending_led_end : 0;
}
leds_pending[device] = leds | 0x100;
}
}
void mackbd_leds(unsigned char leds)
{
int i;
for(i = 0; i < keyboard_ids.nids; i++)
real_mackbd_leds(leds,keyboard_ids.id[i]);
}
static void leds_done(struct adb_request *req)
{
int leds,device;
if (pending_led_start != pending_led_end) {
device = pending_devs[pending_led_start];
leds = leds_pending[device] & 0xff;
leds_pending[device] = 0;
pending_led_start++;
pending_led_start = (pending_led_start < 16) ? pending_led_start : 0;
real_mackbd_leds(leds,device);
}
}
void __init mackbd_init_hw(void)
{
#ifdef CONFIG_PPC
if ( (_machine != _MACH_chrp) && (_machine != _MACH_Pmac) )
return;
#endif
#ifdef CONFIG_MAC
if (!MACH_IS_MAC)
return;
#endif
/* setup key map */
memcpy(key_maps[0], macplain_map, sizeof(plain_map));
memcpy(key_maps[1], macshift_map, sizeof(plain_map));
memcpy(key_maps[2], macaltgr_map, sizeof(plain_map));
memcpy(key_maps[4], macctrl_map, sizeof(plain_map));
memcpy(key_maps[5], macshift_ctrl_map, sizeof(plain_map));
memcpy(key_maps[8], macalt_map, sizeof(plain_map));
memcpy(key_maps[12], macctrl_alt_map, sizeof(plain_map));
led_request.complete = 1;
mackeyb_probe();
notifier_chain_register(&adb_client_list, &mackeyb_adb_notifier);
}
static int
adb_message_handler(struct notifier_block *this, unsigned long code, void *x)
{
unsigned long flags;
switch (code) {
case ADB_MSG_PRE_RESET:
case ADB_MSG_POWERDOWN:
/* Stop the repeat timer. Autopoll is already off at this point */
save_flags(flags);
cli();
del_timer(&repeat_timer);
restore_flags(flags);
/* Stop pending led requests */
while(!led_request.complete)
adb_poll();
break;
case ADB_MSG_POST_RESET:
mackeyb_probe();
break;
}
return NOTIFY_DONE;
}
static void
mackeyb_probe(void)
{
struct adb_request req;
int i;
adb_register(ADB_KEYBOARD, 0, &keyboard_ids, keyboard_input);
adb_register(0x07, 0x1F, &buttons_ids, buttons_input);
for (i = 0; i < keyboard_ids.nids; i++) {
int id = keyboard_ids.id[i];
/* turn off all leds */
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id, KEYB_LEDREG), 0xff, 0xff);
/* Enable the full feature set of the keyboard, i.e. get it to
   send separate codes for the left and right shift, control and
   option keys */
#if 0 /* handler 5 doesn't send separate codes for R modifiers */
if (adb_try_handler_change(id, 5))
printk("ADB keyboard at %d, handler set to 5\n", id);
else
#endif
if (adb_try_handler_change(id, 3))
printk("ADB keyboard at %d, handler set to 3\n", id);
else
printk("ADB keyboard at %d, handler 1\n", id);
}
/* Try to switch all mice to handler 4, or 2 for three-button
mode and full resolution. */
for (i = 0; i < mouse_ids.nids; i++) {
int id = mouse_ids.id[i];
if (adb_try_handler_change(id, 4)) {
printk("ADB mouse at %d, handler set to 4", id);
adb_mouse_kinds[id] = ADBMOUSE_EXTENDED;
}
else if (adb_try_handler_change(id, 0x2F)) {
printk("ADB mouse at %d, handler set to 0x2F", id);
adb_mouse_kinds[id] = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 0x42)) {
printk("ADB mouse at %d, handler set to 0x42", id);
adb_mouse_kinds[id] = ADBMOUSE_TRACKBALLPRO;
}
else if (adb_try_handler_change(id, 0x66)) {
printk("ADB mouse at %d, handler set to 0x66", id);
adb_mouse_kinds[id] = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 0x5F)) {
printk("ADB mouse at %d, handler set to 0x5F", id);
adb_mouse_kinds[id] = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 3)) {
printk("ADB mouse at %d, handler set to 3", id);
adb_mouse_kinds[id] = ADBMOUSE_MS_A3;
}
else if (adb_try_handler_change(id, 2)) {
printk("ADB mouse at %d, handler set to 2", id);
adb_mouse_kinds[id] = ADBMOUSE_STANDARD_200;
}
else {
printk("ADB mouse at %d, handler 1", id);
adb_mouse_kinds[id] = ADBMOUSE_STANDARD_100;
}
if ((adb_mouse_kinds[id] == ADBMOUSE_TRACKBALLPRO)
|| (adb_mouse_kinds[id] == ADBMOUSE_MICROSPEED)) {
init_microspeed(id);
} else if (adb_mouse_kinds[id] == ADBMOUSE_MS_A3) {
init_ms_a3(id);
} else if (adb_mouse_kinds[id] == ADBMOUSE_EXTENDED) {
/*
* Register 1 is usually used for device
* identification. Here, we try to identify
* a known device and call the appropriate
* init function.
*/
adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
ADB_READREG(id, 1));
if ((req.reply_len) &&
(req.reply[1] == 0x9a) && ((req.reply[2] == 0x21)
|| (req.reply[2] == 0x20)))
init_trackball(id);
else if ((req.reply_len >= 4) &&
(req.reply[1] == 0x74) && (req.reply[2] == 0x70) &&
(req.reply[3] == 0x61) && (req.reply[4] == 0x64))
init_trackpad(id);
else if ((req.reply_len >= 4) &&
(req.reply[1] == 0x4b) && (req.reply[2] == 0x4d) &&
(req.reply[3] == 0x4c) && (req.reply[4] == 0x31))
init_turbomouse(id);
else if ((req.reply_len == 9) &&
(req.reply[1] == 0x4b) && (req.reply[2] == 0x4f) &&
(req.reply[3] == 0x49) && (req.reply[4] == 0x54)){
if (adb_try_handler_change(id, 0x42)) {
printk("\nADB MacAlly 2-button mouse at %d, handler set to 0x42", id);
adb_mouse_kinds[id] = ADBMOUSE_MACALLY2;
}
}
}
printk("\n");
}
}
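/*
 * For reference, the register-1 signatures tested above are ASCII device
 * tags: 0x74 0x70 0x61 0x64 spells "tpad", 0x4b 0x4d 0x4c 0x31 spells
 * "KML1" (TurboMouse 5), and 0x4b 0x4f 0x49 0x54 spells "KOIT" (MacAlly).
 * A hypothetical helper that would make those comparisons self-documenting:
 */
#if 0	/* illustrative sketch, not part of the driver */
static int adb_reply_has_tag(struct adb_request *req, const char *tag)
{
	/* the tag occupies reply[1..4]; reply[0] is the status byte */
	return req->reply_len >= 5 && memcmp(req->reply + 1, tag, 4) == 0;
}
#endif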
static void
init_trackpad(int id)
{
struct adb_request req;
unsigned char r1_buffer[8];
printk(" (trackpad)");
adb_mouse_kinds[id] = ADBMOUSE_TRACKPAD;
adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
ADB_READREG(id,1));
if (req.reply_len < 8)
printk("bad length for reg. 1\n");
else
{
memcpy(r1_buffer, &req.reply[1], 8);
adb_request(&req, NULL, ADBREQ_SYNC, 9,
ADB_WRITEREG(id,1),
r1_buffer[0],
r1_buffer[1],
r1_buffer[2],
r1_buffer[3],
r1_buffer[4],
r1_buffer[5],
0x0d, /*r1_buffer[6],*/
r1_buffer[7]);
adb_request(&req, NULL, ADBREQ_SYNC, 9,
ADB_WRITEREG(id,2),
0x99,
0x94,
0x19,
0xff,
0xb2,
0x8a,
0x1b,
0x50);
adb_request(&req, NULL, ADBREQ_SYNC, 9,
ADB_WRITEREG(id,1),
r1_buffer[0],
r1_buffer[1],
r1_buffer[2],
r1_buffer[3],
r1_buffer[4],
r1_buffer[5],
0x03, /*r1_buffer[6],*/
r1_buffer[7]);
/* Without this flush, the trackpad may lock up */
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
}
}
static void
init_trackball(int id)
{
struct adb_request req;
printk(" (trackman/mouseman)");
adb_mouse_kinds[id] = ADBMOUSE_TRACKBALL;
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 00,0x81);
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 01,0x81);
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 02,0x81);
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 03,0x38);
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 00,0x81);
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 01,0x81);
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 02,0x81);
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 03,0x38);
}
static void
init_turbomouse(int id)
{
struct adb_request req;
printk(" (TurboMouse 5)");
adb_mouse_kinds[id] = ADBMOUSE_TURBOMOUSE5;
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3));
adb_request(&req, NULL, ADBREQ_SYNC, 9,
ADB_WRITEREG(3,2),
0xe7,
0x8c,
0,
0,
0,
0xff,
0xff,
0x94);
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3));
adb_request(&req, NULL, ADBREQ_SYNC, 9,
ADB_WRITEREG(3,2),
0xa5,
0x14,
0,
0,
0x69,
0xff,
0xff,
0x27);
}
static void
init_microspeed(int id)
{
struct adb_request req;
printk(" (Microspeed/MacPoint or compatible)");
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
/* This will initialize mice using the Microspeed, MacPoint and
other compatible firmware. Bit 12 enables extended protocol.
Register 1 Listen (4 Bytes)
0 - 3 Button is mouse (set also for double clicking!!!)
4 - 7 Button is locking (affects change speed also)
8 - 11 Button changes speed
12 1 = Extended mouse mode, 0 = normal mouse mode
13 - 15 unused 0
16 - 23 normal speed
24 - 31 changed speed
Register 1 talk holds version and product identification information.
Register 1 Talk (4 Bytes):
0 - 7 Product code
8 - 23 undefined, reserved
24 - 31 Version number
Speed 0 is maximum; 1 to 255 set the speed in increments of 1/256 of maximum.
*/
adb_request(&req, NULL, ADBREQ_SYNC, 5,
ADB_WRITEREG(id,1),
0x20, /* alt speed = 0x20 (rather slow) */
0x00, /* norm speed = 0x00 (fastest) */
0x10, /* extended protocol, no speed change */
0x07); /* all buttons enabled as mouse buttons, no locking */
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
}
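/*
 * A hypothetical wrapper for the register-1 write above, with parameter
 * names following the bit layout documented in the comment (the most
 * significant byte goes out first):
 */
#if 0	/* illustrative sketch, not part of the driver */
static void microspeed_config(int id, unsigned char alt_speed,
			      unsigned char norm_speed,
			      int extended, unsigned char buttons)
{
	struct adb_request req;

	adb_request(&req, NULL, ADBREQ_SYNC, 5,
		    ADB_WRITEREG(id, 1),
		    alt_speed,			/* bits 24 - 31 */
		    norm_speed,			/* bits 16 - 23 */
		    extended ? 0x10 : 0x00,	/* bit 12 of bits 8 - 15 */
		    buttons);			/* bits 0 - 7 */
}
#endif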
static void
init_ms_a3(int id)
{
struct adb_request req;
printk(" (Mouse Systems A3 Mouse, or compatible)");
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id, 0x2),
0x00,
0x07);
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
}
......@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/prom.h>
#include <linux/adb.h>
#include <asm/io.h>
......@@ -57,7 +58,7 @@ struct adb_regs {
static volatile struct adb_regs *adb;
static struct adb_request *current_req, *last_req;
static unsigned char adb_rbuf[16];
static spinlock_t macio_lock = SPIN_LOCK_UNLOCKED;
static int macio_probe(void);
static int macio_init(void);
......@@ -66,7 +67,6 @@ static int macio_send_request(struct adb_request *req, int sync);
static int macio_adb_autopoll(int devs);
static void macio_adb_poll(void);
static int macio_adb_reset_bus(void);
static void completed(void);
struct adb_driver macio_adb_driver = {
"MACIO",
......@@ -107,19 +107,19 @@ int macio_init(void)
adb = (volatile struct adb_regs *)
ioremap(adbs->addrs->address, sizeof(struct adb_regs));
if (request_irq(adbs->intrs[0].line, macio_adb_interrupt,
0, "ADB", (void *)0)) {
printk(KERN_ERR "ADB: can't get irq %d\n",
adbs->intrs[0].line);
return -EAGAIN;
}
out_8(&adb->ctrl.r, 0);
out_8(&adb->intr.r, 0);
out_8(&adb->error.r, 0);
out_8(&adb->active_hi.r, 0xff); /* for now, set all devices active */
out_8(&adb->active_lo.r, 0xff);
out_8(&adb->autopoll.r, APE);
if (request_irq(adbs->intrs[0].line, macio_adb_interrupt,
0, "ADB", (void *)0)) {
printk(KERN_ERR "ADB: can't get irq %d\n",
adbs->intrs[0].line);
return -EAGAIN;
}
out_8(&adb->intr_enb.r, DFB | TAG);
printk("adb: mac-io driver 1.0 for unified ADB\n");
......@@ -129,16 +129,27 @@ int macio_init(void)
static int macio_adb_autopoll(int devs)
{
unsigned long flags;
spin_lock_irqsave(&macio_lock, flags);
out_8(&adb->active_hi.r, devs >> 8);
out_8(&adb->active_lo.r, devs);
out_8(&adb->autopoll.r, devs? APE: 0);
spin_unlock_irqrestore(&macio_lock, flags);
return 0;
}
static int macio_adb_reset_bus(void)
{
unsigned long flags;
int timeout = 1000000;
/* Hrm... we may want not to lock interrupts for so
 * long... oh well, who uses that chip anyway? :)
 * This function will seldom be used (during boot
 * on rare machines), so...
 */
spin_lock_irqsave(&macio_lock, flags);
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | ADB_RST);
while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
if (--timeout == 0) {
......@@ -146,13 +157,14 @@ static int macio_adb_reset_bus(void)
return -1;
}
}
spin_unlock_irqrestore(&macio_lock, flags);
return 0;
}
/* Send an ADB command */
static int macio_send_request(struct adb_request *req, int sync)
{
unsigned long mflags;
unsigned long flags;
int i;
if (req->data[0] != ADB_PACKET)
......@@ -167,8 +179,7 @@ static int macio_send_request(struct adb_request *req, int sync)
req->complete = 0;
req->reply_len = 0;
save_flags(mflags);
cli();
spin_lock_irqsave(&macio_lock, flags);
if (current_req != 0) {
last_req->next = req;
last_req = req;
......@@ -176,7 +187,7 @@ static int macio_send_request(struct adb_request *req, int sync)
current_req = last_req = req;
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
}
restore_flags(mflags);
spin_unlock_irqrestore(&macio_lock, flags);
if (sync) {
while (!req->complete)
......@@ -190,7 +201,12 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
{
int i, n, err;
struct adb_request *req;
unsigned char ibuf[16];
int ibuf_len = 0;
int complete = 0;
int autopoll = 0;
spin_lock(&macio_lock);
if (in_8(&adb->intr.r) & TAG) {
if ((req = current_req) != 0) {
/* put the current request in */
......@@ -202,7 +218,10 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
out_8(&adb->ctrl.r, DTB + CRE);
} else {
out_8(&adb->ctrl.r, DTB);
completed();
current_req = req->next;
complete = 1;
if (current_req)
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
}
}
out_8(&adb->intr.r, 0);
......@@ -218,39 +237,42 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
for (i = 0; i < req->reply_len; ++i)
req->reply[i] = in_8(&adb->data[i].r);
}
completed();
current_req = req->next;
complete = 1;
if (current_req)
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
} else if (err == 0) {
/* autopoll data */
n = in_8(&adb->dcount.r) & HMB;
for (i = 0; i < n; ++i)
adb_rbuf[i] = in_8(&adb->data[i].r);
adb_input(adb_rbuf, n, regs,
in_8(&adb->dcount.r) & APD);
ibuf[i] = in_8(&adb->data[i].r);
ibuf_len = n;
autopoll = (in_8(&adb->dcount.r) & APD) != 0;
}
out_8(&adb->error.r, 0);
out_8(&adb->intr.r, 0);
}
}
static void completed(void)
{
struct adb_request *req = current_req;
req->complete = 1;
current_req = req->next;
if (current_req)
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
if (req->done)
(*req->done)(req);
spin_unlock(&macio_lock);
if (complete && req) {
void (*done)(struct adb_request *) = req->done;
mb();
req->complete = 1;
/* Here, we assume that if the request has a done member, the
 * struct request will survive until req->complete is set to 1
 */
if (done)
(*done)(req);
}
if (ibuf_len)
adb_input(ibuf, ibuf_len, regs, autopoll);
}
static void macio_adb_poll(void)
{
unsigned long flags;
save_flags(flags);
cli();
local_irq_save(flags);
if (in_8(&adb->intr.r) != 0)
macio_adb_interrupt(0, 0, 0);
restore_flags(flags);
local_irq_restore(flags);
}
# Kernel keymap for Macintoshes. This uses 7 modifier combinations.
keymaps 0-2,4-5,8,12
# We use the Command (pretzel) key as Alt, and the Option key as AltGr.
#
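# The keymap numbers are sums of the standard loadkeys modifier weights
# (shift = 1, altgr = 2, control = 4, alt = 8): 0 is plain, 1 shift,
# 2 altgr, 4 control, 5 shift+control, 8 alt and 12 control+alt -- the
# same seven combinations that mackbd_init_hw() loads into
# key_maps[0-2,4-5,8,12].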
keycode 0x00 = a
altgr keycode 0x00 = Hex_A
keycode 0x01 = s
keycode 0x02 = d
altgr keycode 0x02 = Hex_D
keycode 0x03 = f
altgr keycode 0x03 = Hex_F
keycode 0x04 = h
keycode 0x05 = g
keycode 0x06 = z
keycode 0x07 = x
keycode 0x08 = c
altgr keycode 0x08 = Hex_C
keycode 0x09 = v
keycode 0x0a =
keycode 0x0b = b
altgr keycode 0x0b = Hex_B
keycode 0x0c = q
keycode 0x0d = w
keycode 0x0e = e
altgr keycode 0x0e = Hex_E
keycode 0x0f = r
keycode 0x10 = y
keycode 0x11 = t
keycode 0x12 = one exclam
alt keycode 0x12 = Meta_one
keycode 0x13 = two at at
control keycode 0x13 = nul
shift control keycode 0x13 = nul
alt keycode 0x13 = Meta_two
keycode 0x14 = three numbersign
control keycode 0x14 = Escape
alt keycode 0x14 = Meta_three
keycode 0x15 = four dollar dollar
control keycode 0x15 = Control_backslash
alt keycode 0x15 = Meta_four
keycode 0x16 = six asciicircum
control keycode 0x16 = Control_asciicircum
alt keycode 0x16 = Meta_six
keycode 0x17 = five percent
control keycode 0x17 = Control_bracketright
alt keycode 0x17 = Meta_five
keycode 0x18 = equal plus
alt keycode 0x18 = Meta_equal
keycode 0x19 = nine parenleft bracketright
alt keycode 0x19 = Meta_nine
keycode 0x1a = seven ampersand braceleft
control keycode 0x1a = Control_underscore
alt keycode 0x1a = Meta_seven
keycode 0x1b = minus underscore backslash
control keycode 0x1b = Control_underscore
shift control keycode 0x1b = Control_underscore
alt keycode 0x1b = Meta_minus
keycode 0x1c = eight asterisk bracketleft
control keycode 0x1c = Delete
alt keycode 0x1c = Meta_eight
keycode 0x1d = zero parenright braceright
alt keycode 0x1d = Meta_zero
keycode 0x1e = bracketright braceright asciitilde
control keycode 0x1e = Control_bracketright
alt keycode 0x1e = Meta_bracketright
keycode 0x1f = o
keycode 0x20 = u
keycode 0x21 = bracketleft braceleft
control keycode 0x21 = Escape
alt keycode 0x21 = Meta_bracketleft
keycode 0x22 = i
keycode 0x23 = p
keycode 0x24 = Return
alt keycode 0x24 = Meta_Control_m
keycode 0x25 = l
keycode 0x26 = j
keycode 0x27 = apostrophe quotedbl
control keycode 0x27 = Control_g
alt keycode 0x27 = Meta_apostrophe
keycode 0x28 = k
keycode 0x29 = semicolon colon
alt keycode 0x29 = Meta_semicolon
keycode 0x2a = backslash bar
control keycode 0x2a = Control_backslash
alt keycode 0x2a = Meta_backslash
keycode 0x2b = comma less
alt keycode 0x2b = Meta_comma
keycode 0x2c = slash question
control keycode 0x2c = Delete
alt keycode 0x2c = Meta_slash
keycode 0x2d = n
keycode 0x2e = m
keycode 0x2f = period greater
control keycode 0x2f = Compose
alt keycode 0x2f = Meta_period
keycode 0x30 = Tab Tab
alt keycode 0x30 = Meta_Tab
keycode 0x31 = space space
control keycode 0x31 = nul
alt keycode 0x31 = Meta_space
keycode 0x32 = grave asciitilde
control keycode 0x32 = nul
alt keycode 0x32 = Meta_grave
keycode 0x33 = Delete Delete
control keycode 0x33 = BackSpace
alt keycode 0x33 = Meta_Delete
keycode 0x34 =
keycode 0x35 = Escape Escape
alt keycode 0x35 = Meta_Escape
keycode 0x36 = Control
keycode 0x37 = Alt
keycode 0x38 = Shift
keycode 0x39 = Caps_Lock
keycode 0x3a = AltGr
keycode 0x3b = Left
alt keycode 0x3b = Decr_Console
keycode 0x3c = Right
alt keycode 0x3c = Incr_Console
keycode 0x3d = Down
keycode 0x3e = Up
keycode 0x3f =
keycode 0x40 =
keycode 0x41 = KP_Period
keycode 0x42 =
keycode 0x43 = KP_Multiply
keycode 0x44 =
keycode 0x45 = KP_Add
keycode 0x46 =
keycode 0x47 = Num_Lock
# shift keycode 0x47 = Bare_Num_Lock
keycode 0x48 =
keycode 0x49 =
keycode 0x4a =
keycode 0x4b = KP_Divide
keycode 0x4c = KP_Enter
keycode 0x4d =
keycode 0x4e = KP_Subtract
keycode 0x4f =
keycode 0x50 =
keycode 0x51 =
#keycode 0x51 = KP_Equals
keycode 0x52 = KP_0
alt keycode 0x52 = Ascii_0
altgr keycode 0x52 = Hex_0
keycode 0x53 = KP_1
alt keycode 0x53 = Ascii_1
altgr keycode 0x53 = Hex_1
keycode 0x54 = KP_2
alt keycode 0x54 = Ascii_2
altgr keycode 0x54 = Hex_2
keycode 0x55 = KP_3
alt keycode 0x55 = Ascii_3
altgr keycode 0x55 = Hex_3
keycode 0x56 = KP_4
alt keycode 0x56 = Ascii_4
altgr keycode 0x56 = Hex_4
keycode 0x57 = KP_5
alt keycode 0x57 = Ascii_5
altgr keycode 0x57 = Hex_5
keycode 0x58 = KP_6
alt keycode 0x58 = Ascii_6
altgr keycode 0x58 = Hex_6
keycode 0x59 = KP_7
alt keycode 0x59 = Ascii_7
altgr keycode 0x59 = Hex_7
keycode 0x5b = KP_8
alt keycode 0x5b = Ascii_8
altgr keycode 0x5b = Hex_8
keycode 0x5c = KP_9
alt keycode 0x5c = Ascii_9
altgr keycode 0x5c = Hex_9
keycode 0x5d =
keycode 0x5e =
keycode 0x5f =
keycode 0x60 = F5 F15 Console_17
control keycode 0x60 = F5
alt keycode 0x60 = Console_5
control alt keycode 0x60 = Console_5
keycode 0x61 = F6 F16 Console_18
control keycode 0x61 = F6
alt keycode 0x61 = Console_6
control alt keycode 0x61 = Console_6
keycode 0x62 = F7 F17 Console_19
control keycode 0x62 = F7
alt keycode 0x62 = Console_7
control alt keycode 0x62 = Console_7
keycode 0x63 = F3 F13 Console_15
control keycode 0x63 = F3
alt keycode 0x63 = Console_3
control alt keycode 0x63 = Console_3
keycode 0x64 = F8 F18 Console_20
control keycode 0x64 = F8
alt keycode 0x64 = Console_8
control alt keycode 0x64 = Console_8
keycode 0x65 = F9 F19 Console_21
control keycode 0x65 = F9
alt keycode 0x65 = Console_9
control alt keycode 0x65 = Console_9
keycode 0x66 =
keycode 0x67 = F11 F11 Console_23
control keycode 0x67 = F11
alt keycode 0x67 = Console_11
control alt keycode 0x67 = Console_11
keycode 0x68 =
keycode 0x69 = F13
keycode 0x6a =
keycode 0x6b = Scroll_Lock Show_Memory Show_Registers
control keycode 0x6b = Show_State
alt keycode 0x6b = Scroll_Lock
keycode 0x6c =
keycode 0x6d = F10 F20 Console_22
control keycode 0x6d = F10
alt keycode 0x6d = Console_10
control alt keycode 0x6d = Console_10
keycode 0x6e =
keycode 0x6f = F12 F12 Console_24
control keycode 0x6f = F12
alt keycode 0x6f = Console_12
control alt keycode 0x6f = Console_12
keycode 0x70 =
keycode 0x71 = Pause
keycode 0x72 = Insert
keycode 0x73 = Home
keycode 0x74 = Prior
shift keycode 0x74 = Scroll_Backward
keycode 0x75 = Remove
keycode 0x76 = F4 F14 Console_16
control keycode 0x76 = F4
alt keycode 0x76 = Console_4
control alt keycode 0x76 = Console_4
keycode 0x77 = End
keycode 0x78 = F2 F12 Console_14
control keycode 0x78 = F2
alt keycode 0x78 = Console_2
control alt keycode 0x78 = Console_2
keycode 0x79 = Next
shift keycode 0x79 = Scroll_Forward
keycode 0x7a = F1 F11 Console_13
control keycode 0x7a = F1
alt keycode 0x7a = Console_1
control alt keycode 0x7a = Console_1
keycode 0x7b = Shift
keycode 0x7c = AltGr
keycode 0x7d = Control
keycode 0x7e =
keycode 0x7f =
#keycode 0x7f = Power
control shift keycode 0x7f = Boot
string F1 = "\033[[A"
string F2 = "\033[[B"
string F3 = "\033[[C"
string F4 = "\033[[D"
string F5 = "\033[[E"
string F6 = "\033[17~"
string F7 = "\033[18~"
string F8 = "\033[19~"
string F9 = "\033[20~"
string F10 = "\033[21~"
string F11 = "\033[23~"
string F12 = "\033[24~"
string F13 = "\033[25~"
string F14 = "\033[26~"
string F15 = "\033[28~"
string F16 = "\033[29~"
string F17 = "\033[31~"
string F18 = "\033[32~"
string F19 = "\033[33~"
string F20 = "\033[34~"
string Find = "\033[1~"
string Insert = "\033[2~"
string Remove = "\033[3~"
string Select = "\033[4~"
string Prior = "\033[5~"
string Next = "\033[6~"
string Macro = "\033[M"
string Pause = "\033[P"
compose '`' 'A' to 'À'
compose '`' 'a' to 'à'
compose '\'' 'A' to 'Á'
compose '\'' 'a' to 'á'
compose '^' 'A' to 'Â'
compose '^' 'a' to 'â'
compose '~' 'A' to 'Ã'
compose '~' 'a' to 'ã'
compose '"' 'A' to 'Ä'
compose '"' 'a' to 'ä'
compose 'O' 'A' to 'Å'
compose 'o' 'a' to 'å'
compose '0' 'A' to 'Å'
compose '0' 'a' to 'å'
compose 'A' 'A' to 'Å'
compose 'a' 'a' to 'å'
compose 'A' 'E' to 'Æ'
compose 'a' 'e' to 'æ'
compose ',' 'C' to 'Ç'
compose ',' 'c' to 'ç'
compose '`' 'E' to 'È'
compose '`' 'e' to 'è'
compose '\'' 'E' to 'É'
compose '\'' 'e' to 'é'
compose '^' 'E' to 'Ê'
compose '^' 'e' to 'ê'
compose '"' 'E' to 'Ë'
compose '"' 'e' to 'ë'
compose '`' 'I' to 'Ì'
compose '`' 'i' to 'ì'
compose '\'' 'I' to 'Í'
compose '\'' 'i' to 'í'
compose '^' 'I' to 'Î'
compose '^' 'i' to 'î'
compose '"' 'I' to 'Ï'
compose '"' 'i' to 'ï'
compose '-' 'D' to 'Ð'
compose '-' 'd' to 'ð'
compose '~' 'N' to 'Ñ'
compose '~' 'n' to 'ñ'
compose '`' 'O' to 'Ò'
compose '`' 'o' to 'ò'
compose '\'' 'O' to 'Ó'
compose '\'' 'o' to 'ó'
compose '^' 'O' to 'Ô'
compose '^' 'o' to 'ô'
compose '~' 'O' to 'Õ'
compose '~' 'o' to 'õ'
compose '"' 'O' to 'Ö'
compose '"' 'o' to 'ö'
compose '/' 'O' to 'Ø'
compose '/' 'o' to 'ø'
compose '`' 'U' to 'Ù'
compose '`' 'u' to 'ù'
compose '\'' 'U' to 'Ú'
compose '\'' 'u' to 'ú'
compose '^' 'U' to 'Û'
compose '^' 'u' to 'û'
compose '"' 'U' to 'Ü'
compose '"' 'u' to 'ü'
compose '\'' 'Y' to 'Ý'
compose '\'' 'y' to 'ý'
compose 'T' 'H' to 'Þ'
compose 't' 'h' to 'þ'
compose 's' 's' to 'ß'
compose '"' 'y' to 'ÿ'
compose 's' 'z' to 'ß'
compose 'i' 'j' to 'ÿ'
/*
* Linux/PowerPC Real Time Clock Driver
*
* heavily based on:
* Linux/SPARC Real Time Clock Driver
* Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
*
* This is a little driver that lets a user-level program access
* the PPC clock chip. It is of no use unless you
* use the modified clock utility.
*
* Get the modified clock utility from:
* ftp://vger.rutgers.edu/pub/linux/Sparc/userland/clock.c
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/time.h>
static int rtc_busy = 0;
/* Retrieve the current date and time from the real time clock. */
void get_rtc_time(struct rtc_time *t)
{
unsigned long nowtime;
nowtime = (ppc_md.get_rtc_time)();
to_tm(nowtime, t);
t->tm_year -= 1900;
t->tm_mon -= 1; /* Make sure userland has a 0-based month */
}
/* Set the current date and time in the real time clock. */
void set_rtc_time(struct rtc_time *t)
{
unsigned long nowtime;
nowtime = mktime(t->tm_year+1900, t->tm_mon+1, t->tm_mday,
t->tm_hour, t->tm_min, t->tm_sec);
(ppc_md.set_rtc_time)(nowtime);
}
static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
struct rtc_time rtc_tm;
switch (cmd)
{
case RTC_RD_TIME:
if (ppc_md.get_rtc_time)
{
get_rtc_time(&rtc_tm);
if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
return -EFAULT;
return 0;
}
else
return -EINVAL;
case RTC_SET_TIME:
if (!capable(CAP_SYS_TIME))
return -EPERM;
if (ppc_md.set_rtc_time)
{
if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
return -EFAULT;
set_rtc_time(&rtc_tm);
return 0;
}
else
return -EINVAL;
default:
return -EINVAL;
}
}
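/*
 * A minimal user-space sketch of driving this interface, assuming the
 * misc device appears as /dev/rtc and that <linux/rtc.h> supplies
 * struct rtc_time and RTC_RD_TIME:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/rtc.h>
 *
 *	int main(void)
 *	{
 *		struct rtc_time tm;
 *		int fd = open("/dev/rtc", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0) {
 *			perror("rtc");
 *			return 1;
 *		}
 *		// tm_year is years since 1900, tm_mon is 0-based (see above)
 *		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
 *		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 *		       tm.tm_hour, tm.tm_min, tm.tm_sec);
 *		return 0;
 *	}
 */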
static int rtc_open(struct inode *inode, struct file *file)
{
if (rtc_busy)
return -EBUSY;
rtc_busy = 1;
MOD_INC_USE_COUNT;
return 0;
}
static int rtc_release(struct inode *inode, struct file *file)
{
MOD_DEC_USE_COUNT;
rtc_busy = 0;
return 0;
}
static struct file_operations rtc_fops = {
owner: THIS_MODULE,
llseek: no_llseek,
ioctl: rtc_ioctl,
open: rtc_open,
release: rtc_release
};
static struct miscdevice rtc_dev = { RTC_MINOR, "rtc", &rtc_fops };
static int __init rtc_init(void)
{
int error;
error = misc_register(&rtc_dev);
if (error) {
printk(KERN_ERR "rtc: unable to get misc minor\n");
return error;
}
return 0;
}
static void __exit rtc_exit(void)
{
misc_deregister(&rtc_dev);
}
module_init(rtc_init);
module_exit(rtc_exit);
MODULE_LICENSE("GPL");
......@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/spinlock.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
......@@ -31,6 +32,7 @@
#include <linux/init.h>
static volatile unsigned char *via;
static spinlock_t cuda_lock = SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_MAC
#define CUDA_IRQ IRQ_MAC_ADB
......@@ -386,8 +388,8 @@ cuda_write(struct adb_request *req)
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
save_flags(flags); cli();
spin_lock_irqsave(&cuda_lock, flags);
if (current_req != 0) {
last_req->next = req;
last_req = req;
......@@ -397,15 +399,14 @@ cuda_write(struct adb_request *req)
if (cuda_state == idle)
cuda_start();
}
spin_unlock_irqrestore(&cuda_lock, flags);
restore_flags(flags);
return 0;
}
static void
cuda_start()
{
unsigned long flags;
struct adb_request *req;
/* assert cuda_state == idle */
......@@ -413,41 +414,46 @@ cuda_start()
req = current_req;
if (req == 0)
return;
save_flags(flags); cli();
if ((via[B] & TREQ) == 0) {
restore_flags(flags);
if ((via[B] & TREQ) == 0)
return; /* a byte is coming in from the CUDA */
}
/* set the shift register to shift out and send a byte */
via[ACR] |= SR_OUT; eieio();
via[SR] = req->data[0]; eieio();
via[B] &= ~TIP;
cuda_state = sent_first_byte;
restore_flags(flags);
}
void
cuda_poll()
{
unsigned long flags;
save_flags(flags);
cli();
if (via[IFR] & SR_INT)
if (via[IFR] & SR_INT) {
unsigned long flags;
/* cuda_interrupt only takes a normal lock, so we disable
 * interrupts here to avoid re-entering and thus deadlocking.
 * An option would be to disable only the IRQ source with
 * disable_irq(); would that work on m68k? --BenH
 */
local_irq_save(flags);
cuda_interrupt(0, 0, 0);
restore_flags(flags);
local_irq_restore(flags);
}
}
static void
cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
{
int x, status;
struct adb_request *req;
struct adb_request *req = NULL;
unsigned char ibuf[16];
int ibuf_len = 0;
int complete = 0;
if ((via[IFR] & SR_INT) == 0)
return;
spin_lock(&cuda_lock);
status = (~via[B] & (TIP|TREQ)) | (via[ACR] & SR_OUT); eieio();
/* printk("cuda_interrupt: state=%d status=%x\n", cuda_state, status); */
switch (cuda_state) {
......@@ -502,8 +508,7 @@ cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
cuda_state = awaiting_reply;
} else {
current_req = req->next;
if (req->done)
(*req->done)(req);
complete = 1;
/* not sure about this */
cuda_state = idle;
cuda_start();
......@@ -544,12 +549,18 @@ cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
memmove(req->reply, req->reply + 2, req->reply_len);
}
}
req->complete = 1;
current_req = req->next;
if (req->done)
(*req->done)(req);
complete = 1;
} else {
cuda_input(cuda_rbuf, reply_ptr - cuda_rbuf, regs);
/* This is tricky. We must break the spinlock to call
 * cuda_input. However, doing so means we might get
 * re-entered from another CPU getting an interrupt
 * or calling cuda_poll(). I ended up using the stack
 * (it's only 16 bytes) and moving the actual
 * call to cuda_input outside of the lock.
 */
ibuf_len = reply_ptr - cuda_rbuf;
memcpy(ibuf, cuda_rbuf, ibuf_len);
}
if (status == TREQ) {
via[B] &= ~TIP; eieio();
......@@ -565,6 +576,19 @@ cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
default:
printk("cuda_interrupt: unknown cuda_state %d?\n", cuda_state);
}
spin_unlock(&cuda_lock);
if (complete && req) {
void (*done)(struct adb_request *) = req->done;
mb();
req->complete = 1;
/* Here, we assume that if the request has a done member, the
 * struct request will survive until req->complete is set to 1
 */
if (done)
(*done)(req);
}
if (ibuf_len)
cuda_input(ibuf, ibuf_len, regs);
}
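/*
 * The shape shared by this handler and the mac-io one above is worth
 * spelling out: input is copied to a small on-stack buffer while the
 * spinlock is held, and the consumer runs only after the lock is
 * dropped. A self-contained sketch of that pattern (hypothetical names,
 * not part of the driver):
 */
#if 0	/* illustrative sketch */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void example_consume(unsigned char *buf, int len) { }

static void example_handler(void)
{
	unsigned char buf[16];
	int len = 0;

	spin_lock(&example_lock);
	/* ...read the hardware, copying any input into buf and
	 * setting len, as the reading state above does... */
	spin_unlock(&example_lock);

	/* deliver outside the lock: the consumer may poll us again
	 * or take other locks without deadlocking on example_lock */
	if (len)
		example_consume(buf, len);
}
#endif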
static void
......
......@@ -9,11 +9,7 @@
* and the RTC (real time clock) chip.
*
* Copyright (C) 1998 Paul Mackerras and Fabio Riccardi.
* Copyright (C) 2001 Benjamin Herrenschmidt
*
* todo: - Cleanup synchro between VIA interrupt and GPIO-based PMU
* interrupt.
*
* Copyright (C) 2001-2002 Benjamin Herrenschmidt
*
*/
#include <stdarg.h>
......@@ -111,14 +107,24 @@ static volatile enum pmu_state {
reading_intr,
} pmu_state;
static volatile enum int_data_state {
int_data_empty,
int_data_fill,
int_data_ready,
int_data_flush
} int_data_state[2] = { int_data_empty, int_data_empty };
static struct adb_request *current_req;
static struct adb_request *last_req;
static struct adb_request *req_awaiting_reply;
static unsigned char interrupt_data[256]; /* Made bigger: I've been told that might happen */
static unsigned char interrupt_data[2][32];
static int interrupt_data_len[2];
static int int_data_last;
static unsigned char *reply_ptr;
static int data_index;
static int data_len;
static volatile int adb_int_pending;
static volatile int disable_poll;
static struct adb_request bright_req_1, bright_req_2, bright_req_3;
static struct device_node *vias;
static int pmu_kind = PMU_UNKNOWN;
......@@ -174,12 +180,6 @@ static int init_pmu(void);
static int pmu_queue_request(struct adb_request *req);
static void pmu_start(void);
static void via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs);
static void send_byte(int x);
static void recv_byte(void);
static void pmu_sr_intr(struct pt_regs *regs);
static void pmu_done(struct adb_request *req);
static void pmu_handle_data(unsigned char *data, int len,
struct pt_regs *regs);
static void gpio1_interrupt(int irq, void *arg, struct pt_regs *regs);
static int proc_get_info(char *page, char **start, off_t off,
int count, int *eof, void *data);
......@@ -521,8 +521,8 @@ init_pmu()
/* ack all pending interrupts */
timeout = 100000;
interrupt_data[0] = 1;
while (interrupt_data[0] || pmu_state != idle) {
interrupt_data[0][0] = 1;
while (interrupt_data[0][0] || pmu_state != idle) {
if (--timeout < 0) {
printk(KERN_ERR "init_pmu: timed out acking intrs\n");
return 0;
......@@ -1176,7 +1176,7 @@ pmu_queue_request(struct adb_request *req)
return 0;
}
static void __openfirmware
static inline void
wait_for_ack(void)
{
/* Slightly increased the delay; I had one occurrence of the message
......@@ -1194,7 +1194,7 @@ wait_for_ack(void)
/* New PMU seems to be very sensitive to those timings, so we make sure
* PCI is flushed immediately */
static void __openfirmware
static inline void
send_byte(int x)
{
volatile unsigned char *v = via;
......@@ -1205,8 +1205,8 @@ send_byte(int x)
(void)in_8(&v[B]);
}
static void __openfirmware
recv_byte()
static inline void
recv_byte(void)
{
volatile unsigned char *v = via;
......@@ -1216,7 +1216,18 @@ recv_byte()
(void)in_8(&v[B]);
}
static volatile int disable_poll;
static inline void
pmu_done(struct adb_request *req)
{
void (*done)(struct adb_request *) = req->done;
mb();
req->complete = 1;
/* Here, we assume that if the request has a done member, the
 * struct request will survive until req->complete is set to 1
 */
if (done)
(*done)(req);
}
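/*
 * The mb() above pairs with synchronous callers that spin on
 * req->complete and then read the reply; the barrier guarantees the
 * reply bytes are visible before complete is. A sketch of the waiting
 * side, using the pmu_request()/pmu_poll() idiom found elsewhere in
 * this file (the PMU_GET_VERSION command is just an example):
 */
#if 0	/* illustrative sketch */
static int example_sync_request(void)
{
	struct adb_request req;

	if (pmu_request(&req, NULL, 1, PMU_GET_VERSION) < 0)
		return -1;
	while (!req.complete)		/* pmu_done() sets this last... */
		pmu_poll();
	return req.reply_len ? req.reply[0] : -1; /* ...reply valid here */
}
#endif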
static void __openfirmware
pmu_start()
......@@ -1281,9 +1292,9 @@ pmu_suspend(void)
}
do {
spin_unlock(&pmu_lock);
spin_unlock_irqrestore(&pmu_lock, flags);
via_pmu_interrupt(0, 0, 0);
spin_lock(&pmu_lock);
spin_lock_irqsave(&pmu_lock, flags);
if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
#ifdef SUSPEND_USES_PMU
pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0);
......@@ -1330,61 +1341,83 @@ pmu_resume(void)
#endif /* SUSPEND_USES_PMU */
}
/* Interrupt data could be the result data from an ADB cmd */
static void __openfirmware
via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
{
unsigned long flags;
int intr;
int nloop = 0;
/* This is a bit brutal; we can probably do better */
spin_lock_irqsave(&pmu_lock, flags);
++disable_poll;
for (;;) {
intr = in_8(&via[IFR]) & (SR_INT | CB1_INT);
if (intr == 0)
break;
if (++nloop > 1000) {
printk(KERN_DEBUG "PMU: stuck in intr loop, "
"intr=%x, ier=%x pmu_state=%d\n",
intr, in_8(&via[IER]), pmu_state);
break;
}
out_8(&via[IFR], intr);
if (intr & SR_INT)
pmu_sr_intr(regs);
if (intr & CB1_INT)
adb_int_pending = 1;
asleep = 0;
if (drop_interrupts || len < 1) {
adb_int_pending = 0;
return;
}
if (pmu_state == idle) {
if (adb_int_pending) {
pmu_state = intack;
/* Sounds safer to make sure ACK is high before writing.
* This helped kill a problem with ADB and some iBooks
/* Note: for some reason, we get an interrupt with len=1,
* data[0]==0 after each normal ADB interrupt, at least
* on the Pismo. Still investigating... --BenH
*/
if (data[0] & PMU_INT_ADB) {
if ((data[0] & PMU_INT_ADB_AUTO) == 0) {
struct adb_request *req = req_awaiting_reply;
if (req == 0) {
printk(KERN_ERR "PMU: extra ADB reply\n");
return;
}
req_awaiting_reply = 0;
if (len <= 2)
req->reply_len = 0;
else {
memcpy(req->reply, data + 1, len - 1);
req->reply_len = len - 1;
}
pmu_done(req);
} else {
#ifdef CONFIG_XMON
if (len == 4 && data[1] == 0x2c) {
extern int xmon_wants_key, xmon_adb_keycode;
if (xmon_wants_key) {
xmon_adb_keycode = data[2];
return;
}
}
#endif /* CONFIG_XMON */
#ifdef CONFIG_ADB
/*
* XXX On the [23]400 the PMU gives us an up
* event for keycodes 0x74 or 0x75 when the PC
* card eject buttons are released, so we
* ignore those events.
*/
wait_for_ack();
send_byte(PMU_INT_ACK);
adb_int_pending = 0;
} else if (current_req)
pmu_start();
}
--disable_poll;
spin_unlock_irqrestore(&pmu_lock, flags);
}
static void __openfirmware
gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
{
if ((in_8(gpio_reg + 0x9) & 0x02) == 0) {
adb_int_pending = 1;
via_pmu_interrupt(0, 0, 0);
if (!(pmu_kind == PMU_OHARE_BASED && len == 4
&& data[1] == 0x2c && data[3] == 0xff
&& (data[2] & ~1) == 0xf4))
adb_input(data+1, len-1, regs, 1);
#endif /* CONFIG_ADB */
}
} else {
/* Sound/brightness button pressed */
if ((data[0] & PMU_INT_SNDBRT) && len == 3) {
#ifdef CONFIG_PMAC_BACKLIGHT
#ifdef CONFIG_INPUT_ADBHID
if (!disable_kernel_backlight)
#endif /* CONFIG_INPUT_ADBHID */
set_backlight_level(data[1] >> 4);
#endif /* CONFIG_PMAC_BACKLIGHT */
}
#ifdef CONFIG_PMAC_PBOOK
/* Environment or tick interrupt, query batteries */
if (pmu_battery_count && (data[0] & PMU_INT_TICK)) {
if ((--query_batt_timer) == 0) {
query_battery_state();
query_batt_timer = BATTERY_POLLING_COUNT;
}
} else if (pmu_battery_count && (data[0] & PMU_INT_ENVIRONMENT))
query_battery_state();
if (data[0])
pmu_pass_intr(data, len);
#endif /* CONFIG_PMAC_PBOOK */
}
}
static void __openfirmware
static struct adb_request* __openfirmware
pmu_sr_intr(struct pt_regs *regs)
{
struct adb_request *req;
......@@ -1393,7 +1426,7 @@ pmu_sr_intr(struct pt_regs *regs)
if (via[B] & TREQ) {
printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
out_8(&via[IFR], SR_INT);
return;
return NULL;
}
/* The ack may not yet be low when we get the interrupt */
while ((in_8(&via[B]) & TACK) != 0)
......@@ -1426,11 +1459,8 @@ pmu_sr_intr(struct pt_regs *regs)
current_req = req->next;
if (req->reply_expected)
req_awaiting_reply = req;
else {
spin_unlock(&pmu_lock);
pmu_done(req);
spin_lock(&pmu_lock);
}
else
return req;
} else {
pmu_state = reading;
data_index = 0;
......@@ -1443,7 +1473,7 @@ pmu_sr_intr(struct pt_regs *regs)
data_index = 0;
data_len = -1;
pmu_state = reading_intr;
reply_ptr = interrupt_data;
reply_ptr = interrupt_data[int_data_last];
recv_byte();
break;
......@@ -1462,108 +1492,113 @@ pmu_sr_intr(struct pt_regs *regs)
}
if (pmu_state == reading_intr) {
spin_unlock(&pmu_lock);
pmu_handle_data(interrupt_data, data_index, regs);
spin_lock(&pmu_lock);
pmu_state = idle;
int_data_state[int_data_last] = int_data_ready;
interrupt_data_len[int_data_last] = data_len;
} else {
req = current_req;
current_req = req->next;
req->reply_len += data_index;
spin_unlock(&pmu_lock);
pmu_done(req);
spin_lock(&pmu_lock);
pmu_state = idle;
return req;
}
pmu_state = idle;
break;
default:
printk(KERN_ERR "via_pmu_interrupt: unknown state %d?\n",
pmu_state);
}
return NULL;
}
static void __openfirmware
pmu_done(struct adb_request *req)
via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs)
{
req->complete = 1;
if (req->done)
(*req->done)(req);
unsigned long flags;
int intr;
int nloop = 0;
int int_data = -1;
struct adb_request *req = NULL;
/* This is a bit brutal; we can probably do better */
spin_lock_irqsave(&pmu_lock, flags);
++disable_poll;
for (;;) {
intr = in_8(&via[IFR]) & (SR_INT | CB1_INT);
if (intr == 0)
break;
if (++nloop > 1000) {
printk(KERN_DEBUG "PMU: stuck in intr loop, "
"intr=%x, ier=%x pmu_state=%d\n",
intr, in_8(&via[IER]), pmu_state);
break;
}
out_8(&via[IFR], intr);
if (intr & CB1_INT)
adb_int_pending = 1;
if (intr & SR_INT) {
req = pmu_sr_intr(regs);
if (req)
break;
}
}
recheck:
if (pmu_state == idle) {
if (adb_int_pending) {
if (int_data_state[0] == int_data_empty)
int_data_last = 0;
else if (int_data_state[1] == int_data_empty)
int_data_last = 1;
else
goto no_free_slot;
pmu_state = intack;
int_data_state[int_data_last] = int_data_fill;
/* Sounds safer to make sure ACK is high before writing.
* This helped kill a problem with ADB and some iBooks
*/
wait_for_ack();
send_byte(PMU_INT_ACK);
adb_int_pending = 0;
no_free_slot:
} else if (current_req)
pmu_start();
}
/* Mark the oldest buffer for flushing */
if (int_data_state[!int_data_last] == int_data_ready) {
int_data_state[!int_data_last] = int_data_flush;
int_data = !int_data_last;
} else if (int_data_state[int_data_last] == int_data_ready) {
int_data_state[int_data_last] = int_data_flush;
int_data = int_data_last;
}
--disable_poll;
spin_unlock_irqrestore(&pmu_lock, flags);
/* Deal with completed PMU requests outside of the lock */
if (req) {
pmu_done(req);
req = NULL;
}
/* Deal with interrupt data outside of the lock */
if (int_data >= 0) {
pmu_handle_data(interrupt_data[int_data], interrupt_data_len[int_data], regs);
spin_lock_irqsave(&pmu_lock, flags);
++disable_poll;
int_data_state[int_data] = int_data_empty;
int_data = -1;
goto recheck;
}
}
/* Interrupt data could be the result data from an ADB cmd */
static void __openfirmware
pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
{
asleep = 0;
if (drop_interrupts || len < 1) {
adb_int_pending = 0;
return;
}
/* Note: for some reason, we get an interrupt with len=1,
* data[0]==0 after each normal ADB interrupt, at least
* on the Pismo. Still investigating... --BenH
*/
if (data[0] & PMU_INT_ADB) {
if ((data[0] & PMU_INT_ADB_AUTO) == 0) {
struct adb_request *req = req_awaiting_reply;
if (req == 0) {
printk(KERN_ERR "PMU: extra ADB reply\n");
return;
}
req_awaiting_reply = 0;
if (len <= 2)
req->reply_len = 0;
else {
memcpy(req->reply, data + 1, len - 1);
req->reply_len = len - 1;
}
pmu_done(req);
} else {
#ifdef CONFIG_XMON
if (len == 4 && data[1] == 0x2c) {
extern int xmon_wants_key, xmon_adb_keycode;
if (xmon_wants_key) {
xmon_adb_keycode = data[2];
return;
}
}
#endif /* CONFIG_XMON */
#ifdef CONFIG_ADB
/*
* XXX On the [23]400 the PMU gives us an up
* event for keycodes 0x74 or 0x75 when the PC
* card eject buttons are released, so we
* ignore those events.
*/
if (!(pmu_kind == PMU_OHARE_BASED && len == 4
&& data[1] == 0x2c && data[3] == 0xff
&& (data[2] & ~1) == 0xf4))
adb_input(data+1, len-1, regs, 1);
#endif /* CONFIG_ADB */
}
} else {
/* Sound/brightness button pressed */
if ((data[0] & PMU_INT_SNDBRT) && len == 3) {
#ifdef CONFIG_PMAC_BACKLIGHT
#ifdef CONFIG_INPUT_ADBHID
if (!disable_kernel_backlight)
#endif /* CONFIG_INPUT_ADBHID */
set_backlight_level(data[1] >> 4);
#endif /* CONFIG_PMAC_BACKLIGHT */
}
#ifdef CONFIG_PMAC_PBOOK
/* Environment or tick interrupt, query batteries */
if (pmu_battery_count && (data[0] & PMU_INT_TICK)) {
if ((--query_batt_timer) == 0) {
query_battery_state();
query_batt_timer = BATTERY_POLLING_COUNT;
}
} else if (pmu_battery_count && (data[0] & PMU_INT_ENVIRONMENT))
query_battery_state();
if (data[0])
pmu_pass_intr(data, len);
#endif /* CONFIG_PMAC_PBOOK */
if ((in_8(gpio_reg + 0x9) & 0x02) == 0) {
adb_int_pending = 1;
via_pmu_interrupt(0, 0, 0);
}
}
......@@ -1635,7 +1670,7 @@ pmu_restart(void)
{
struct adb_request req;
cli();
local_irq_disable();
drop_interrupts = 1;
......@@ -1658,7 +1693,7 @@ pmu_shutdown(void)
{
struct adb_request req;
cli();
local_irq_disable();
drop_interrupts = 1;
......@@ -1979,7 +2014,7 @@ int __openfirmware powerbook_sleep_G3(void)
unsigned long save_l2cr;
unsigned short pmcr1;
struct adb_request req;
int ret, timeout;
int ret;
struct pci_dev *grackle;
grackle = pci_find_slot(0, 0);
......@@ -2036,17 +2071,16 @@ int __openfirmware powerbook_sleep_G3(void)
mb();
asm volatile("mtdec %0" : : "r" (0x7fffffff));
/* Giveup the FPU */
if (current->thread.regs && (current->thread.regs->msr & MSR_FP) != 0)
giveup_fpu(current);
/* We can now disable MSR_EE */
cli();
local_irq_disable();
/* Giveup the FPU */
enable_kernel_fp();
/* For 750, save backside cache setting and disable it */
save_l2cr = _get_L2CR(); /* (returns 0 if not 750) */
if (save_l2cr)
_set_L2CR(0);
save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
_set_L2CR(save_l2cr & 0x7fffffff);
/* Ask the PMU to put us to sleep */
pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
......@@ -2077,7 +2111,7 @@ int __openfirmware powerbook_sleep_G3(void)
restore_via_state();
/* Restore L2 cache */
if (save_l2cr)
if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
_set_L2CR(save_l2cr);
/* Restore userland MMU context */
......@@ -2096,18 +2130,6 @@ int __openfirmware powerbook_sleep_G3(void)
while (!req.complete)
pmu_poll();
/* ack all pending interrupts */
timeout = 100000;
interrupt_data[0] = 1;
while (interrupt_data[0] || pmu_state != idle) {
if (--timeout < 0)
break;
if (pmu_state == idle)
adb_int_pending = 1;
via_pmu_interrupt(0, 0, 0);
udelay(10);
}
/* reenable interrupt controller */
pmac_sleep_restore_intrs();
......@@ -2116,7 +2138,13 @@ int __openfirmware powerbook_sleep_G3(void)
/* Restart jiffies & scheduling */
wakeup_decrementer();
sti();
/* Force a poll of ADB interrupts */
adb_int_pending = 1;
via_pmu_interrupt(0, 0, 0);
/* Re-enable local CPU interrupts */
local_irq_enable();
/* Notify drivers */
broadcast_wake();
......@@ -2127,8 +2155,9 @@ int __openfirmware powerbook_sleep_G3(void)
int __openfirmware powerbook_sleep_Core99(void)
{
unsigned long save_l2cr;
unsigned long save_l3cr;
struct adb_request req;
int ret, timeout;
int ret;
if (!can_sleep) {
printk(KERN_ERR "Sleep mode not supported on this machine\n");
......@@ -2187,6 +2216,9 @@ int __openfirmware powerbook_sleep_Core99(void)
mb();
asm volatile("mtdec %0" : : "r" (0x7fffffff));
/* We can now disable MSR_EE */
local_irq_disable();
/* Giveup the FPU & vec */
enable_kernel_fp();
......@@ -2195,12 +2227,12 @@ int __openfirmware powerbook_sleep_Core99(void)
enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */
/* We can now disable MSR_EE */
cli();
/* For 750, save backside cache setting and disable it */
save_l2cr = _get_L2CR(); /* (returns 0 if not 750) */
if (save_l2cr)
/* Save & disable L2 and L3 caches*/
save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
_set_L3CR(save_l3cr & 0x7fffffff);
if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
_set_L2CR(save_l2cr & 0x7fffffff);
/* Save the state of PCI config space for some slots */
......@@ -2248,8 +2280,11 @@ int __openfirmware powerbook_sleep_Core99(void)
pmu_blink(2);
/* Restore L2 cache */
if (save_l2cr)
if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
_set_L2CR(save_l2cr);
/* Restore L3 cache */
if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
set_context(current->active_mm->context, current->active_mm->pgd);
......@@ -2262,18 +2297,6 @@ int __openfirmware powerbook_sleep_Core99(void)
while (!req.complete)
pmu_poll();
/* ack all pending interrupts */
timeout = 100000;
interrupt_data[0] = 1;
while (interrupt_data[0] || pmu_state != idle) {
if (--timeout < 0)
break;
if (pmu_state == idle)
adb_int_pending = 1;
via_pmu_interrupt(0, 0, 0);
udelay(10);
}
/* reenable interrupt controller */
openpic_sleep_restore_intrs();
......@@ -2282,7 +2305,13 @@ int __openfirmware powerbook_sleep_Core99(void)
/* Restart jiffies & scheduling */
wakeup_decrementer();
sti();
/* Force a poll of ADB interrupts */
adb_int_pending = 1;
via_pmu_interrupt(0, 0, 0);
/* Re-enable local CPU interrupts */
local_irq_enable();
/* Notify drivers */
broadcast_wake();
......@@ -2402,7 +2431,9 @@ int __openfirmware powerbook_sleep_3400(void)
/* Restart jiffies & scheduling */
wakeup_decrementer();
sti();
/* Re-enable local CPU interrupts */
local_irq_enable();
/* Notify drivers */
broadcast_wake();
......@@ -2718,7 +2749,7 @@ pmu_polled_request(struct adb_request *req)
if (l >= 0 && req->nbytes != l + 1)
return -EINVAL;
save_flags(flags); cli();
local_irq_save(flags);
while (pmu_state != idle)
pmu_poll();
......@@ -2741,7 +2772,7 @@ pmu_polled_request(struct adb_request *req)
if (req->done)
(*req->done)(req);
restore_flags(flags);
local_irq_restore(flags);
return 0;
}
#endif /* DEBUG_SLEEP */
......
......@@ -612,7 +612,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
#if defined(__mc68000__)
#if defined(CONFIG_SUN3)
pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
#else
#elif defined(CONFIG_MMU)
if (CPU_IS_020_OR_030)
pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
if (CPU_IS_040_OR_060) {
......
......@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
......
......@@ -4,7 +4,7 @@
obj-$(CONFIG_NFS_FS) += nfs.o
nfs-y := dir.o file.o flushd.o inode.o nfs2xdr.o pagelist.o \
nfs-y := dir.o file.o inode.o nfs2xdr.o pagelist.o \
proc.o read.o symlink.o unlink.o write.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
......
/*
* linux/fs/nfs/flushd.c
*
* For each NFS mount, there is a separate cache object that contains
* a hash table of all clusters. With this cache, an async RPC task
* (`flushd') is associated, which wakes up occasionally to inspect
* its list of dirty buffers.
* (Note that RPC tasks aren't kernel threads. Take a look at the
* rpciod code to understand what they are).
*
* Inside the cache object, we also maintain a count of the current number
* of dirty pages, which may not exceed a certain threshold.
* (FIXME: This threshold should be configurable).
*
* The code is streamlined for what I think is the prevalent case for
* NFS traffic, which is sequential write access without concurrent
* access by different processes.
*
* Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
*
* Rewritten 6/3/2000 by Trond Myklebust
* Copyright (C) 1999, 2000, Trond Myklebust <trond.myklebust@fys.uio.no>
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/time.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/smp_lock.h>
#include <linux/nfs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs_sb.h>
#include <linux/nfs_flushd.h>
/*
* Various constants
*/
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
/*
* This is the wait queue all cluster daemons sleep on
*/
static RPC_WAITQ(flushd_queue, "nfs_flushd");
/*
* Local function declarations.
*/
static void nfs_flushd(struct rpc_task *);
static void nfs_flushd_exit(struct rpc_task *);
int nfs_reqlist_init(struct nfs_server *server)
{
struct nfs_reqlist *cache;
struct rpc_task *task;
int status;
dprintk("NFS: writecache_init\n");
lock_kernel();
status = -ENOMEM;
/* Create the RPC task */
if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC)))
goto out_unlock;
cache = server->rw_requests;
status = 0;
if (cache->task)
goto out_unlock;
task->tk_calldata = server;
cache->task = task;
/* Run the task */
cache->runat = jiffies;
cache->auth = server->client->cl_auth;
task->tk_action = nfs_flushd;
task->tk_exit = nfs_flushd_exit;
rpc_execute(task);
unlock_kernel();
return 0;
out_unlock:
if (task)
rpc_release_task(task);
unlock_kernel();
return status;
}
void nfs_reqlist_exit(struct nfs_server *server)
{
struct nfs_reqlist *cache;
lock_kernel();
cache = server->rw_requests;
if (!cache)
goto out;
dprintk("NFS: reqlist_exit (ptr %p rpc %p)\n", cache, cache->task);
while (cache->task) {
rpc_exit(cache->task, 0);
rpc_wake_up_task(cache->task);
interruptible_sleep_on_timeout(&cache->request_wait, 1 * HZ);
}
out:
unlock_kernel();
}
int nfs_reqlist_alloc(struct nfs_server *server)
{
struct nfs_reqlist *cache;
if (server->rw_requests)
return 0;
cache = (struct nfs_reqlist *)kmalloc(sizeof(*cache), GFP_KERNEL);
if (!cache)
return -ENOMEM;
memset(cache, 0, sizeof(*cache));
atomic_set(&cache->nr_requests, 0);
init_waitqueue_head(&cache->request_wait);
server->rw_requests = cache;
return 0;
}
void nfs_reqlist_free(struct nfs_server *server)
{
if (server->rw_requests) {
kfree(server->rw_requests);
server->rw_requests = NULL;
}
}
#define NFS_FLUSHD_TIMEOUT (30*HZ)
static void
nfs_flushd(struct rpc_task *task)
{
struct nfs_server *server;
struct nfs_reqlist *cache;
LIST_HEAD(head);
dprintk("NFS: %4d flushd starting\n", task->tk_pid);
server = (struct nfs_server *) task->tk_calldata;
cache = server->rw_requests;
for(;;) {
spin_lock(&nfs_wreq_lock);
if (nfs_scan_lru_dirty_timeout(server, &head)) {
spin_unlock(&nfs_wreq_lock);
nfs_flush_list(&head, server->wpages, FLUSH_AGING);
continue;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (nfs_scan_lru_commit_timeout(server, &head)) {
spin_unlock(&nfs_wreq_lock);
nfs_commit_list(&head, FLUSH_AGING);
continue;
}
#endif
spin_unlock(&nfs_wreq_lock);
break;
}
dprintk("NFS: %4d flushd back to sleep\n", task->tk_pid);
if (task->tk_action) {
task->tk_timeout = NFS_FLUSHD_TIMEOUT;
cache->runat = jiffies + task->tk_timeout;
rpc_sleep_on(&flushd_queue, task, NULL, NULL);
}
}
static void
nfs_flushd_exit(struct rpc_task *task)
{
struct nfs_server *server;
struct nfs_reqlist *cache;
server = (struct nfs_server *) task->tk_calldata;
cache = server->rw_requests;
if (cache->task == task)
cache->task = NULL;
wake_up(&cache->request_wait);
}
......@@ -29,7 +29,6 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/nfs_flushd.h>
#include <linux/lockd/bind.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
......@@ -37,7 +36,6 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#define CONFIG_NFS_SNAPSHOT 1
#define NFSDBG_FACILITY NFSDBG_VFS
#define NFS_PARANOIA 1
......@@ -147,18 +145,9 @@ nfs_put_super(struct super_block *sb)
struct nfs_server *server = NFS_SB(sb);
struct rpc_clnt *rpc;
/*
* First get rid of the request flushing daemon.
* Relies on rpc_shutdown_client() waiting on all
* client tasks to finish.
*/
nfs_reqlist_exit(server);
if ((rpc = server->client) != NULL)
rpc_shutdown_client(rpc);
nfs_reqlist_free(server);
if (!(server->flags & NFS_MOUNT_NONLM))
lockd_down(); /* release rpc.lockd */
rpciod_down(); /* release rpciod */
......@@ -262,10 +251,6 @@ int nfs_sb_init(struct super_block *sb)
sb->s_magic = NFS_SUPER_MAGIC;
sb->s_op = &nfs_sops;
INIT_LIST_HEAD(&server->lru_read);
INIT_LIST_HEAD(&server->lru_dirty);
INIT_LIST_HEAD(&server->lru_commit);
INIT_LIST_HEAD(&server->lru_busy);
/* Did getting the root inode fail? */
root_inode = nfs_get_root(sb, &server->fh);
......@@ -333,22 +318,13 @@ int nfs_sb_init(struct super_block *sb)
if (sb->s_maxbytes > MAX_LFS_FILESIZE)
sb->s_maxbytes = MAX_LFS_FILESIZE;
/* Fire up the writeback cache */
if (nfs_reqlist_alloc(server) < 0) {
printk(KERN_NOTICE "NFS: cannot initialize writeback cache.\n");
goto failure_kill_reqlist;
}
/* We're airborne. Set socket buffersize */
rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100);
return 0;
/* Yargs. It didn't work out. */
failure_kill_reqlist:
nfs_reqlist_exit(server);
out_free_all:
if (root_inode)
iput(root_inode);
nfs_reqlist_free(server);
return -EINVAL;
out_no_root:
printk("nfs_read_super: get root inode failed\n");
......
......@@ -355,13 +355,6 @@ nfs_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs_readdirargs *args)
unsigned int replen;
u32 count = args->count;
/*
* Some servers (e.g. HP OS 9.5) seem to expect the buffer size
* to be in longwords ... check whether to convert the size.
*/
if (task->tk_client->cl_flags & NFS_CLNTF_BUFSIZE)
count = count >> 2;
p = xdr_encode_fhandle(p, args->fh);
*p++ = htonl(args->cookie);
*p++ = htonl(count); /* see above */
......
......@@ -17,7 +17,6 @@
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_flushd.h>
#include <linux/nfs_mount.h>
#define NFS_PARANOIA 1
......@@ -37,7 +36,6 @@ nfs_page_alloc(void)
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->wb_list);
INIT_LIST_HEAD(&p->wb_lru);
init_waitqueue_head(&p->wb_wait);
}
return p;
......@@ -49,8 +47,6 @@ nfs_page_free(struct nfs_page *p)
kmem_cache_free(nfs_page_cachep, p);
}
static int nfs_try_to_free_pages(struct nfs_server *);
/**
* nfs_create_request - Create an NFS read/write request.
* @cred: RPC credential to use
......@@ -71,29 +67,18 @@ nfs_create_request(struct rpc_cred *cred, struct inode *inode,
unsigned int offset, unsigned int count)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_reqlist *cache = NFS_REQUESTLIST(inode);
struct nfs_page *req;
/* Deal with hard limits. */
for (;;) {
/* Prevent races by incrementing *before* we test */
atomic_inc(&cache->nr_requests);
/* If we haven't reached the local hard limit yet,
* try to allocate the request struct */
if (atomic_read(&cache->nr_requests) <= MAX_REQUEST_HARD) {
req = nfs_page_alloc();
if (req != NULL)
break;
}
atomic_dec(&cache->nr_requests);
/* try to allocate the request struct */
req = nfs_page_alloc();
if (req != NULL)
break;
/* Try to free up at least one request in order to stay
* below the hard limit
*/
if (nfs_try_to_free_pages(server))
continue;
if (signalled() && (server->flags & NFS_MOUNT_INTR))
return ERR_PTR(-ERESTARTSYS);
yield();
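The deleted hard-limit logic above uses an increment-before-test idiom: the counter is reserved before it is compared against the limit, so two racing tasks cannot both observe limit - 1 and both allocate. A generic sketch of the idiom (try_reserve is a hypothetical name):
#include <asm/atomic.h>

/* Reserve a slot against a hard limit; back out on failure. */
static int try_reserve(atomic_t *nr, int limit)
{
	atomic_inc(nr);			/* reserve first ... */
	if (atomic_read(nr) <= limit)	/* ... then test */
		return 1;		/* the slot is ours */
	atomic_dec(nr);			/* over the limit: undo */
	return 0;
}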
......@@ -137,7 +122,6 @@ void nfs_clear_request(struct nfs_page *req)
if (req->wb_page) {
page_cache_release(req->wb_page);
req->wb_page = NULL;
atomic_dec(&NFS_REQUESTLIST(req->wb_inode)->nr_requests);
}
}
......@@ -156,13 +140,11 @@ nfs_release_request(struct nfs_page *req)
spin_unlock(&nfs_wreq_lock);
return;
}
__nfs_del_lru(req);
spin_unlock(&nfs_wreq_lock);
#ifdef NFS_PARANOIA
BUG_ON (!list_empty(&req->wb_list));
BUG_ON (NFS_WBACK_BUSY(req));
BUG_ON (atomic_read(&NFS_REQUESTLIST(req->wb_inode)->nr_requests) < 0);
#endif
/* Release struct file or cached credential */
......@@ -310,104 +292,6 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
return npages;
}
/*
* nfs_scan_forward - Coalesce more requests
* @req: First request to add
* @dst: destination list
* @nmax: maximum number of requests to coalesce
*
* Tries to coalesce more requests by traversing the request's wb_list.
* Moves the resulting list into dst. Requests are guaranteed to be
* contiguous, and have the same RPC credentials.
*/
static int
nfs_scan_forward(struct nfs_page *req, struct list_head *dst, int nmax)
{
struct nfs_server *server = NFS_SERVER(req->wb_inode);
struct list_head *pos, *head = req->wb_list_head;
struct rpc_cred *cred = req->wb_cred;
unsigned long idx = req->wb_index + 1;
int npages = 0;
for (pos = req->wb_list.next; nfs_lock_request(req); pos = pos->next) {
nfs_list_remove_request(req);
nfs_list_add_request(req, dst);
__nfs_del_lru(req);
__nfs_add_lru(&server->lru_busy, req);
npages++;
if (npages == nmax)
break;
if (pos == head)
break;
if (req->wb_offset + req->wb_bytes != PAGE_CACHE_SIZE)
break;
req = nfs_list_entry(pos);
if (req->wb_index != idx++)
break;
if (req->wb_offset != 0)
break;
if (req->wb_cred != cred)
break;
}
return npages;
}
/**
* nfs_scan_lru - Scan one of the least recently used list
* @head: One of the NFS superblock lru lists
* @dst: Destination list
* @nmax: maximum number of requests to coalesce
*
* Scans one of the NFS superblock lru lists for up to nmax requests
* and returns them on a list. The requests are all guaranteed to be
* contiguous, originating from the same inode and the same file.
*/
int
nfs_scan_lru(struct list_head *head, struct list_head *dst, int nmax)
{
struct list_head *pos;
struct nfs_page *req;
int npages = 0;
list_for_each(pos, head) {
req = nfs_lru_entry(pos);
npages = nfs_scan_forward(req, dst, nmax);
if (npages)
break;
}
return npages;
}
/**
* nfs_scan_lru_timeout - Scan one of the superblock lru lists for timed out requests
* @head: One of the NFS superblock lru lists
* @dst: Destination list
* @nmax: maximum number of requests to coalesce
*
* Scans one of the NFS superblock lru lists for up to nmax requests
* and returns them on a list. The requests are all guaranteed to be
* contiguous, originating from the same inode and the same file.
* The first request on the destination list will be timed out, the
* others are not guaranteed to be so.
*/
int
nfs_scan_lru_timeout(struct list_head *head, struct list_head *dst, int nmax)
{
struct list_head *pos;
struct nfs_page *req;
int npages = 0;
list_for_each(pos, head) {
req = nfs_lru_entry(pos);
if (time_after(req->wb_timeout, jiffies))
break;
npages = nfs_scan_forward(req, dst, nmax);
if (npages)
break;
}
return npages;
}
/**
* nfs_scan_list - Scan a list for matching requests
* @head: One of the NFS inode request lists
......@@ -454,76 +338,11 @@ nfs_scan_list(struct list_head *head, struct list_head *dst,
continue;
nfs_list_remove_request(req);
nfs_list_add_request(req, dst);
__nfs_del_lru(req);
__nfs_add_lru(&NFS_SERVER(req->wb_inode)->lru_busy, req);
res++;
}
return res;
}
/*
* nfs_try_to_free_pages - Free up NFS read/write requests
* @server: The NFS superblock
*
* This function attempts to flush out NFS reads and writes in order
* to keep the hard limit on the total number of pending requests
* on a given NFS partition.
* Note: we first try to commit unstable writes, then flush out pending
* reads, then finally the dirty pages.
* The assumption is that this reflects the ordering from the fastest
* to the slowest method for reclaiming requests.
*/
static int
nfs_try_to_free_pages(struct nfs_server *server)
{
LIST_HEAD(head);
struct nfs_page *req = NULL;
int nreq;
for (;;) {
if (req) {
int status = nfs_wait_on_request(req);
nfs_release_request(req);
if (status)
break;
req = NULL;
}
nreq = atomic_read(&server->rw_requests->nr_requests);
if (nreq < MAX_REQUEST_HARD)
return 1;
spin_lock(&nfs_wreq_lock);
/* Are there any busy RPC calls that might free up requests? */
if (!list_empty(&server->lru_busy)) {
req = nfs_lru_entry(server->lru_busy.next);
req->wb_count++;
__nfs_del_lru(req);
spin_unlock(&nfs_wreq_lock);
continue;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/* Let's try to free up some completed NFSv3 unstable writes */
nfs_scan_lru_commit(server, &head);
if (!list_empty(&head)) {
spin_unlock(&nfs_wreq_lock);
nfs_commit_list(&head, 0);
continue;
}
#endif
/* Last resort: we try to flush out single requests */
nfs_scan_lru_dirty(server, &head);
if (!list_empty(&head)) {
spin_unlock(&nfs_wreq_lock);
nfs_flush_list(&head, server->wpages, FLUSH_STABLE);
continue;
}
spin_unlock(&nfs_wreq_lock);
break;
}
/* We failed to free up requests */
return 0;
}
int nfs_init_nfspagecache(void)
{
nfs_page_cachep = kmem_cache_create("nfs_page",
......
......@@ -28,7 +28,6 @@
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_flushd.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
......
......@@ -58,7 +58,6 @@
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_flushd.h>
#include <linux/nfs_page.h>
#include <asm/uaccess.h>
#include <linux/smp_lock.h>
......@@ -280,33 +279,6 @@ nfs_writepage(struct page *page)
return err;
}
/*
* Check whether the file range we want to write to is locked by
* us.
*/
static int
region_locked(struct inode *inode, struct nfs_page *req)
{
struct file_lock *fl;
loff_t rqstart, rqend;
/* Don't optimize writes if we don't use NLM */
if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)
return 0;
rqstart = req_offset(req) + req->wb_offset;
rqend = rqstart + req->wb_bytes;
for (fl = inode->i_flock; fl; fl = fl->fl_next) {
if (fl->fl_owner == current->files && (fl->fl_flags & FL_POSIX)
&& fl->fl_type == F_WRLCK
&& fl->fl_start <= rqstart && rqend <= fl->fl_end) {
return 1;
}
}
return 0;
}
int
nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
......@@ -408,8 +380,6 @@ nfs_mark_request_dirty(struct nfs_page *req)
spin_lock(&nfs_wreq_lock);
nfs_list_add_request(req, &nfsi->dirty);
nfsi->ndirty++;
__nfs_del_lru(req);
__nfs_add_lru(&NFS_SERVER(inode)->lru_dirty, req);
spin_unlock(&nfs_wreq_lock);
mark_inode_dirty(inode);
}
......@@ -437,8 +407,6 @@ nfs_mark_request_commit(struct nfs_page *req)
spin_lock(&nfs_wreq_lock);
nfs_list_add_request(req, &nfsi->commit);
nfsi->ncommit++;
__nfs_del_lru(req);
__nfs_add_lru(&NFS_SERVER(inode)->lru_commit, req);
spin_unlock(&nfs_wreq_lock);
mark_inode_dirty(inode);
}
......@@ -489,52 +457,6 @@ nfs_wait_on_requests(struct inode *inode, struct file *file, unsigned long idx_s
return res;
}
/**
* nfs_scan_lru_dirty_timeout - Scan LRU list for timed out dirty requests
* @server: NFS superblock data
* @dst: destination list
*
* Moves a maximum of 'wpages' requests from the NFS dirty page LRU list.
* The elements are checked to ensure that they form a contiguous set
* of pages, and that they originated from the same file.
*/
int
nfs_scan_lru_dirty_timeout(struct nfs_server *server, struct list_head *dst)
{
struct nfs_inode *nfsi;
int npages;
npages = nfs_scan_lru_timeout(&server->lru_dirty, dst, server->wpages);
if (npages) {
nfsi = NFS_I(nfs_list_entry(dst->next)->wb_inode);
nfsi->ndirty -= npages;
}
return npages;
}
/**
* nfs_scan_lru_dirty - Scan LRU list for dirty requests
* @server: NFS superblock data
* @dst: destination list
*
* Moves a maximum of 'wpages' requests from the NFS dirty page LRU list.
* The elements are checked to ensure that they form a contiguous set
* of pages, and that they originated from the same file.
*/
int
nfs_scan_lru_dirty(struct nfs_server *server, struct list_head *dst)
{
struct nfs_inode *nfsi;
int npages;
npages = nfs_scan_lru(&server->lru_dirty, dst, server->wpages);
if (npages) {
nfsi = NFS_I(nfs_list_entry(dst->next)->wb_inode);
nfsi->ndirty -= npages;
}
return npages;
}
/*
* nfs_scan_dirty - Scan an inode for dirty requests
* @inode: NFS inode to scan
......@@ -559,59 +481,6 @@ nfs_scan_dirty(struct inode *inode, struct list_head *dst, struct file *file, un
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/**
* nfs_scan_lru_commit_timeout - Scan LRU list for timed out commit requests
* @server: NFS superblock data
* @dst: destination list
*
* Finds the first timed-out request in the NFS commit LRU list and moves it
* to the list dst. If such an element is found, we move all other commit
* requests that apply to the same inode.
* The assumption is that doing everything in a single commit-to-disk is
* the cheaper alternative.
*/
int
nfs_scan_lru_commit_timeout(struct nfs_server *server, struct list_head *dst)
{
struct nfs_inode *nfsi;
int npages;
npages = nfs_scan_lru_timeout(&server->lru_commit, dst, 1);
if (npages) {
nfsi = NFS_I(nfs_list_entry(dst->next)->wb_inode);
npages += nfs_scan_list(&nfsi->commit, dst, NULL, 0, 0);
nfsi->ncommit -= npages;
}
return npages;
}
/**
* nfs_scan_lru_commit - Scan LRU list for commit requests
* @server: NFS superblock data
* @dst: destination list
*
* Finds the first request in the NFS commit LRU list and moves it
* to the list dst. If such an element is found, we move all other commit
* requests that apply to the same inode.
* The assumption is that doing everything in a single commit-to-disk is
* the cheaper alternative.
*/
int
nfs_scan_lru_commit(struct nfs_server *server, struct list_head *dst)
{
struct nfs_inode *nfsi;
int npages;
npages = nfs_scan_lru(&server->lru_commit, dst, 1);
if (npages) {
nfsi = NFS_I(nfs_list_entry(dst->next)->wb_inode);
npages += nfs_scan_list(&nfsi->commit, dst, NULL, 0, 0);
nfsi->ncommit -= npages;
}
return npages;
}
/*
* nfs_scan_commit - Scan an inode for commit requests
* @inode: NFS inode to scan
......@@ -697,11 +566,6 @@ nfs_update_request(struct file* file, struct inode *inode, struct page *page,
new->wb_file = file;
get_file(file);
}
/* If the region is locked, adjust the timeout */
if (region_locked(inode, new))
new->wb_timeout = jiffies + NFS_WRITEBACK_LOCKDELAY;
else
new->wb_timeout = jiffies + NFS_WRITEBACK_DELAY;
}
/* We have a request for our page.
......@@ -1059,7 +923,6 @@ nfs_writeback_done(struct rpc_task *task, int stable,
goto next;
}
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
req->wb_timeout = jiffies + NFS_COMMIT_DELAY;
nfs_mark_request_commit(req);
dprintk(" marked for commit\n");
#else
......
......@@ -2,6 +2,9 @@
/* Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
* The Silver Hammer Group, Ltd.
* Copyright (C) 2002 David McCullough <davidm@snapgear.com>
*
* This file provides the definitions and structures needed to
* support uClinux flat-format executables.
*/
#ifndef _LINUX_FLAT_H
......
#ifndef NFS_CLUSTER_H
#define NFS_CLUSTER_H
#ifdef __KERNEL__
#include <asm/atomic.h>
#include <linux/nfs_fs_sb.h>
/*
* Counters of total number and pending number of requests.
* When the total number of requests exceeds the hard limit, we stall
* until it drops again.
*/
#define MAX_REQUEST_HARD 256
/*
* Maximum number of requests per write cluster.
* 32 requests per cluster account for 128K of data on an intel box.
* Note: it's a good idea to make this number smaller than MAX_REQUEST_SOFT.
*
* For 100Mbps Ethernet, 128 pages (i.e. 256K) per cluster gives much
* better performance.
*/
#define REQUEST_HASH_SIZE 16
#define REQUEST_NR(off) ((off) >> PAGE_CACHE_SHIFT)
#define REQUEST_HASH(ino, off) (((ino) ^ REQUEST_NR(off)) & (REQUEST_HASH_SIZE - 1))
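For illustration (request_bucket is hypothetical): REQUEST_NR() turns a byte offset into a page index, which REQUEST_HASH() folds into the inode number and masks down to one of the 16 buckets.
static unsigned int request_bucket(struct inode *inode, loff_t offset)
{
	/* (ino ^ (off >> PAGE_CACHE_SHIFT)) & (REQUEST_HASH_SIZE - 1) */
	return REQUEST_HASH(inode->i_ino, (unsigned long)offset);
}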
/*
* Functions
*/
extern int nfs_reqlist_alloc(struct nfs_server *);
extern void nfs_reqlist_free(struct nfs_server *);
extern int nfs_reqlist_init(struct nfs_server *);
extern void nfs_reqlist_exit(struct nfs_server *);
extern void nfs_wake_flushd(void);
/*
* This is the per-mount writeback cache.
*/
struct nfs_reqlist {
atomic_t nr_requests;
unsigned long runat;
wait_queue_head_t request_wait;
/* The async RPC task that is responsible for scanning the
* requests.
*/
struct rpc_task *task; /* request flush task */
/* Authentication flavor handle for this NFS client */
struct rpc_auth *auth;
/* The list of all inodes with pending writebacks. */
struct inode *inodes;
};
#endif
#endif
......@@ -37,34 +37,16 @@
# define NFS_DEBUG
#endif
/*
* NFS_MAX_DIRCACHE controls the number of simultaneously cached
* directory chunks. Each chunk holds the list of nfs_entry's returned
* in a single readdir call in a memory region of size PAGE_SIZE.
*
* Note that at most server->rsize bytes of the cache memory are used.
*/
#define NFS_MAX_DIRCACHE 16
#define NFS_MAX_FILE_IO_BUFFER_SIZE 32768
#define NFS_DEF_FILE_IO_BUFFER_SIZE 4096
/*
* The upper limit on timeouts for the exponential backoff algorithm.
*/
#define NFS_MAX_RPC_TIMEOUT (6*HZ)
#define NFS_READ_DELAY (2*HZ)
#define NFS_WRITEBACK_DELAY (5*HZ)
#define NFS_WRITEBACK_LOCKDELAY (60*HZ)
#define NFS_COMMIT_DELAY (5*HZ)
/*
* Size of the lookup cache in units of number of entries cached.
* It is better not to make this too large although the optimum
* depends on a usage and environment.
*/
#define NFS_LOOKUP_CACHE_SIZE 64
/*
* superblock magic number for NFS
*/
......@@ -75,9 +57,6 @@
*/
#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
/* Flags in the RPC client structure */
#define NFS_CLNTF_BUFSIZE 0x0001 /* readdir buffer in longwords */
#define NFS_RW_SYNC 0x0001 /* O_SYNC handling */
#define NFS_RW_SWAP 0x0002 /* This is a swap request */
......@@ -185,8 +164,7 @@ struct nfs_inode {
#define NFS_INO_STALE 0x0001 /* possible stale inode */
#define NFS_INO_ADVISE_RDPLUS 0x0002 /* advise readdirplus */
#define NFS_INO_REVALIDATING 0x0004 /* revalidating attrs */
#define NFS_IS_SNAPSHOT 0x0010 /* a snapshot file */
#define NFS_INO_FLUSH 0x0020 /* inode is due for flushing */
#define NFS_INO_FLUSH 0x0008 /* inode is due for flushing */
static inline struct nfs_inode *NFS_I(struct inode *inode)
{
......@@ -198,9 +176,7 @@ static inline struct nfs_inode *NFS_I(struct inode *inode)
#define NFS_SERVER(inode) (NFS_SB(inode->i_sb))
#define NFS_CLIENT(inode) (NFS_SERVER(inode)->client)
#define NFS_PROTO(inode) (NFS_SERVER(inode)->rpc_ops)
#define NFS_REQUESTLIST(inode) (NFS_SERVER(inode)->rw_requests)
#define NFS_ADDR(inode) (RPC_PEERADDR(NFS_CLIENT(inode)))
#define NFS_CONGESTED(inode) (RPC_CONGESTED(NFS_CLIENT(inode)))
#define NFS_COOKIEVERF(inode) (NFS_I(inode)->cookieverf)
#define NFS_READTIME(inode) (NFS_I(inode)->read_cache_jiffies)
#define NFS_MTIME_UPDATE(inode) (NFS_I(inode)->cache_mtime_jiffies)
......@@ -208,7 +184,6 @@ static inline struct nfs_inode *NFS_I(struct inode *inode)
#define NFS_CACHE_MTIME(inode) (NFS_I(inode)->read_cache_mtime)
#define NFS_CACHE_ISIZE(inode) (NFS_I(inode)->read_cache_isize)
#define NFS_CHANGE_ATTR(inode) (NFS_I(inode)->change_attr)
#define NFS_NEXTSCAN(inode) (NFS_I(inode)->nextscan)
#define NFS_CACHEINV(inode) \
do { \
NFS_READTIME(inode) = jiffies - NFS_MAXATTRTIMEO(inode) - 1; \
......@@ -254,8 +229,6 @@ loff_t req_offset(struct nfs_page *req)
* linux/fs/nfs/inode.c
*/
extern void nfs_zap_caches(struct inode *);
extern int nfs_inode_is_stale(struct inode *, struct nfs_fh *,
struct nfs_fattr *);
extern struct inode *nfs_fhget(struct dentry *, struct nfs_fh *,
struct nfs_fattr *);
extern int __nfs_refresh_inode(struct inode *, struct nfs_fattr *);
......@@ -338,13 +311,9 @@ extern void nfs_commit_done(struct rpc_task *);
extern int nfs_sync_file(struct inode *, struct file *, unsigned long, unsigned int, int);
extern int nfs_flush_file(struct inode *, struct file *, unsigned long, unsigned int, int);
extern int nfs_flush_list(struct list_head *, int, int);
extern int nfs_scan_lru_dirty(struct nfs_server *, struct list_head *);
extern int nfs_scan_lru_dirty_timeout(struct nfs_server *, struct list_head *);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern int nfs_commit_file(struct inode *, struct file *, unsigned long, unsigned int, int);
extern int nfs_commit_list(struct list_head *, int);
extern int nfs_scan_lru_commit(struct nfs_server *, struct list_head *);
extern int nfs_scan_lru_commit_timeout(struct nfs_server *, struct list_head *);
#else
static inline int
nfs_commit_file(struct inode *inode, struct file *file, unsigned long offset,
......
......@@ -25,11 +25,6 @@ struct nfs_server {
unsigned int acdirmax;
unsigned int namelen;
char * hostname; /* remote hostname */
struct nfs_reqlist * rw_requests; /* async read/write requests */
struct list_head lru_read,
lru_dirty,
lru_commit,
lru_busy;
struct nfs_fh fh;
struct sockaddr_in addr;
#if CONFIG_NFS_V4
......
......@@ -23,15 +23,13 @@
#define PG_BUSY 0
struct nfs_page {
struct list_head wb_lru, /* superblock lru list */
wb_list, /* Defines state of page: */
struct list_head wb_list, /* Defines state of page: */
*wb_list_head; /* read/write/commit */
struct file *wb_file;
struct inode *wb_inode;
struct rpc_cred *wb_cred;
struct page *wb_page; /* page to read in/write out */
wait_queue_head_t wb_wait; /* wait queue */
unsigned long wb_timeout; /* when to read/write/commit */
unsigned long wb_index; /* Offset within mapping */
unsigned int wb_offset, /* Offset within page */
wb_bytes, /* Length of request */
......@@ -52,8 +50,6 @@ extern void nfs_release_list(struct list_head *list);
extern void nfs_list_add_request(struct nfs_page *, struct list_head *);
extern int nfs_scan_lru(struct list_head *, struct list_head *, int);
extern int nfs_scan_lru_timeout(struct list_head *, struct list_head *, int);
extern int nfs_scan_list(struct list_head *, struct list_head *,
struct file *, unsigned long, unsigned int);
extern int nfs_coalesce_requests(struct list_head *, struct list_head *,
......@@ -124,24 +120,4 @@ nfs_list_entry(struct list_head *head)
return list_entry(head, struct nfs_page, wb_list);
}
static inline void
__nfs_add_lru(struct list_head *head, struct nfs_page *req)
{
list_add_tail(&req->wb_lru, head);
}
static inline void
__nfs_del_lru(struct nfs_page *req)
{
if (list_empty(&req->wb_lru))
return;
list_del_init(&req->wb_lru);
}
static inline struct nfs_page *
nfs_lru_entry(struct list_head *head)
{
return list_entry(head, struct nfs_page, wb_lru);
}
#endif /* _LINUX_NFS_PAGE_H */
/*
* linux/include/linux/nfsiod.h
*
* Declarations for asynchronous NFS RPC calls.
*
*/
#ifndef _LINUX_NFSIOD_H
#define _LINUX_NFSIOD_H
#include <linux/rpcsock.h>
#include <linux/nfs_fs.h>
#ifdef __KERNEL__
/*
* This is the callback handler for nfsiod requests.
* Note that the callback procedure must NOT sleep.
*/
struct nfsiod_req;
typedef int (*nfsiod_callback_t)(int result, struct nfsiod_req *);
/*
* This is the nfsiod request struct.
*/
struct nfsiod_req {
struct nfsiod_req * rq_next;
struct nfsiod_req * rq_prev;
wait_queue_head_t rq_wait;
struct rpc_ioreq rq_rpcreq;
nfsiod_callback_t rq_callback;
struct nfs_server * rq_server;
struct inode * rq_inode;
struct page * rq_page;
/* user creds */
uid_t rq_fsuid;
gid_t rq_fsgid;
int rq_groups[NGROUPS];
/* retry handling */
int rq_retries;
};
struct nfsiod_req * nfsiod_reserve(struct nfs_server *);
void nfsiod_release(struct nfsiod_req *);
void nfsiod_enqueue(struct nfsiod_req *);
int nfsiod(void);
#endif /* __KERNEL__ */
#endif /* _LINUX_NFSIOD_H */
/*
* rpcsock.h Declarations for the RPC call interface.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#ifndef _LINUX_RPCSOCK_H
#define _LINUX_RPCSOCK_H
/*
* The rpcsock code maintains an estimate on the maximum number of out-
* standing RPC requests, using the congestion avoidance implemented in
* 44BSD. This is basically the Van Jacobson slow start algorithm: If a
* retransmit occurs, the congestion window is halved; otherwise, it is
* incremented by 1/cwnd when a reply is received and a full number of
* requests are outstanding.
*
* Upper procedures may check whether a request would block waiting for
* a free RPC slot by using the RPC_CONGESTED() macro.
*
* Note: on machines with low memory we should probably use a smaller
* MAXREQS value: At 32 outstanding reqs with 8 megs of RAM, fragment
* reassembly will frequently run out of memory.
*/
#define RPC_MAXREQS 32
#define RPC_CWNDSCALE 256
#define RPC_MAXCWND (RPC_MAXREQS * RPC_CWNDSCALE)
/* #define RPC_INITCWND (RPC_MAXCWND / 2) */
#define RPC_INITCWND RPC_CWNDSCALE
#define RPC_CONGESTED(rsock) ((rsock)->cong >= (rsock)->cwnd)
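As a sketch of the 44BSD scheme described above, in RPC_CWNDSCALE fixed-point units (an illustration of the algorithm, not the rpcsock implementation): the window is halved on a retransmit and otherwise opened by roughly one slot per window's worth of replies.
static unsigned long cwnd_update(unsigned long cwnd, int timed_out)
{
	if (timed_out) {
		cwnd >>= 1;			/* multiplicative decrease */
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;	/* never below one slot */
	} else {
		/* additive increase: += 1/cwnd of a slot, scaled */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE) / cwnd;
		if (cwnd > RPC_MAXCWND)
			cwnd = RPC_MAXCWND;
	}
	return cwnd;
}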
/* RPC reply header size: xid, direction, status, accept_status (verifier
* size computed separately)
*/
#define RPC_HDRSIZE (4 * 4)
/*
* This describes a timeout strategy
*/
struct rpc_timeout {
unsigned long to_initval,
to_maxval,
to_increment;
int to_retries;
char to_exponential;
};
/*
* This describes a complete RPC request
*/
struct rpc_ioreq {
struct rpc_wait * rq_slot;
struct sockaddr * rq_addr;
int rq_alen;
struct iovec rq_svec[UIO_FASTIOV];
unsigned int rq_snr;
unsigned long rq_slen;
struct iovec rq_rvec[UIO_FASTIOV];
unsigned int rq_rnr;
unsigned long rq_rlen;
};
/*
* This is the callback handler for async RPC.
*/
struct rpc_wait;
typedef void (*rpc_callback_fn_t)(int, struct rpc_wait *, void *);
/*
* Wait information. This struct defines all the state of an RPC
* request currently in flight.
*/
struct rpc_wait {
struct rpc_sock * w_sock;
struct rpc_wait * w_prev;
struct rpc_wait * w_next;
struct rpc_ioreq * w_req;
int w_result;
wait_queue_head_t w_wait;
rpc_callback_fn_t w_handler;
void * w_cdata;
char w_queued;
char w_gotit;
__u32 w_xid;
};
struct rpc_sock {
struct file * file;
struct socket * sock;
struct sock * inet;
struct rpc_wait waiting[RPC_MAXREQS];
unsigned long cong;
unsigned long cwnd;
struct rpc_wait * pending;
struct rpc_wait * free;
wait_queue_head_t backlog;
wait_queue_head_t shutwait;
int shutdown;
};
#ifdef __KERNEL__
/* rpc_call: Call synchronously */
int rpc_call(struct rpc_sock *, struct rpc_ioreq *,
struct rpc_timeout *);
/* These implement async calls for nfsiod: the process calls rpc_reserve and
* rpc_transmit, then passes the request to nfsiod, which collects the
* results via rpc_doio
*/
int rpc_reserve(struct rpc_sock *, struct rpc_ioreq *, int);
void rpc_release(struct rpc_sock *, struct rpc_ioreq *);
int rpc_transmit(struct rpc_sock *, struct rpc_ioreq *);
int rpc_doio(struct rpc_sock *, struct rpc_ioreq *,
struct rpc_timeout *, int);
struct rpc_sock * rpc_makesock(struct file *);
int rpc_closesock(struct rpc_sock *);
#endif /* __KERNEL__*/
#endif /* _LINUX_RPCSOCK_H */
......@@ -50,8 +50,6 @@ struct rpc_clnt {
cl_droppriv : 1,/* enable NFS suid hack */
cl_oneshot : 1,/* dispose after use */
cl_dead : 1;/* abandoned */
unsigned int cl_flags; /* misc client flags */
unsigned long cl_hardmax; /* max hard timeout */
struct rpc_rtt cl_rtt; /* RTO estimator data */
......@@ -132,17 +130,15 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
static __inline__
int rpc_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags)
{
struct rpc_message msg = { proc, argp, resp, NULL };
struct rpc_message msg = {
.rpc_proc = proc,
.rpc_argp = argp,
.rpc_resp = resp,
.rpc_cred = NULL
};
return rpc_call_sync(clnt, &msg, flags);
}
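The switch from positional to designated initializers here (and in the portmapper and skb_reader_t hunks further down) is behavior-neutral: it ties each value to a field name, so the initializer survives reordering of the struct. A generic illustration with a hypothetical struct:
struct example_msg { int proc, flags; };

struct example_msg a = { 17, 0 };			/* positional */
struct example_msg b = { .proc = 17, .flags = 0 };	/* designated */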
static __inline__ void
rpc_set_timeout(struct rpc_clnt *clnt, unsigned int retr, unsigned long incr)
{
xprt_set_timeout(&clnt->cl_timeout, retr, incr);
}
extern void rpciod_wake_up(void);
/*
......
......@@ -13,12 +13,6 @@
#include <linux/sunrpc/types.h>
#include <linux/wait.h>
/*
* Define this if you want to test the fast scheduler for async calls.
* This is still experimental and may not work.
*/
#undef CONFIG_RPC_FASTSCHED
/*
* This is the actual RPC procedure call info.
*/
......@@ -48,6 +42,7 @@ struct rpc_task {
*/
struct rpc_message tk_msg; /* RPC call info */
__u32 * tk_buffer; /* XDR buffer */
size_t tk_bufsize;
__u8 tk_garb_retry,
tk_cred_retry,
tk_suid_retry;
......@@ -184,20 +179,16 @@ void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
void rpc_delay(struct rpc_task *, unsigned long);
void * rpc_allocate(unsigned int flags, unsigned int);
void rpc_free(void *);
void * rpc_malloc(struct rpc_task *, size_t);
void rpc_free(struct rpc_task *);
int rpciod_up(void);
void rpciod_down(void);
void rpciod_wake_up(void);
#ifdef RPC_DEBUG
void rpc_show_tasks(void);
#endif
static __inline__ void *
rpc_malloc(struct rpc_task *task, unsigned int size)
{
return rpc_allocate(task->tk_flags, size);
}
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
static __inline__ void
rpc_exit(struct rpc_task *task, int status)
......
......@@ -12,16 +12,16 @@
#include <asm/atomic.h>
struct rpc_rtt {
long timeo; /* default timeout value */
long srtt[5]; /* smoothed round trip time << 3 */
long sdrtt[5]; /* soothed medium deviation of RTT */
unsigned long timeo; /* default timeout value */
unsigned long srtt[5]; /* smoothed round trip time << 3 */
unsigned long sdrtt[5]; /* smoothed mean deviation of RTT */
atomic_t ntimeouts; /* Global count of the number of timeouts */
};
extern void rpc_init_rtt(struct rpc_rtt *rt, long timeo);
extern void rpc_update_rtt(struct rpc_rtt *rt, int timer, long m);
extern long rpc_calc_rto(struct rpc_rtt *rt, int timer);
extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);
static inline void rpc_inc_timeo(struct rpc_rtt *rt)
{
......
......@@ -127,7 +127,6 @@ xdr_adjust_iovec(struct iovec *iov, u32 *p)
}
void xdr_shift_iovec(struct iovec *, int, size_t);
void xdr_zero_iovec(struct iovec *, int, size_t);
/*
* Maximum number of iov's we use.
......@@ -156,12 +155,6 @@ typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);
extern void xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
skb_reader_t *, skb_read_actor_t);
extern int xdr_copy_skb(struct xdr_buf *xdr, unsigned int base,
struct sk_buff *skb, unsigned int offset);
extern int xdr_copy_and_csum_skb(struct xdr_buf *xdr, unsigned int base,
struct sk_buff *skb, unsigned int offset, unsigned int csum);
#endif /* __KERNEL__ */
#endif /* _SUNRPC_XDR_H_ */
......@@ -71,7 +71,7 @@ struct rpc_timeout {
to_initval, /* initial timeout */
to_maxval, /* max timeout */
to_increment; /* if !exponential */
short to_retries; /* max # of retries */
unsigned int to_retries; /* max # of retries */
unsigned char to_exponential;
};
......@@ -109,7 +109,7 @@ struct rpc_rqst {
u32 rq_bytes_sent; /* Bytes we have sent */
long rq_xtime; /* when transmitted */
unsigned long rq_xtime; /* when transmitted */
int rq_ntimeo;
int rq_nresend;
};
......
......@@ -2292,7 +2292,7 @@ unsigned int ksize(const void *objp)
unsigned long flags;
unsigned int size = 0;
if (likely(objp)) {
if (likely(objp != NULL)) {
local_irq_save(flags);
c = GET_PAGE_CACHE(virt_to_page(objp));
size = kmem_cache_size(c);
......
......@@ -25,7 +25,7 @@ nul_create(struct rpc_clnt *clnt)
struct rpc_auth *auth;
dprintk("RPC: creating NULL authenticator for client %p\n", clnt);
if (!(auth = (struct rpc_auth *) rpc_allocate(0, sizeof(*auth))))
if (!(auth = (struct rpc_auth *) kmalloc(sizeof(*auth),GFP_KERNEL)))
return NULL;
auth->au_cslack = 4;
auth->au_rslack = 2;
......@@ -41,7 +41,7 @@ nul_destroy(struct rpc_auth *auth)
{
dprintk("RPC: destroying NULL authenticator %p\n", auth);
rpcauth_free_credcache(auth);
rpc_free(auth);
kfree(auth);
}
/*
......@@ -52,7 +52,7 @@ nul_create_cred(int flags)
{
struct rpc_cred *cred;
if (!(cred = (struct rpc_cred *) rpc_allocate(flags, sizeof(*cred))))
if (!(cred = (struct rpc_cred *) kmalloc(sizeof(*cred),GFP_KERNEL)))
return NULL;
atomic_set(&cred->cr_count, 0);
cred->cr_flags = RPCAUTH_CRED_UPTODATE;
......@@ -68,7 +68,7 @@ nul_create_cred(int flags)
static void
nul_destroy_cred(struct rpc_cred *cred)
{
rpc_free(cred);
kfree(cred);
}
/*
......
......@@ -41,7 +41,7 @@ unx_create(struct rpc_clnt *clnt)
struct rpc_auth *auth;
dprintk("RPC: creating UNIX authenticator for client %p\n", clnt);
if (!(auth = (struct rpc_auth *) rpc_allocate(0, sizeof(*auth))))
if (!(auth = (struct rpc_auth *) kmalloc(sizeof(*auth), GFP_KERNEL)))
return NULL;
auth->au_cslack = UNX_WRITESLACK;
auth->au_rslack = 2; /* assume AUTH_NULL verf */
......@@ -58,7 +58,7 @@ unx_destroy(struct rpc_auth *auth)
{
dprintk("RPC: destroying UNIX authenticator %p\n", auth);
rpcauth_free_credcache(auth);
rpc_free(auth);
kfree(auth);
}
static struct rpc_cred *
......@@ -70,7 +70,7 @@ unx_create_cred(int flags)
dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
current->uid, current->gid);
if (!(cred = (struct unx_cred *) rpc_allocate(flags, sizeof(*cred))))
if (!(cred = (struct unx_cred *) kmalloc(sizeof(*cred), GFP_KERNEL)))
return NULL;
atomic_set(&cred->uc_count, 0);
......@@ -98,32 +98,10 @@ unx_create_cred(int flags)
return (struct rpc_cred *) cred;
}
struct rpc_cred *
authunix_fake_cred(struct rpc_task *task, uid_t uid, gid_t gid)
{
struct unx_cred *cred;
dprintk("RPC: allocating fake UNIX cred for uid %d gid %d\n",
uid, gid);
if (!(cred = (struct unx_cred *) rpc_malloc(task, sizeof(*cred))))
return NULL;
atomic_set(&cred->uc_count, 1);
cred->uc_flags = RPCAUTH_CRED_DEAD|RPCAUTH_CRED_UPTODATE;
cred->uc_uid = uid;
cred->uc_gid = gid;
cred->uc_fsuid = uid;
cred->uc_fsgid = gid;
cred->uc_gids[0] = (gid_t) NOGROUP;
return task->tk_msg.rpc_cred = (struct rpc_cred *) cred;
}
static void
unx_destroy_cred(struct rpc_cred *cred)
{
rpc_free(cred);
kfree(cred);
}
/*
......
......@@ -85,7 +85,7 @@ rpc_create_client(struct rpc_xprt *xprt, char *servname,
if (vers >= program->nrvers || !(version = program->version[vers]))
goto out;
clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
goto out_no_clnt;
memset(clnt, 0, sizeof(*clnt));
......@@ -125,7 +125,7 @@ rpc_create_client(struct rpc_xprt *xprt, char *servname,
out_no_auth:
printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
flavor);
rpc_free(clnt);
kfree(clnt);
clnt = NULL;
goto out;
}
......@@ -180,7 +180,7 @@ rpc_destroy_client(struct rpc_clnt *clnt)
xprt_destroy(clnt->cl_xprt);
clnt->cl_xprt = NULL;
}
rpc_free(clnt);
kfree(clnt);
return 0;
}
......@@ -487,7 +487,7 @@ call_allocate(struct rpc_task *task)
* auth->au_wslack */
bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;
if ((task->tk_buffer = rpc_malloc(task, bufsiz << 1)) != NULL)
if (rpc_malloc(task, bufsiz << 1) != NULL)
return;
printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
......@@ -522,7 +522,7 @@ call_encode(struct rpc_task *task)
task->tk_action = call_bind;
/* Default buffer setup */
bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc)+RPC_SLACK_SPACE;
bufsiz = task->tk_bufsize >> 1;
sndbuf->head[0].iov_base = (void *)task->tk_buffer;
sndbuf->head[0].iov_len = bufsiz;
sndbuf->tail[0].iov_len = 0;
......
......@@ -42,7 +42,12 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
{
struct rpc_portmap *map = &clnt->cl_pmap;
struct sockaddr_in *sap = &clnt->cl_xprt->addr;
struct rpc_message msg = { PMAP_GETPORT, map, &clnt->cl_port, NULL };
struct rpc_message msg = {
.rpc_proc = PMAP_GETPORT,
.rpc_argp = map,
.rpc_resp = &clnt->cl_port,
.rpc_cred = NULL
};
struct rpc_clnt *pmap_clnt;
struct rpc_task *child;
......@@ -90,7 +95,12 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
int
rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
{
struct rpc_portmap map = { prog, vers, prot, 0 };
struct rpc_portmap map = {
.pm_prog = prog,
.pm_vers = vers,
.pm_prot = prot,
.pm_port = 0
};
struct rpc_clnt *pmap_clnt;
char hostname[32];
int status;
......
......@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
......@@ -29,9 +30,15 @@ static int rpc_task_id;
#endif
/*
* We give RPC the same get_free_pages priority as NFS
* RPC slabs and memory pools
*/
#define GFP_RPC GFP_NOFS
#define RPC_BUFFER_MAXSIZE (2048)
#define RPC_BUFFER_POOLSIZE (8)
#define RPC_TASK_POOLSIZE (8)
static kmem_cache_t *rpc_task_slabp;
static kmem_cache_t *rpc_buffer_slabp;
static mempool_t *rpc_task_mempool;
static mempool_t *rpc_buffer_mempool;
static void __rpc_default_timer(struct rpc_task *task);
static void rpciod_killall(void);
......@@ -79,24 +86,6 @@ static spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
*/
static spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED;
/*
* This is the last-ditch buffer for NFS swap requests
*/
static u32 swap_buffer[PAGE_SIZE >> 2];
static long swap_buffer_used;
/*
* Make allocation of the swap_buffer SMP-safe
*/
static __inline__ int rpc_lock_swapbuf(void)
{
return !test_and_set_bit(1, &swap_buffer_used);
}
static __inline__ void rpc_unlock_swapbuf(void)
{
clear_bit(1, &swap_buffer_used);
}
/*
* Disable the timer for a given RPC task. Should be called with
* rpc_queue_lock and bh_disabled in order to avoid races within
......@@ -592,10 +581,7 @@ __rpc_execute(struct rpc_task *task)
/* Release RPC slot and buffer memory */
if (task->tk_rqstp)
xprt_release(task);
if (task->tk_buffer) {
rpc_free(task->tk_buffer);
task->tk_buffer = NULL;
}
rpc_free(task);
goto restarted;
}
printk(KERN_ERR "RPC: dead task tries to walk away.\n");
......@@ -676,63 +662,46 @@ __rpc_schedule(void)
}
/*
* Allocate memory for RPC purpose.
* Allocate memory for RPC purposes.
*
* This is yet another tricky issue: For sync requests issued by
* a user process, we want to make kmalloc sleep if there isn't
* enough memory. Async requests should not sleep too excessively
* because that will block rpciod (but that's not dramatic when
* it's starved of memory anyway). Finally, swapout requests should
* never sleep at all, and should not trigger another swap_out
* request through kmalloc which would just increase memory contention.
*
* I hope the following gets it right, which gives async requests
* a slight advantage over sync requests (good for writeback, debatable
* for readahead):
*
* sync user requests: GFP_KERNEL
* async requests: GFP_RPC (== GFP_NOFS)
* swap requests: GFP_ATOMIC (or new GFP_SWAPPER)
* We try to ensure that some NFS reads and writes can always proceed
* by using a mempool when allocating 'small' buffers.
* In order to avoid memory starvation triggering more writebacks of
* NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
*/
void *
rpc_allocate(unsigned int flags, unsigned int size)
rpc_malloc(struct rpc_task *task, size_t size)
{
u32 *buffer;
int gfp;
if (flags & RPC_TASK_SWAPPER)
if (task->tk_flags & RPC_TASK_SWAPPER)
gfp = GFP_ATOMIC;
else if (flags & RPC_TASK_ASYNC)
gfp = GFP_RPC;
else
gfp = GFP_KERNEL;
do {
if ((buffer = (u32 *) kmalloc(size, gfp)) != NULL) {
dprintk("RPC: allocated buffer %p\n", buffer);
return buffer;
}
if ((flags & RPC_TASK_SWAPPER) && size <= sizeof(swap_buffer)
&& rpc_lock_swapbuf()) {
dprintk("RPC: used last-ditch swap buffer\n");
return swap_buffer;
}
if (flags & RPC_TASK_ASYNC)
return NULL;
yield();
} while (!signalled());
gfp = GFP_NOFS;
return NULL;
if (size > RPC_BUFFER_MAXSIZE) {
task->tk_buffer = kmalloc(size, gfp);
if (task->tk_buffer)
task->tk_bufsize = size;
} else {
task->tk_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
if (task->tk_buffer)
task->tk_bufsize = RPC_BUFFER_MAXSIZE;
}
return task->tk_buffer;
}
void
rpc_free(void *buffer)
rpc_free(struct rpc_task *task)
{
if (buffer != swap_buffer) {
kfree(buffer);
return;
if (task->tk_buffer) {
if (task->tk_bufsize == RPC_BUFFER_MAXSIZE)
mempool_free(task->tk_buffer, rpc_buffer_mempool);
else
kfree(task->tk_buffer);
task->tk_buffer = NULL;
task->tk_bufsize = 0;
}
rpc_unlock_swapbuf();
}
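With this change the XDR buffer is owned by the task: rpc_malloc() stores the pointer and size in tk_buffer/tk_bufsize, and rpc_free() releases whichever backing store (mempool or plain kmalloc) was used and clears both fields. A hypothetical call site, for illustration only:
static int prepare_buffer(struct rpc_task *task, size_t bufsiz)
{
	if (rpc_malloc(task, bufsiz) == NULL)
		return -ENOMEM;		/* allocation failed */
	/* ... encode the call into task->tk_buffer ... */
	return 0;
}

/* On completion, rpc_free(task) both frees and clears tk_buffer. */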
/*
......@@ -774,11 +743,17 @@ rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
current->pid);
}
static struct rpc_task *
rpc_alloc_task(void)
{
return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}
static void
rpc_default_free_task(struct rpc_task *task)
{
dprintk("RPC: %4d freeing task\n", task->tk_pid);
rpc_free(task);
mempool_free(task, rpc_task_mempool);
}
/*
......@@ -791,7 +766,7 @@ rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
{
struct rpc_task *task;
task = (struct rpc_task *) rpc_allocate(flags, sizeof(*task));
task = rpc_alloc_task();
if (!task)
goto cleanup;
......@@ -856,10 +831,7 @@ rpc_release_task(struct rpc_task *task)
xprt_release(task);
if (task->tk_msg.rpc_cred)
rpcauth_unbindcred(task);
if (task->tk_buffer) {
rpc_free(task->tk_buffer);
task->tk_buffer = NULL;
}
rpc_free(task);
if (task->tk_client) {
rpc_release_client(task->tk_client);
task->tk_client = NULL;
......@@ -1159,3 +1131,49 @@ void rpc_show_tasks(void)
spin_unlock(&rpc_sched_lock);
}
#endif
void
rpc_destroy_mempool(void)
{
if (rpc_buffer_mempool)
mempool_destroy(rpc_buffer_mempool);
if (rpc_task_mempool)
mempool_destroy(rpc_task_mempool);
if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp))
printk(KERN_INFO "rpc_task: not all structures were freed\n");
if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp))
printk(KERN_INFO "rpc_buffers: not all structures were freed\n");
}
int
rpc_init_mempool(void)
{
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!rpc_task_slabp)
goto err_nomem;
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
RPC_BUFFER_MAXSIZE,
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
mempool_alloc_slab,
mempool_free_slab,
rpc_task_slabp);
if (!rpc_task_mempool)
goto err_nomem;
rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
mempool_alloc_slab,
mempool_free_slab,
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
return 0;
err_nomem:
rpc_destroy_mempool();
return -ENOMEM;
}
......@@ -25,8 +25,6 @@
/* RPC scheduler */
EXPORT_SYMBOL(rpc_allocate);
EXPORT_SYMBOL(rpc_free);
EXPORT_SYMBOL(rpc_execute);
EXPORT_SYMBOL(rpc_init_task);
EXPORT_SYMBOL(rpc_sleep_on);
......@@ -134,6 +132,8 @@ EXPORT_SYMBOL(nlm_debug);
static int __init
init_sunrpc(void)
{
if (rpc_init_mempool() != 0)
return -ENOMEM;
#ifdef RPC_DEBUG
rpc_register_sysctl();
#endif
......@@ -148,6 +148,7 @@ init_sunrpc(void)
static void __exit
cleanup_sunrpc(void)
{
rpc_destroy_mempool();
cache_unregister(&auth_domain_cache);
cache_unregister(&ip_map_cache);
#ifdef RPC_DEBUG
......
/*
* linux/net/sunrpc/timer.c
*
* Estimate RPC request round trip time.
*
* Based on packet round-trip and variance estimator algorithms described
* in appendix A of "Congestion Avoidance and Control" by Van Jacobson
* and Michael J. Karels (ACM Computer Communication Review; Proceedings
* of the Sigcomm '88 Symposium in Stanford, CA, August, 1988).
*
* This RTT estimator is used only for RPC over datagram protocols.
*
* Copyright (C) 2002 Trond Myklebust <trond.myklebust@fys.uio.no>
*/
#include <asm/param.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/unistd.h>
......@@ -11,38 +28,53 @@
#define RPC_RTO_MIN (2)
void
rpc_init_rtt(struct rpc_rtt *rt, long timeo)
rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
{
long t = (timeo - RPC_RTO_INIT) << 3;
int i;
unsigned long init = 0;
unsigned i;
rt->timeo = timeo;
if (t < 0)
t = 0;
if (timeo > RPC_RTO_INIT)
init = (timeo - RPC_RTO_INIT) << 3;
for (i = 0; i < 5; i++) {
rt->srtt[i] = t;
rt->srtt[i] = init;
rt->sdrtt[i] = RPC_RTO_INIT;
}
atomic_set(&rt->ntimeouts, 0);
}
/*
* NB: When computing the smoothed RTT and standard deviation,
* be careful not to produce negative intermediate results.
*/
void
rpc_update_rtt(struct rpc_rtt *rt, int timer, long m)
rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
{
long *srtt, *sdrtt;
unsigned long *srtt, *sdrtt;
if (timer-- == 0)
return;
/* jiffies wrapped; ignore this one */
if (m < 0)
return;
if (m == 0)
m = 1;
m = 1L;
srtt = &rt->srtt[timer];
m -= *srtt >> 3;
*srtt += m;
if (m < 0)
m = -m;
sdrtt = &rt->sdrtt[timer];
m -= *sdrtt >> 2;
*sdrtt += m;
/* Set lower bound on the variance */
if (*sdrtt < RPC_RTO_MIN)
*sdrtt = RPC_RTO_MIN;
......@@ -61,14 +93,17 @@ rpc_update_rtt(struct rpc_rtt *rt, int timer, long m)
* other - timeo
*/
long
rpc_calc_rto(struct rpc_rtt *rt, int timer)
unsigned long
rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
{
long res;
unsigned long res;
if (timer-- == 0)
return rt->timeo;
res = (rt->srtt[timer] >> 3) + rt->sdrtt[timer];
if (res > RPC_RTO_MAX)
res = RPC_RTO_MAX;
return res;
}
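To make the fixed-point bookkeeping concrete: srtt holds eight times the smoothed RTT and sdrtt four times the mean deviation, so the estimate is (srtt >> 3) + sdrtt, clamped to RPC_RTO_MAX. A hypothetical worked usage:
static unsigned long example_rto(void)
{
	struct rpc_rtt rtt;

	rpc_init_rtt(&rtt, 60 * HZ);	/* fallback timeout for timer 0 */
	rpc_update_rtt(&rtt, 1, 12);	/* timer slot 1: a 12-jiffy sample */
	rpc_update_rtt(&rtt, 1, 20);	/* a second, slower sample */
	return rpc_calc_rto(&rtt, 1);	/* (srtt >> 3) + sdrtt, clamped */
}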
......@@ -389,12 +389,6 @@ xprt_close(struct rpc_xprt *xprt)
sk->no_check = 0;
sock_release(sock);
/*
* TCP doesn't require the rpciod now - other things may
* but rpciod handles that not us.
*/
if(xprt->stream)
rpciod_down();
}
/*
......@@ -590,9 +584,11 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
xprt_adjust_cwnd(xprt, copied);
__xprt_put_cong(xprt, req);
if (!req->rq_nresend) {
int timer = rpcproc_timer(clnt, task->tk_msg.rpc_proc);
unsigned timer =
rpcproc_timer(clnt, task->tk_msg.rpc_proc);
if (timer)
rpc_update_rtt(&clnt->cl_rtt, timer, (long)jiffies - req->rq_xtime);
rpc_update_rtt(&clnt->cl_rtt, timer,
(long)jiffies - req->rq_xtime);
}
rpc_clear_timeo(&clnt->cl_rtt);
}
......@@ -911,7 +907,12 @@ tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
unsigned int offset, size_t len)
{
struct rpc_xprt *xprt = (struct rpc_xprt *)rd_desc->buf;
skb_reader_t desc = { skb, offset, len };
skb_reader_t desc = {
.skb = skb,
.offset = offset,
.count = len,
.csum = 0
};
dprintk("RPC: tcp_data_recv\n");
do {
......@@ -1457,11 +1458,6 @@ xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock)
/* Reset to new socket */
xprt->sock = sock;
xprt->inet = sk;
/*
* TCP requires the rpc I/O daemon is present
*/
if(xprt->stream)
rpciod_up();
return;
}
......@@ -1547,7 +1543,7 @@ xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
out_bad:
dprintk("RPC: xprt_create_proto failed\n");
if (xprt)
rpc_free(xprt);
kfree(xprt);
return NULL;
}
......@@ -1586,7 +1582,7 @@ xprt_destroy(struct rpc_xprt *xprt)
dprintk("RPC: destroying transport %p\n", xprt);
xprt_shutdown(xprt);
xprt_close(xprt);
rpc_free(xprt);
kfree(xprt);
return 0;
}