Commit c0454a9f authored by Linus Torvalds

Merge http://ppc.bkbits.net/for-linus-ppc64
into home.transmeta.com:/home/torvalds/v2.5/linux

parents d0f733e4 0288cf54
......@@ -144,7 +144,6 @@ EXPORT_SYMBOL(pci_dac_dma_to_offset);
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(hwrpb);
EXPORT_SYMBOL(wrusp);
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(alpha_read_fp_reg);
EXPORT_SYMBOL(alpha_read_fp_reg_s);
......
......@@ -20,11 +20,22 @@ void foo(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
BLANK();
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
DEFINE(TASK_UID, offsetof(struct task_struct, uid));
DEFINE(TASK_EUID, offsetof(struct task_struct, euid));
DEFINE(TASK_GID, offsetof(struct task_struct, gid));
DEFINE(TASK_EGID, offsetof(struct task_struct, egid));
DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
BLANK();
DEFINE(PT_PTRACED, PT_PTRACED);
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
DEFINE(SIGCHLD, SIGCHLD);
BLANK();
DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
}
......@@ -37,14 +37,13 @@ void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
*/
asmlinkage void
do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs regs)
do_entInt(unsigned long type, unsigned long vector,
unsigned long la_ptr, struct pt_regs *regs)
{
switch (type) {
case 0:
#ifdef CONFIG_SMP
handle_ipi(&regs);
handle_ipi(regs);
return;
#else
irq_err_count++;
......@@ -56,32 +55,32 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
#ifdef CONFIG_SMP
{
long cpu;
smp_percpu_timer_interrupt(&regs);
smp_percpu_timer_interrupt(regs);
cpu = smp_processor_id();
if (cpu != boot_cpuid) {
kstat_cpu(cpu).irqs[RTC_IRQ]++;
} else {
handle_irq(RTC_IRQ, &regs);
handle_irq(RTC_IRQ, regs);
}
}
#else
handle_irq(RTC_IRQ, &regs);
handle_irq(RTC_IRQ, regs);
#endif
return;
case 2:
alpha_mv.machine_check(vector, la_ptr, &regs);
alpha_mv.machine_check(vector, la_ptr, regs);
return;
case 3:
alpha_mv.device_interrupt(vector, &regs);
alpha_mv.device_interrupt(vector, regs);
return;
case 4:
perf_irq(vector, &regs);
perf_irq(vector, regs);
return;
default:
printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n",
type, vector);
}
printk("PC = %016lx PS=%04lx\n", regs.pc, regs.ps);
printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
}
void __init
......@@ -96,10 +95,8 @@ common_init_isa_dma(void)
void __init
init_IRQ(void)
{
/* Uh, this really MUST come first, just in case
* the platform init_irq() causes interrupts/mchecks
* (as is the case with RAWHIDE, at least).
*/
/* Just in case the platform init_irq() causes interrupts/mchecks
(as is the case with RAWHIDE, at least). */
wrent(entInt, 0);
alpha_mv.init_irq();
......
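A note on the signature change above, which recurs throughout this commit: handlers such as do_entInt used to declare the saved register frame as a trailing by-value parameter (a trick that relied on the assembly entry stubs laying the frame out at exactly that stack slot) and now take an explicit struct pt_regs pointer. A minimal userspace sketch of the plain-C difference, with hypothetical names, not kernel code:

#include <stdio.h>

/* Stand-in for the saved register frame; fields are illustrative. */
struct frame { unsigned long r0, pc; };

static void by_value(struct frame f)    { f.r0 = 0; }   /* writes a private copy */
static void by_pointer(struct frame *f) { f->r0 = 0; }  /* writes the caller's frame */

int main(void)
{
	struct frame f = { .r0 = 42, .pc = 0x1000 };

	by_value(f);
	printf("after by_value:   r0 = %lu\n", f.r0);   /* still 42 */
	by_pointer(&f);
	printf("after by_pointer: r0 = %lu\n", f.r0);   /* now 0 */
	return 0;
}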
......@@ -45,7 +45,6 @@
extern int do_pipe(int *);
extern asmlinkage unsigned long sys_brk(unsigned long);
extern int sys_getpriority(int, int);
extern asmlinkage unsigned long sys_create_module(char *, unsigned long);
/*
......@@ -172,65 +171,6 @@ osf_getdirentries(unsigned int fd, struct osf_dirent *dirent,
#undef ROUND_UP
#undef NAME_OFFSET
/*
* Alpha syscall convention has no problem returning negative
* values:
*/
asmlinkage int
osf_getpriority(int which, int who,
int a2, int a3, int a4, int a5, struct pt_regs regs)
{
extern int sys_getpriority(int, int);
int prio;
/*
* We don't need to acquire the kernel lock here, because
* all of these operations are local. sys_getpriority
* will get the lock as required..
*/
prio = sys_getpriority(which, who);
if (prio >= 0) {
regs.r0 = 0; /* special return: no errors */
prio = 20 - prio;
}
return prio;
}
/*
* No need to acquire the kernel lock, we're local..
*/
asmlinkage unsigned long
sys_getxuid(int a0, int a1, int a2, int a3, int a4, int a5, struct pt_regs regs)
{
struct task_struct * tsk = current;
(&regs)->r20 = tsk->euid;
return tsk->uid;
}
asmlinkage unsigned long
sys_getxgid(int a0, int a1, int a2, int a3, int a4, int a5, struct pt_regs regs)
{
struct task_struct * tsk = current;
(&regs)->r20 = tsk->egid;
return tsk->gid;
}
asmlinkage unsigned long
sys_getxpid(int a0, int a1, int a2, int a3, int a4, int a5, struct pt_regs regs)
{
struct task_struct *tsk = current;
/*
* This isn't strictly "local" any more and we should actually
* acquire the kernel lock. The "p_opptr" pointer might change
* if the parent goes away (or due to ptrace). But any race
* isn't actually going to matter, as if the parent happens
* to change we can happily return either of the pids.
*/
(&regs)->r20 = tsk->real_parent->tgid;
return tsk->tgid;
}
asmlinkage unsigned long
osf_mmap(unsigned long addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd, unsigned long off)
......@@ -502,19 +442,6 @@ sys_getdtablesize(void)
return NR_OPEN;
}
asmlinkage int
sys_pipe(int a0, int a1, int a2, int a3, int a4, int a5, struct pt_regs regs)
{
int fd[2], error;
error = do_pipe(fd);
if (!error) {
regs.r20 = fd[1];
error = fd[0];
}
return error;
}
/*
* For compatibility with OSF/1 only. Use utsname(2) instead.
*/
......@@ -723,8 +650,8 @@ osf_sigstack(struct sigstack *uss, struct sigstack *uoss)
*/
asmlinkage unsigned long
alpha_create_module(char *module_name, unsigned long size,
int a3, int a4, int a5, int a6, struct pt_regs regs)
do_alpha_create_module(char *module_name, unsigned long size,
struct pt_regs *regs)
{
long retval;
......@@ -735,7 +662,7 @@ alpha_create_module(char *module_name, unsigned long size,
the error number is a small negative number, while the address
is always negative but much larger. */
if (retval + 1000 < 0)
regs.r0 = 0;
regs->r0 = 0;
unlock_kernel();
return retval;
......
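To illustrate the retval + 1000 test above with concrete numbers (hypothetical values, assuming Alpha's 64-bit long): a small errno such as -ENOMEM becomes non-negative after the offset, while a kernel address interpreted as a signed long is negative by a huge margin and stays negative:

#include <stdio.h>

int main(void)
{
	long err  = -12;                        /* e.g. -ENOMEM */
	long addr = (long)0xfffffc0000310000UL; /* a kernel address: hugely negative */

	printf("errno   -> success? %d\n", err + 1000 < 0);  /* 0: r0 left alone */
	printf("address -> success? %d\n", addr + 1000 < 0); /* 1: r0 cleared */
	return 0;
}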
......@@ -42,18 +42,6 @@
#include "proto.h"
#include "pci_impl.h"
/*
* No need to acquire the kernel lock, we're entirely local..
*/
asmlinkage int
sys_sethae(unsigned long hae, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs regs)
{
(&regs)->hae = hae;
return 0;
}
void default_idle(void)
{
barrier();
......@@ -227,6 +215,9 @@ flush_thread(void)
with respect to the FPU. This is all exceptions disabled. */
current_thread_info()->ieee_state = 0;
wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));
/* Clean slate for TLS. */
current_thread_info()->pcb.unique = 0;
}
void
......@@ -244,16 +235,15 @@ release_thread(struct task_struct *dead_task)
* with parameters (SIGCHLD, 0).
*/
int
alpha_clone(unsigned long clone_flags, unsigned long usp,
int *user_tid, struct switch_stack * swstack)
alpha_clone(unsigned long clone_flags, unsigned long usp, int *user_tid,
struct pt_regs *regs)
{
struct task_struct *p;
struct pt_regs *u_regs = (struct pt_regs *) (swstack+1);
if (!usp)
usp = rdusp();
p = do_fork(clone_flags & ~CLONE_IDLETASK, usp, u_regs, 0, user_tid);
p = do_fork(clone_flags & ~CLONE_IDLETASK, usp, regs, 0, user_tid);
return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
......@@ -282,7 +272,6 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
extern void ret_from_sys_call(void);
extern void ret_from_fork(void);
struct thread_info *childti = p->thread_info;
......@@ -304,11 +293,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
stack = ((struct switch_stack *) regs) - 1;
childstack = ((struct switch_stack *) childregs) - 1;
*childstack = *stack;
#ifdef CONFIG_SMP
childstack->r26 = (unsigned long) ret_from_fork;
#else
childstack->r26 = (unsigned long) ret_from_sys_call;
#endif
childti->pcb.usp = usp;
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.flags = 1; /* set FEN, clear everything else */
......
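The pointer arithmetic above (the switch_stack/pt_regs casts in copy_thread, and the (struct pt_regs *)(swstack+1) expression the old alpha_clone used) relies on the kernel-stack layout placing a switch_stack frame immediately below the pt_regs frame. A standalone sketch of that relation with dummy frame types; the real layouts live in the Alpha headers:

#include <assert.h>

struct sw_stack   { unsigned long r9, r26; };   /* dummy switch_stack */
struct regs_frame { unsigned long r0, pc; };    /* dummy pt_regs */

int main(void)
{
	unsigned long kstack[8];  /* aligned storage for both dummy frames */
	struct sw_stack *sw = (struct sw_stack *) kstack;
	struct regs_frame *regs = (struct regs_frame *)(sw + 1); /* regs sits just above */

	/* the inverse relation used by copy_thread(): */
	assert((struct sw_stack *) regs - 1 == sw);
	return 0;
}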
......@@ -249,8 +249,8 @@ void ptrace_disable(struct task_struct *child)
}
asmlinkage long
sys_ptrace(long request, long pid, long addr, long data,
int a4, int a5, struct pt_regs regs)
do_sys_ptrace(long request, long pid, long addr, long data,
struct pt_regs *regs)
{
struct task_struct *child;
long ret;
......@@ -307,14 +307,14 @@ sys_ptrace(long request, long pid, long addr, long data,
if (copied != sizeof(tmp))
goto out;
regs.r0 = 0; /* special return: no errors */
regs->r0 = 0; /* special return: no errors */
ret = tmp;
goto out;
}
/* Read register number ADDR. */
case PTRACE_PEEKUSR:
regs.r0 = 0; /* special return: no errors */
regs->r0 = 0; /* special return: no errors */
ret = get_reg(child, addr);
DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
goto out;
......
......@@ -210,8 +210,7 @@ long alpha_fp_emul (unsigned long pc);
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, struct pt_regs regs)
struct pt_regs *regs)
{
long si_code = FPE_FLTINV;
siginfo_t info;
......@@ -221,23 +220,23 @@ do_entArith(unsigned long summary, unsigned long write_mask,
emulate the instruction. If the processor supports
precise exceptions, we don't have to search. */
if (!amask(AMASK_PRECISE_TRAP))
si_code = alpha_fp_emul(regs.pc - 4);
si_code = alpha_fp_emul(regs->pc - 4);
else
si_code = alpha_fp_emul_imprecise(&regs, write_mask);
si_code = alpha_fp_emul_imprecise(regs, write_mask);
if (si_code == 0)
return;
}
die_if_kernel("Arithmetic fault", &regs, 0, 0);
die_if_kernel("Arithmetic fault", regs, 0, 0);
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
send_sig_info(SIGFPE, &info, current);
}
asmlinkage void
do_entIF(unsigned long type, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, struct pt_regs regs)
do_entIF(unsigned long type, struct pt_regs *regs)
{
siginfo_t info;
int signo, code;
......@@ -245,13 +244,13 @@ do_entIF(unsigned long type, unsigned long a1,
if (!opDEC_testing || type != 4) {
if (type == 1) {
const unsigned int *data
= (const unsigned int *) regs.pc;
= (const unsigned int *) regs->pc;
printk("Kernel bug at %s:%d\n",
(const char *)(data[1] | (long)data[2] << 32),
data[0]);
}
die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
&regs, type, 0);
regs, type, 0);
}
switch (type) {
......@@ -260,10 +259,10 @@ do_entIF(unsigned long type, unsigned long a1,
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_trapno = 0;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
if (ptrace_cancel_bpt(current)) {
regs.pc -= 4; /* make pc point to former bpt */
regs->pc -= 4; /* make pc point to former bpt */
}
send_sig_info(SIGTRAP, &info, current);
......@@ -273,15 +272,15 @@ do_entIF(unsigned long type, unsigned long a1,
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = __SI_FAULT;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
info.si_trapno = 0;
send_sig_info(SIGTRAP, &info, current);
return;
case 2: /* gentrap */
info.si_addr = (void *) regs.pc;
info.si_trapno = regs.r16;
switch ((long) regs.r16) {
info.si_addr = (void *) regs->pc;
info.si_trapno = regs->r16;
switch ((long) regs->r16) {
case GEN_INTOVF:
signo = SIGFPE;
code = FPE_INTOVF;
......@@ -341,7 +340,7 @@ do_entIF(unsigned long type, unsigned long a1,
info.si_signo = signo;
info.si_errno = 0;
info.si_code = code;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
send_sig_info(signo, &info, current);
return;
......@@ -358,26 +357,26 @@ do_entIF(unsigned long type, unsigned long a1,
we get the correct PC. If not, we set a flag
to correct it every time through. */
if (opDEC_testing) {
if (regs.pc == opDEC_test_pc) {
if (regs->pc == opDEC_test_pc) {
opDEC_fix = 4;
regs.pc += 4;
regs->pc += 4;
printk("opDEC fixup enabled.\n");
}
return;
}
regs.pc += opDEC_fix;
regs->pc += opDEC_fix;
/* EV4 does not implement anything except normal
rounding. Everything else will come here as
an illegal instruction. Emulate them. */
si_code = alpha_fp_emul(regs.pc - 4);
si_code = alpha_fp_emul(regs->pc - 4);
if (si_code == 0)
return;
if (si_code > 0) {
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void *) regs.pc;
info.si_addr = (void *) regs->pc;
send_sig_info(SIGFPE, &info, current);
return;
}
......@@ -406,7 +405,7 @@ do_entIF(unsigned long type, unsigned long a1,
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = regs.pc;
info.si_addr = (void *) regs->pc;
send_sig_info(SIGILL, &info, current);
}
......@@ -418,18 +417,16 @@ do_entIF(unsigned long type, unsigned long a1,
and if we don't put something on the entry point we'll oops. */
asmlinkage void
do_entDbg(unsigned long type, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, struct pt_regs regs)
do_entDbg(struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("Instruction fault", &regs, type, 0);
die_if_kernel("Instruction fault", regs, 0, 0);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLOPC;
info.si_addr = regs.pc;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGILL, &info, current);
}
......@@ -1083,22 +1080,6 @@ do_entUnaUser(void * va, unsigned long opcode,
return;
}
/*
* Unimplemented system calls.
*/
asmlinkage long
alpha_ni_syscall(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs regs)
{
/* We only get here for OSF system calls, minus #112;
the rest go to sys_ni_syscall. */
#if 0
printk("<sc %ld(%lx,%lx,%lx)>", regs.r0, a0, a1, a2);
#endif
return -ENOSYS;
}
void
trap_init(void)
{
......@@ -1114,9 +1095,7 @@ trap_init(void)
wrent(entDbg, 6);
/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
* a bug in the handling of the opDEC fault. Fix it up if so.
*/
if (implver() == IMPLVER_EV4) {
a bug in the handling of the opDEC fault. Fix it up if so. */
if (implver() == IMPLVER_EV4)
opDEC_check();
}
}
......@@ -223,12 +223,12 @@ alpha_fp_emul (unsigned long pc)
FP_CONV(S,D,1,1,SR,DB);
goto pack_s;
} else {
/* CVTST need do nothing else but copy the
bits and repack. */
vb = alpha_read_fp_reg_s(fb);
FP_UNPACK_SP(SB, &vb);
DR_c = DB_c;
DR_s = DB_s;
DR_e = DB_e;
DR_f = DB_f;
DR_f = SB_f << (52 - 23);
goto pack_d;
}
......
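The one-line CVTST fix above stops copying the unpacked single-precision fraction verbatim and instead left-aligns it in the double-precision fraction field. Assuming the usual IEEE 754 widths (23 fraction bits for single, 52 for double) and a 64-bit long, the repack is a plain shift:

#include <stdio.h>

int main(void)
{
	unsigned long sb_f = 0x400000;          /* top fraction bit of a single */
	unsigned long dr_f = sb_f << (52 - 23); /* same bit in the double's field */

	printf("%lx -> %lx\n", sb_f, dr_f);     /* 400000 -> 8000000000000 */
	return 0;
}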
......@@ -140,7 +140,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#elif defined(__mc68000__) && defined(CONFIG_MMU)
#ifdef SUN3_PAGE_NOCACHE
if (MMU_IS_SUN3)
prot |= SUN3_PAGE_NOCACHE;
......
......@@ -68,7 +68,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
/* FIXME: soem day we shouldnt need to look in here! */
/* FIXME: some day we shouldnt need to look in here! */
#include "legacy/pdc4030.h"
......
......@@ -590,7 +590,7 @@ int proc_ide_read_driver
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
ide_drive_t *drive = (ide_drive_t *) data;
ide_driver_t *driver = (ide_driver_t *) drive->driver;
ide_driver_t *driver = drive->driver;
int len;
if (!driver)
......@@ -720,7 +720,6 @@ void recreate_proc_ide_device(ide_hwif_t *hwif, ide_drive_t *drive)
struct proc_dir_entry *ent;
struct proc_dir_entry *parent = hwif->proc;
char name[64];
// ide_driver_t *driver = drive->driver;
if (drive->present && !drive->proc) {
drive->proc = proc_mkdir(drive->name, parent);
......
......@@ -153,6 +153,7 @@
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
......@@ -162,7 +163,6 @@
#include "ide_modes.h"
#include <linux/kmod.h>
/* default maximum number of failures */
#define IDE_DEFAULT_MAX_FAILURES 1
......
......@@ -22,7 +22,6 @@ ifneq ($(CONFIG_MAC),y)
endif
obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o
obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
obj-$(CONFIG_PPC_RTC) += rtc.o
obj-$(CONFIG_ANSLCD) += ans-lcd.o
obj-$(CONFIG_ADB_PMU) += via-pmu.o
......@@ -30,7 +29,6 @@ obj-$(CONFIG_ADB_CUDA) += via-cuda.o
obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o
obj-$(CONFIG_ADB) += adb.o
obj-$(CONFIG_ADB_KEYBOARD) += mac_keyb.o
obj-$(CONFIG_ADB_MACII) += via-macii.o
obj-$(CONFIG_ADB_MACIISI) += via-maciisi.o
obj-$(CONFIG_ADB_IOP) += adb-iop.o
......
......@@ -34,8 +34,10 @@
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/hydra.h>
......@@ -75,8 +77,8 @@ static struct adb_driver *adb_driver_list[] = {
struct adb_driver *adb_controller;
struct notifier_block *adb_client_list = NULL;
static int adb_got_sleep = 0;
static int adb_inited = 0;
static int adb_got_sleep;
static int adb_inited;
static pid_t adb_probe_task_pid;
static DECLARE_MUTEX(adb_probe_mutex);
static struct completion adb_probe_task_comp;
......@@ -94,7 +96,7 @@ static struct pmu_sleep_notifier adb_sleep_notifier = {
static int adb_scan_bus(void);
static int do_adb_reset_bus(void);
static void adbdev_init(void);
static int try_handler_change(int, int);
static struct adb_handler {
void (*handler)(unsigned char *, int, struct pt_regs *, int);
......@@ -102,6 +104,18 @@ static struct adb_handler {
int handler_id;
} adb_handler[16];
/*
* The adb_handler_sem mutex protects all accesses to the original_address
* and handler_id fields of adb_handler[i] for all i, and changes to the
* handler field.
* Accesses to the handler field are protected by the adb_handler_lock
* rwlock. It is held across all calls to any handler, so that by the
* time adb_unregister returns, we know that the old handler isn't being
* called.
*/
static DECLARE_MUTEX(adb_handler_sem);
static rwlock_t adb_handler_lock = RW_LOCK_UNLOCKED;
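A userspace condensation of the discipline described in the comment above, using pthreads as a stand-in for the kernel primitives (names and signatures are illustrative, not the driver's):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t handler_sem = PTHREAD_MUTEX_INITIALIZER;    /* plays adb_handler_sem */
static pthread_rwlock_t handler_lock = PTHREAD_RWLOCK_INITIALIZER; /* plays adb_handler_lock */
static void (*handlers[16])(int);

static void set_handler(int id, void (*fn)(int))
{
	pthread_mutex_lock(&handler_sem);     /* serializes registration changes */
	pthread_rwlock_wrlock(&handler_lock); /* waits for in-flight dispatch to drain */
	handlers[id] = fn;
	pthread_rwlock_unlock(&handler_lock);
	pthread_mutex_unlock(&handler_sem);
}

static void dispatch(int id, int event)
{
	void (*fn)(int);

	/* The read lock is held across the call, so once set_handler(id, NULL)
	   returns, the old handler is guaranteed not to be running. */
	pthread_rwlock_rdlock(&handler_lock);
	fn = handlers[id];
	if (fn != NULL)
		fn(event);
	pthread_rwlock_unlock(&handler_lock);
}

int main(void)
{
	set_handler(3, NULL); /* smoke test */
	dispatch(3, 0);
	return 0;
}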
#if 0
static void printADBreply(struct adb_request *req)
{
......@@ -254,25 +268,18 @@ __adb_probe_task(void *data)
SIGCHLD | CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
}
static DECLARE_WORK(adb_reset_work, __adb_probe_task, NULL);
int
adb_reset_bus(void)
{
static struct tq_struct tqs = {
routine: __adb_probe_task,
};
if (__adb_probe_sync) {
do_adb_reset_bus();
return 0;
}
down(&adb_probe_mutex);
/* Create probe thread as a child of keventd */
if (current_is_keventd())
__adb_probe_task(NULL);
else
schedule_task(&tqs);
schedule_work(&adb_reset_work);
return 0;
}
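For readers coming from 2.4: the hunk above replaces the old task-queue deferral with the 2.5 workqueue API. Side by side, taken from the removed and added lines:

	/* 2.4 task queue (removed):
	 *	static struct tq_struct tqs = { routine: __adb_probe_task };
	 *	schedule_task(&tqs);
	 *
	 * 2.5 workqueue (added):
	 *	static DECLARE_WORK(adb_reset_work, __adb_probe_task, NULL);
	 *	schedule_work(&adb_reset_work);
	 */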
......@@ -372,7 +379,6 @@ static int
do_adb_reset_bus(void)
{
int ret, nret, devs;
unsigned long flags;
if (adb_controller == NULL)
return -ENXIO;
......@@ -392,10 +398,10 @@ do_adb_reset_bus(void)
adb_wait_ms(500);
}
save_flags(flags);
cli();
down(&adb_handler_sem);
write_lock_irq(&adb_handler_lock);
memset(adb_handler, 0, sizeof(adb_handler));
restore_flags(flags);
write_unlock_irq(&adb_handler_lock);
/* That one is still a bit synchronous, oh well... */
if (adb_controller->reset_bus)
......@@ -413,6 +419,7 @@ do_adb_reset_bus(void)
if (adb_controller->autopoll)
adb_controller->autopoll(devs);
}
up(&adb_handler_sem);
nret = notifier_call_chain(&adb_client_list, ADB_MSG_POST_RESET, NULL);
if (nret & NOTIFY_STOP_MASK)
......@@ -512,30 +519,41 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids,
{
int i;
down(&adb_handler_sem);
ids->nids = 0;
for (i = 1; i < 16; i++) {
if ((adb_handler[i].original_address == default_id) &&
(!handler_id || (handler_id == adb_handler[i].handler_id) ||
adb_try_handler_change(i, handler_id))) {
try_handler_change(i, handler_id))) {
if (adb_handler[i].handler != 0) {
printk(KERN_ERR
"Two handlers for ADB device %d\n",
default_id);
continue;
}
write_lock_irq(&adb_handler_lock);
adb_handler[i].handler = handler;
write_unlock_irq(&adb_handler_lock);
ids->id[ids->nids++] = i;
}
}
up(&adb_handler_sem);
return ids->nids;
}
int
adb_unregister(int index)
{
if (!adb_handler[index].handler)
return -ENODEV;
int ret = -ENODEV;
down(&adb_handler_sem);
write_lock_irq(&adb_handler_lock);
if (adb_handler[index].handler) {
ret = 0;
adb_handler[index].handler = 0;
}
write_unlock_irq(&adb_handler_lock);
up(&adb_handler_sem);
return 0;
}
......@@ -544,6 +562,7 @@ adb_input(unsigned char *buf, int nb, struct pt_regs *regs, int autopoll)
{
int i, id;
static int dump_adb_input = 0;
void (*handler)(unsigned char *, int, struct pt_regs *, int);
/* We skip keystrokes and mouse moves when the sleep process
* has been started. We stop autopoll, but this is another security
......@@ -558,14 +577,15 @@ adb_input(unsigned char *buf, int nb, struct pt_regs *regs, int autopoll)
printk(" %x", buf[i]);
printk(", id = %d\n", id);
}
if (adb_handler[id].handler != 0) {
(*adb_handler[id].handler)(buf, nb, regs, autopoll);
}
read_lock(&adb_handler_lock);
handler = adb_handler[id].handler;
if (handler != 0)
(*handler)(buf, nb, regs, autopoll);
read_unlock(&adb_handler_lock);
}
/* Try to change handler to new_id. Will return 1 if successful */
int
adb_try_handler_change(int address, int new_id)
/* Try to change handler to new_id. Will return 1 if successful. */
static int try_handler_change(int address, int new_id)
{
struct adb_request req;
......@@ -584,11 +604,24 @@ adb_try_handler_change(int address, int new_id)
return 1;
}
int
adb_try_handler_change(int address, int new_id)
{
int ret;
down(&adb_handler_sem);
ret = try_handler_change(address, new_id);
up(&adb_handler_sem);
return ret;
}
int
adb_get_infos(int address, int *original_address, int *handler_id)
{
down(&adb_handler_sem);
*original_address = adb_handler[address].original_address;
*handler_id = adb_handler[address].handler_id;
up(&adb_handler_sem);
return (*original_address != 0);
}
......
......@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/prom.h>
#include <linux/adb.h>
#include <asm/io.h>
......@@ -57,7 +58,7 @@ struct adb_regs {
static volatile struct adb_regs *adb;
static struct adb_request *current_req, *last_req;
static unsigned char adb_rbuf[16];
static spinlock_t macio_lock = SPIN_LOCK_UNLOCKED;
static int macio_probe(void);
static int macio_init(void);
......@@ -66,7 +67,6 @@ static int macio_send_request(struct adb_request *req, int sync);
static int macio_adb_autopoll(int devs);
static void macio_adb_poll(void);
static int macio_adb_reset_bus(void);
static void completed(void);
struct adb_driver macio_adb_driver = {
"MACIO",
......@@ -107,19 +107,19 @@ int macio_init(void)
adb = (volatile struct adb_regs *)
ioremap(adbs->addrs->address, sizeof(struct adb_regs));
if (request_irq(adbs->intrs[0].line, macio_adb_interrupt,
0, "ADB", (void *)0)) {
printk(KERN_ERR "ADB: can't get irq %d\n",
adbs->intrs[0].line);
return -EAGAIN;
}
out_8(&adb->ctrl.r, 0);
out_8(&adb->intr.r, 0);
out_8(&adb->error.r, 0);
out_8(&adb->active_hi.r, 0xff); /* for now, set all devices active */
out_8(&adb->active_lo.r, 0xff);
out_8(&adb->autopoll.r, APE);
if (request_irq(adbs->intrs[0].line, macio_adb_interrupt,
0, "ADB", (void *)0)) {
printk(KERN_ERR "ADB: can't get irq %d\n",
adbs->intrs[0].line);
return -EAGAIN;
}
out_8(&adb->intr_enb.r, DFB | TAG);
printk("adb: mac-io driver 1.0 for unified ADB\n");
......@@ -129,16 +129,27 @@ int macio_init(void)
static int macio_adb_autopoll(int devs)
{
unsigned long flags;
spin_lock_irqsave(&macio_lock, flags);
out_8(&adb->active_hi.r, devs >> 8);
out_8(&adb->active_lo.r, devs);
out_8(&adb->autopoll.r, devs? APE: 0);
spin_unlock_irqrestore(&macio_lock, flags);
return 0;
}
static int macio_adb_reset_bus(void)
{
unsigned long flags;
int timeout = 1000000;
/* Hrm... we may want to not lock interrupts for so
* long ... oh well, who uses that chip anyway ? :)
* That function will be seldomly used during boot
* on rare machines, so...
*/
spin_lock_irqsave(&macio_lock, flags);
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | ADB_RST);
while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
if (--timeout == 0) {
......@@ -146,13 +157,14 @@ static int macio_adb_reset_bus(void)
return -1;
}
}
spin_unlock_irqrestore(&macio_lock, flags);
return 0;
}
/* Send an ADB command */
static int macio_send_request(struct adb_request *req, int sync)
{
unsigned long mflags;
unsigned long flags;
int i;
if (req->data[0] != ADB_PACKET)
......@@ -167,8 +179,7 @@ static int macio_send_request(struct adb_request *req, int sync)
req->complete = 0;
req->reply_len = 0;
save_flags(mflags);
cli();
spin_lock_irqsave(&macio_lock, flags);
if (current_req != 0) {
last_req->next = req;
last_req = req;
......@@ -176,7 +187,7 @@ static int macio_send_request(struct adb_request *req, int sync)
current_req = last_req = req;
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
}
restore_flags(mflags);
spin_unlock_irqrestore(&macio_lock, flags);
if (sync) {
while (!req->complete)
......@@ -190,7 +201,12 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
{
int i, n, err;
struct adb_request *req;
unsigned char ibuf[16];
int ibuf_len = 0;
int complete = 0;
int autopoll = 0;
spin_lock(&macio_lock);
if (in_8(&adb->intr.r) & TAG) {
if ((req = current_req) != 0) {
/* put the current request in */
......@@ -202,7 +218,10 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
out_8(&adb->ctrl.r, DTB + CRE);
} else {
out_8(&adb->ctrl.r, DTB);
completed();
current_req = req->next;
complete = 1;
if (current_req)
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
}
}
out_8(&adb->intr.r, 0);
......@@ -218,39 +237,42 @@ static void macio_adb_interrupt(int irq, void *arg, struct pt_regs *regs)
for (i = 0; i < req->reply_len; ++i)
req->reply[i] = in_8(&adb->data[i].r);
}
completed();
current_req = req->next;
complete = 1;
if (current_req)
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
} else if (err == 0) {
/* autopoll data */
n = in_8(&adb->dcount.r) & HMB;
for (i = 0; i < n; ++i)
adb_rbuf[i] = in_8(&adb->data[i].r);
adb_input(adb_rbuf, n, regs,
in_8(&adb->dcount.r) & APD);
ibuf[i] = in_8(&adb->data[i].r);
ibuf_len = n;
autopoll = (in_8(&adb->dcount.r) & APD) != 0;
}
out_8(&adb->error.r, 0);
out_8(&adb->intr.r, 0);
}
}
static void completed(void)
{
struct adb_request *req = current_req;
spin_unlock(&macio_lock);
if (complete && req) {
void (*done)(struct adb_request *) = req->done;
mb();
req->complete = 1;
current_req = req->next;
if (current_req)
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
if (req->done)
(*req->done)(req);
/* Here, we assume that if the request has a done member, the
* struct request will survive to setting req->complete to 1
*/
if (done)
(*done)(req);
}
if (ibuf_len)
adb_input(ibuf, ibuf_len, regs, autopoll);
}
static void macio_adb_poll(void)
{
unsigned long flags;
save_flags(flags);
cli();
local_irq_save(flags);
if (in_8(&adb->intr.r) != 0)
macio_adb_interrupt(0, 0, 0);
restore_flags(flags);
local_irq_restore(flags);
}
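The interrupt-handler rework above (and the matching via-cuda change further down) follows one pattern: record what happened while holding the spinlock, then drop the lock before running completion callbacks or pushing input upstream, since those may re-enter the driver. A compressed userspace sketch of the shape, with illustrative names only:

#include <pthread.h>
#include <string.h>
#include <stddef.h>

struct request {
	struct request *next;
	int complete;
	void (*done)(struct request *);
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct request *current_req;

static void feed_input(unsigned char *buf, int len) { (void)buf; (void)len; }

static void interrupt_bottom(unsigned char *data, int len)
{
	struct request *req;
	unsigned char ibuf[16];
	int ibuf_len = 0, complete = 0;

	pthread_mutex_lock(&lock);
	req = current_req;
	if (req != NULL) {                 /* a queued request finished */
		current_req = req->next;
		complete = 1;
	} else if (len > 0 && len <= 16) { /* unsolicited input: snapshot it */
		memcpy(ibuf, data, len);
		ibuf_len = len;
	}
	pthread_mutex_unlock(&lock);

	/* callbacks run only after the lock is dropped */
	if (complete && req != NULL) {
		void (*done)(struct request *) = req->done;
		req->complete = 1;  /* assumes req survives until this store */
		if (done)
			(*done)(req);
	}
	if (ibuf_len)
		feed_input(ibuf, ibuf_len);
}

int main(void)
{
	interrupt_bottom(NULL, 0);
	return 0;
}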
/*
* Linux/PowerPC Real Time Clock Driver
*
* heavily based on:
* Linux/SPARC Real Time Clock Driver
* Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
*
* This is a little driver that lets a user-level program access
* the PPC clocks chip. It is no use unless you
* use the modified clock utility.
*
* Get the modified clock utility from:
* ftp://vger.rutgers.edu/pub/linux/Sparc/userland/clock.c
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/time.h>
static int rtc_busy = 0;
/* Retrieve the current date and time from the real time clock. */
void get_rtc_time(struct rtc_time *t)
{
unsigned long nowtime;
nowtime = (ppc_md.get_rtc_time)();
to_tm(nowtime, t);
t->tm_year -= 1900;
t->tm_mon -= 1; /* Make sure userland has a 0-based month */
}
/* Set the current date and time in the real time clock. */
void set_rtc_time(struct rtc_time *t)
{
unsigned long nowtime;
nowtime = mktime(t->tm_year+1900, t->tm_mon+1, t->tm_mday,
t->tm_hour, t->tm_min, t->tm_sec);
(ppc_md.set_rtc_time)(nowtime);
}
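A quick worked example of the conversions above: for 14 July 2002, to_tm() is assumed to yield tm_year = 2002 and tm_mon = 7, so after the adjustments userland sees the conventional struct tm values tm_year = 102 (years since 1900) and tm_mon = 6 (0-based); set_rtc_time() applies the inverse offsets before handing the result to mktime().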
static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
struct rtc_time rtc_tm;
switch (cmd)
{
case RTC_RD_TIME:
if (ppc_md.get_rtc_time)
{
get_rtc_time(&rtc_tm);
if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
return -EFAULT;
return 0;
}
else
return -EINVAL;
case RTC_SET_TIME:
if (!capable(CAP_SYS_TIME))
return -EPERM;
if (ppc_md.set_rtc_time)
{
if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
return -EFAULT;
set_rtc_time(&rtc_tm);
return 0;
}
else
return -EINVAL;
default:
return -EINVAL;
}
}
static int rtc_open(struct inode *inode, struct file *file)
{
if (rtc_busy)
return -EBUSY;
rtc_busy = 1;
MOD_INC_USE_COUNT;
return 0;
}
static int rtc_release(struct inode *inode, struct file *file)
{
MOD_DEC_USE_COUNT;
rtc_busy = 0;
return 0;
}
static struct file_operations rtc_fops = {
owner: THIS_MODULE,
llseek: no_llseek,
ioctl: rtc_ioctl,
open: rtc_open,
release: rtc_release
};
static struct miscdevice rtc_dev = { RTC_MINOR, "rtc", &rtc_fops };
static int __init rtc_init(void)
{
int error;
error = misc_register(&rtc_dev);
if (error) {
printk(KERN_ERR "rtc: unable to get misc minor\n");
return error;
}
return 0;
}
static void __exit rtc_exit(void)
{
misc_deregister(&rtc_dev);
}
module_init(rtc_init);
module_exit(rtc_exit);
MODULE_LICENSE("GPL");
......@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/spinlock.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
......@@ -31,6 +32,7 @@
#include <linux/init.h>
static volatile unsigned char *via;
static spinlock_t cuda_lock = SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_MAC
#define CUDA_IRQ IRQ_MAC_ADB
......@@ -386,8 +388,8 @@ cuda_write(struct adb_request *req)
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
save_flags(flags); cli();
spin_lock_irqsave(&cuda_lock, flags);
if (current_req != 0) {
last_req->next = req;
last_req = req;
......@@ -397,15 +399,14 @@ cuda_write(struct adb_request *req)
if (cuda_state == idle)
cuda_start();
}
spin_unlock_irqrestore(&cuda_lock, flags);
restore_flags(flags);
return 0;
}
static void
cuda_start()
{
unsigned long flags;
struct adb_request *req;
/* assert cuda_state == idle */
......@@ -413,41 +414,46 @@ cuda_start()
req = current_req;
if (req == 0)
return;
save_flags(flags); cli();
if ((via[B] & TREQ) == 0) {
restore_flags(flags);
if ((via[B] & TREQ) == 0)
return; /* a byte is coming in from the CUDA */
}
/* set the shift register to shift out and send a byte */
via[ACR] |= SR_OUT; eieio();
via[SR] = req->data[0]; eieio();
via[B] &= ~TIP;
cuda_state = sent_first_byte;
restore_flags(flags);
}
void
cuda_poll()
{
if (via[IFR] & SR_INT) {
unsigned long flags;
save_flags(flags);
cli();
if (via[IFR] & SR_INT)
/* cuda_interrupt only takes a normal lock, we disable
* interrupts here to avoid re-entering and thus deadlocking.
* An option would be to disable only the IRQ source with
* disable_irq(), would that work on m68k ? --BenH
*/
local_irq_save(flags);
cuda_interrupt(0, 0, 0);
restore_flags(flags);
local_irq_restore(flags);
}
}
static void
cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
{
int x, status;
struct adb_request *req;
struct adb_request *req = NULL;
unsigned char ibuf[16];
int ibuf_len = 0;
int complete = 0;
if ((via[IFR] & SR_INT) == 0)
return;
spin_lock(&cuda_lock);
status = (~via[B] & (TIP|TREQ)) | (via[ACR] & SR_OUT); eieio();
/* printk("cuda_interrupt: state=%d status=%x\n", cuda_state, status); */
switch (cuda_state) {
......@@ -502,8 +508,7 @@ cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
cuda_state = awaiting_reply;
} else {
current_req = req->next;
if (req->done)
(*req->done)(req);
complete = 1;
/* not sure about this */
cuda_state = idle;
cuda_start();
......@@ -544,12 +549,18 @@ cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
memmove(req->reply, req->reply + 2, req->reply_len);
}
}
req->complete = 1;
current_req = req->next;
if (req->done)
(*req->done)(req);
complete = 1;
} else {
cuda_input(cuda_rbuf, reply_ptr - cuda_rbuf, regs);
/* This is tricky. We must break the spinlock to call
* cuda_input. However, doing so means we might get
* re-entered from another CPU getting an interrupt
* or calling cuda_poll(). I ended up using the stack
* (it's only for 16 bytes) and moving the actual
* call to cuda_input to outside of the lock.
*/
ibuf_len = reply_ptr - cuda_rbuf;
memcpy(ibuf, cuda_rbuf, ibuf_len);
}
if (status == TREQ) {
via[B] &= ~TIP; eieio();
......@@ -565,6 +576,19 @@ cuda_interrupt(int irq, void *arg, struct pt_regs *regs)
default:
printk("cuda_interrupt: unknown cuda_state %d?\n", cuda_state);
}
spin_unlock(&cuda_lock);
if (complete && req) {
void (*done)(struct adb_request *) = req->done;
mb();
req->complete = 1;
/* Here, we assume that if the request has a done member, the
* struct request will survive to setting req->complete to 1
*/
if (done)
(*done)(req);
}
if (ibuf_len)
cuda_input(ibuf, ibuf_len, regs);
}
static void
......
......@@ -612,7 +612,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
#if defined(__mc68000__)
#if defined(CONFIG_SUN3)
pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
#else
#elif defined(CONFIG_MMU)
if (CPU_IS_020_OR_030)
pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
if (CPU_IS_040_OR_060) {
......
......@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
......
......@@ -4,7 +4,7 @@
obj-$(CONFIG_NFS_FS) += nfs.o
nfs-y := dir.o file.o flushd.o inode.o nfs2xdr.o pagelist.o \
nfs-y := dir.o file.o inode.o nfs2xdr.o pagelist.o \
proc.o read.o symlink.o unlink.o write.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
......
/*
* linux/fs/nfs/flushd.c
*
* For each NFS mount, there is a separate cache object that contains
* a hash table of all clusters. With this cache, an async RPC task
* (`flushd') is associated, which wakes up occasionally to inspect
* its list of dirty buffers.
* (Note that RPC tasks aren't kernel threads. Take a look at the
* rpciod code to understand what they are).
*
* Inside the cache object, we also maintain a count of the current number
* of dirty pages, which may not exceed a certain threshold.
* (FIXME: This threshold should be configurable).
*
* The code is streamlined for what I think is the prevalent case for
* NFS traffic, which is sequential write access without concurrent
* access by different processes.
*
* Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
*
* Rewritten 6/3/2000 by Trond Myklebust
* Copyright (C) 1999, 2000, Trond Myklebust <trond.myklebust@fys.uio.no>
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/time.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/smp_lock.h>
#include <linux/nfs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs_sb.h>
#include <linux/nfs_flushd.h>
/*
* Various constants
*/
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
/*
* This is the wait queue all cluster daemons sleep on
*/
static RPC_WAITQ(flushd_queue, "nfs_flushd");
/*
* Local function declarations.
*/
static void nfs_flushd(struct rpc_task *);
static void nfs_flushd_exit(struct rpc_task *);
int nfs_reqlist_init(struct nfs_server *server)
{
struct nfs_reqlist *cache;
struct rpc_task *task;
int status;
dprintk("NFS: writecache_init\n");
lock_kernel();
status = -ENOMEM;
/* Create the RPC task */
if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC)))
goto out_unlock;
cache = server->rw_requests;
status = 0;
if (cache->task)
goto out_unlock;
task->tk_calldata = server;
cache->task = task;
/* Run the task */
cache->runat = jiffies;
cache->auth = server->client->cl_auth;
task->tk_action = nfs_flushd;
task->tk_exit = nfs_flushd_exit;
rpc_execute(task);
unlock_kernel();
return 0;
out_unlock:
if (task)
rpc_release_task(task);
unlock_kernel();
return status;
}
void nfs_reqlist_exit(struct nfs_server *server)
{
struct nfs_reqlist *cache;
lock_kernel();
cache = server->rw_requests;
if (!cache)
goto out;
dprintk("NFS: reqlist_exit (ptr %p rpc %p)\n", cache, cache->task);
while (cache->task) {
rpc_exit(cache->task, 0);
rpc_wake_up_task(cache->task);
interruptible_sleep_on_timeout(&cache->request_wait, 1 * HZ);
}
out:
unlock_kernel();
}
int nfs_reqlist_alloc(struct nfs_server *server)
{
struct nfs_reqlist *cache;
if (server->rw_requests)
return 0;
cache = (struct nfs_reqlist *)kmalloc(sizeof(*cache), GFP_KERNEL);
if (!cache)
return -ENOMEM;
memset(cache, 0, sizeof(*cache));
atomic_set(&cache->nr_requests, 0);
init_waitqueue_head(&cache->request_wait);
server->rw_requests = cache;
return 0;
}
void nfs_reqlist_free(struct nfs_server *server)
{
if (server->rw_requests) {
kfree(server->rw_requests);
server->rw_requests = NULL;
}
}
#define NFS_FLUSHD_TIMEOUT (30*HZ)
static void
nfs_flushd(struct rpc_task *task)
{
struct nfs_server *server;
struct nfs_reqlist *cache;
LIST_HEAD(head);
dprintk("NFS: %4d flushd starting\n", task->tk_pid);
server = (struct nfs_server *) task->tk_calldata;
cache = server->rw_requests;
for(;;) {
spin_lock(&nfs_wreq_lock);
if (nfs_scan_lru_dirty_timeout(server, &head)) {
spin_unlock(&nfs_wreq_lock);
nfs_flush_list(&head, server->wpages, FLUSH_AGING);
continue;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (nfs_scan_lru_commit_timeout(server, &head)) {
spin_unlock(&nfs_wreq_lock);
nfs_commit_list(&head, FLUSH_AGING);
continue;
}
#endif
spin_unlock(&nfs_wreq_lock);
break;
}
dprintk("NFS: %4d flushd back to sleep\n", task->tk_pid);
if (task->tk_action) {
task->tk_timeout = NFS_FLUSHD_TIMEOUT;
cache->runat = jiffies + task->tk_timeout;
rpc_sleep_on(&flushd_queue, task, NULL, NULL);
}
}
static void
nfs_flushd_exit(struct rpc_task *task)
{
struct nfs_server *server;
struct nfs_reqlist *cache;
server = (struct nfs_server *) task->tk_calldata;
cache = server->rw_requests;
if (cache->task == task)
cache->task = NULL;
wake_up(&cache->request_wait);
}
......@@ -29,7 +29,6 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/nfs_flushd.h>
#include <linux/lockd/bind.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
......@@ -37,7 +36,6 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#define CONFIG_NFS_SNAPSHOT 1
#define NFSDBG_FACILITY NFSDBG_VFS
#define NFS_PARANOIA 1
......@@ -147,18 +145,9 @@ nfs_put_super(struct super_block *sb)
struct nfs_server *server = NFS_SB(sb);
struct rpc_clnt *rpc;
/*
* First get rid of the request flushing daemon.
* Relies on rpc_shutdown_client() waiting on all
* client tasks to finish.
*/
nfs_reqlist_exit(server);
if ((rpc = server->client) != NULL)
rpc_shutdown_client(rpc);
nfs_reqlist_free(server);
if (!(server->flags & NFS_MOUNT_NONLM))
lockd_down(); /* release rpc.lockd */
rpciod_down(); /* release rpciod */
......@@ -262,10 +251,6 @@ int nfs_sb_init(struct super_block *sb)
sb->s_magic = NFS_SUPER_MAGIC;
sb->s_op = &nfs_sops;
INIT_LIST_HEAD(&server->lru_read);
INIT_LIST_HEAD(&server->lru_dirty);
INIT_LIST_HEAD(&server->lru_commit);
INIT_LIST_HEAD(&server->lru_busy);
/* Did getting the root inode fail? */
root_inode = nfs_get_root(sb, &server->fh);
......@@ -333,22 +318,13 @@ int nfs_sb_init(struct super_block *sb)
if (sb->s_maxbytes > MAX_LFS_FILESIZE)
sb->s_maxbytes = MAX_LFS_FILESIZE;
/* Fire up the writeback cache */
if (nfs_reqlist_alloc(server) < 0) {
printk(KERN_NOTICE "NFS: cannot initialize writeback cache.\n");
goto failure_kill_reqlist;
}
/* We're airborne Set socket buffersize */
rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100);
return 0;
/* Yargs. It didn't work out. */
failure_kill_reqlist:
nfs_reqlist_exit(server);
out_free_all:
if (root_inode)
iput(root_inode);
nfs_reqlist_free(server);
return -EINVAL;
out_no_root:
printk("nfs_read_super: get root inode failed\n");
......
......@@ -355,13 +355,6 @@ nfs_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs_readdirargs *args)
unsigned int replen;
u32 count = args->count;
/*
* Some servers (e.g. HP OS 9.5) seem to expect the buffer size
* to be in longwords ... check whether to convert the size.
*/
if (task->tk_client->cl_flags & NFS_CLNTF_BUFSIZE)
count = count >> 2;
p = xdr_encode_fhandle(p, args->fh);
*p++ = htonl(args->cookie);
*p++ = htonl(count); /* see above */
......
......@@ -28,7 +28,6 @@
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_flushd.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
......
......@@ -2,6 +2,9 @@
/* Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
* The Silver Hammer Group, Ltd.
* Copyright (C) 2002 David McCullough <davidm@snapgear.com>
*
* This file provides the definitions and structures needed to
* support uClinux flat-format executables.
*/
#ifndef _LINUX_FLAT_H
......
#ifndef NFS_CLUSTER_H
#define NFS_CLUSTER_H
#ifdef __KERNEL__
#include <asm/atomic.h>
#include <linux/nfs_fs_sb.h>
/*
* Counters of total number and pending number of requests.
* When the total number of requests exceeds the hard limit, we stall
* until it drops again.
*/
#define MAX_REQUEST_HARD 256
/*
* Maximum number of requests per write cluster.
* 32 requests per cluster account for 128K of data on an intel box.
* Note: it's a good idea to make this number smaller than MAX_REQUEST_SOFT.
*
* For 100Mbps Ethernet, 128 pages (i.e. 256K) per cluster gives much
* better performance.
*/
#define REQUEST_HASH_SIZE 16
#define REQUEST_NR(off) ((off) >> PAGE_CACHE_SHIFT)
#define REQUEST_HASH(ino, off) (((ino) ^ REQUEST_NR(off)) & (REQUEST_HASH_SIZE - 1))
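Worked example of the hashing above, assuming 4K pages (PAGE_CACHE_SHIFT == 12): for ino = 0x1234 and off = 0x6000, REQUEST_NR(off) = 6 and REQUEST_HASH(ino, off) = (0x1234 ^ 6) & 15 = 2, so the request lands in bucket 2 of the 16-bucket table.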
/*
* Functions
*/
extern int nfs_reqlist_alloc(struct nfs_server *);
extern void nfs_reqlist_free(struct nfs_server *);
extern int nfs_reqlist_init(struct nfs_server *);
extern void nfs_reqlist_exit(struct nfs_server *);
extern void nfs_wake_flushd(void);
/*
* This is the per-mount writeback cache.
*/
struct nfs_reqlist {
atomic_t nr_requests;
unsigned long runat;
wait_queue_head_t request_wait;
/* The async RPC task that is responsible for scanning the
* requests.
*/
struct rpc_task *task; /* request flush task */
/* Authentication flavor handle for this NFS client */
struct rpc_auth *auth;
/* The list of all inodes with pending writebacks. */
struct inode *inodes;
};
#endif
#endif
......@@ -25,11 +25,6 @@ struct nfs_server {
unsigned int acdirmax;
unsigned int namelen;
char * hostname; /* remote hostname */
struct nfs_reqlist * rw_requests; /* async read/write requests */
struct list_head lru_read,
lru_dirty,
lru_commit,
lru_busy;
struct nfs_fh fh;
struct sockaddr_in addr;
#if CONFIG_NFS_V4
......
......@@ -50,8 +50,6 @@ struct rpc_clnt {
cl_droppriv : 1,/* enable NFS suid hack */
cl_oneshot : 1,/* dispose after use */
cl_dead : 1;/* abandoned */
unsigned int cl_flags; /* misc client flags */
unsigned long cl_hardmax; /* max hard timeout */
struct rpc_rtt cl_rtt; /* RTO estimator data */
......@@ -132,17 +130,15 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
static __inline__
int rpc_call(struct rpc_clnt *clnt, u32 proc, void *argp, void *resp, int flags)
{
struct rpc_message msg = { proc, argp, resp, NULL };
struct rpc_message msg = {
.rpc_proc = proc,
.rpc_argp = argp,
.rpc_resp = resp,
.rpc_cred = NULL
};
return rpc_call_sync(clnt, &msg, flags);
}
static __inline__ void
rpc_set_timeout(struct rpc_clnt *clnt, unsigned int retr, unsigned long incr)
{
xprt_set_timeout(&clnt->cl_timeout, retr, incr);
}
extern void rpciod_wake_up(void);
/*
......
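The rpc_message hunk above trades positional initialization for C99 designated initializers, which keep call sites correct if fields are ever reordered or new ones inserted. A minimal illustration with a hypothetical struct:

struct msg { int proc; void *argp, *resp, *cred; };

/* positional: silently misassigns if the field order changes */
struct msg a = { 17, 0, 0, 0 };

/* designated: each value is tied to a named field */
struct msg b = { .proc = 17, .argp = 0, .resp = 0, .cred = 0 };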