Commit 2684649b authored by David S. Miller

Sparc64 updates, mostly build fixes:

1) Update for schedule_tail and switch_to arg changes.
2) Update for PTE highmem.
3) Add random ioctls to ioctl32 translations.
4) Kill migration IPI.
5) Fixup scheduler bitmap function and move into bitops.h
parent 50b1b006
@@ -29,6 +29,7 @@ CONFIG_BBC_I2C=m
 CONFIG_VT=y
 CONFIG_VT_CONSOLE=y
 CONFIG_SMP=y
+# CONFIG_PREEMPT is not set
 CONFIG_SPARC64=y
 CONFIG_HOTPLUG=y
 CONFIG_HAVE_DEC_LOCK=y
@@ -156,7 +157,7 @@ CONFIG_SPARCAUDIO_CS4231=m
 #
 # Block devices
 #
-CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_FD is not set
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
@@ -509,12 +510,12 @@ CONFIG_WINBOND_840=m
 CONFIG_ACENIC=m
 # CONFIG_ACENIC_OMIT_TIGON_I is not set
 CONFIG_DL2K=m
+CONFIG_E1000=m
 CONFIG_MYRI_SBUS=m
 CONFIG_NS83820=m
 CONFIG_HAMACHI=m
 CONFIG_YELLOWFIN=m
 CONFIG_SK98LIN=m
-# CONFIG_TIGON3 is not set
 CONFIG_FDDI=y
 # CONFIG_DEFXX is not set
 CONFIG_SKFP=m
@@ -674,6 +675,7 @@ CONFIG_NFS_V3=y
 # CONFIG_ROOT_NFS is not set
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
+CONFIG_NFSD_TCP=y
 CONFIG_SUNRPC=y
 CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
@@ -743,27 +745,16 @@ CONFIG_NLS_DEFAULT="iso8859-1"
 # Sound
 #
 CONFIG_SOUND=m
-CONFIG_SOUND_BT878=m
-# CONFIG_SOUND_CMPCI is not set
-# CONFIG_SOUND_EMU10K1 is not set
-# CONFIG_MIDI_EMU10K1 is not set
-# CONFIG_SOUND_FUSION is not set
-# CONFIG_SOUND_CS4281 is not set
-# CONFIG_SOUND_ES1370 is not set
-CONFIG_SOUND_ES1371=m
-# CONFIG_SOUND_ESSSOLO1 is not set
-# CONFIG_SOUND_MAESTRO is not set
-# CONFIG_SOUND_MAESTRO3 is not set
-# CONFIG_SOUND_ICH is not set
-# CONFIG_SOUND_RME96XX is not set
-# CONFIG_SOUND_SONICVIBES is not set
-CONFIG_SOUND_TRIDENT=m
-# CONFIG_SOUND_MSNDCLAS is not set
-# CONFIG_SOUND_MSNDPIN is not set
-# CONFIG_SOUND_VIA82CXXX is not set
-# CONFIG_MIDI_VIA82CXXX is not set
-# CONFIG_SOUND_OSS is not set
-# CONFIG_SOUND_TVMIXER is not set
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
+
+#
+# Advanced Linux Sound Architecture
+#
+# CONFIG_SND is not set
 
 #
 # USB support
@@ -831,6 +822,7 @@ CONFIG_USB_STV680=m
 CONFIG_USB_VICAM=m
 CONFIG_USB_DSBR=m
 CONFIG_USB_DABUSB=m
+CONFIG_USB_KONICAWC=m
 
 #
 # USB Network adaptors
......
@@ -1436,7 +1436,6 @@ ret_from_syscall:
 	 * %o7 for us. Check performance counter stuff too.
 	 */
 	andn	%o7, _TIF_NEWCHILD, %l0
-	mov	%g5, %o0	/* 'prev' */
 	call	schedule_tail
 	 stx	%l0, [%g6 + TI_FLAGS]
 	andcc	%l0, _TIF_PERFCTR, %g0
......
@@ -96,6 +96,7 @@
 #include <linux/usb.h>
 #include <linux/usbdevice_fs.h>
 #include <linux/nbd.h>
+#include <linux/random.h>
 
 /* Use this to get at 32-bit user passed pointers.
    See sys_sparc32.c for description about these. */
@@ -4527,6 +4528,13 @@ COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
 COMPATIBLE_IOCTL(WIOCSTART)
 COMPATIBLE_IOCTL(WIOCSTOP)
 COMPATIBLE_IOCTL(WIOCGSTAT)
+/* Big R */
+COMPATIBLE_IOCTL(RNDGETENTCNT)
+COMPATIBLE_IOCTL(RNDADDTOENTCNT)
+COMPATIBLE_IOCTL(RNDGETPOOL)
+COMPATIBLE_IOCTL(RNDADDENTROPY)
+COMPATIBLE_IOCTL(RNDZAPENTCNT)
+COMPATIBLE_IOCTL(RNDCLEARPOOL)
 /* Bluetooth ioctls */
 COMPATIBLE_IOCTL(HCIDEVUP)
 COMPATIBLE_IOCTL(HCIDEVDOWN)
......
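The six "Big R" entries above work because COMPATIBLE_IOCTL() is for commands whose argument layout is identical in the 32-bit and 64-bit ABIs: RNDGETENTCNT and friends take a plain int pointer or a fixed-layout struct of 32-bit fields, so the compat layer can pass them through untranslated. As a hedged illustration (a userspace sketch, not part of this patch), a 32-bit binary can now drive /dev/random on a 64-bit sparc64 kernel:

/* Sketch: query the entropy pool with RNDGETENTCNT, one of the
 * ioctls translated above.  The argument is a plain int *, so the
 * 32-bit and 64-bit layouts match bit for bit.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

int main(void)
{
	int fd = open("/dev/random", O_RDONLY);
	int count;

	if (fd < 0)
		return 1;
	if (ioctl(fd, RNDGETENTCNT, &count) == 0)
		printf("entropy count: %d bits\n", count);
	close(fd);
	return 0;
}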
@@ -106,9 +106,11 @@ void kpreempt_maybe(void)
 	int cpu = smp_processor_id();
 
 	if (local_irq_count(cpu) == 0 &&
-	    local_bh_count(cpu) == 0)
-		preempt_schedule();
-	current_thread_info()->preempt_count--;
+	    local_bh_count(cpu) == 0 &&
+	    test_thread_flag(TIF_NEED_RESCHED)) {
+		current->state = TASK_RUNNING;
+		schedule();
+	}
 }
 #endif
......
@@ -627,9 +627,11 @@ asmlinkage void syscall_trace(void)
 	if (!(current->ptrace & PT_PTRACED))
 		return;
 	current->exit_code = SIGTRAP;
+	preempt_disable();
 	current->state = TASK_STOPPED;
 	notify_parent(current, SIGCHLD);
 	schedule();
+	preempt_enable();
 	/*
 	 * this isn't the same as continuing with a signal, but it will do
 	 * for normal use. strace only continues with a signal if the
......
@@ -276,9 +276,9 @@ to_kernel:
 	add	%l5, 1, %l6
 	stw	%l6, [%g6 + TI_PRE_COUNT]
 	call	kpreempt_maybe
-	 wrpr	%g0, RTRAP_PSTATE, %pstate
-	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
-	stw	%l5, [%g6 + TI_PRE_COUNT]
+	 nop
+	ba,pt	%xcc, rtrap
+	 stw	%l5, [%g6 + TI_PRE_COUNT]
 #endif
 kern_fpucheck:	ldub	[%g6 + TI_FPDEPTH], %l5
 	brz,pt	%l5, rt_continue
......
@@ -160,11 +160,16 @@ int prom_callback(long *args)
 		pmdp = pmd_offset(pgdp, va);
 		if (pmd_none(*pmdp))
 			goto done;
-		ptep = pte_offset(pmdp, va);
-		if (!pte_present(*ptep))
-			goto done;
-		tte = pte_val(*ptep);
-		res = PROM_TRUE;
+
+		/* Preemption implicitly disabled by virtue of
+		 * being called from inside OBP.
+		 */
+		ptep = pte_offset_map(pmdp, va);
+		if (pte_present(*ptep)) {
+			tte = pte_val(*ptep);
+			res = PROM_TRUE;
+		}
+		pte_unmap(ptep);
 		goto done;
 	}
@@ -210,11 +215,15 @@ int prom_callback(long *args)
 		pmdp = pmd_offset(pgdp, va);
 		if (pmd_none(*pmdp))
 			goto done;
-		ptep = pte_offset(pmdp, va);
-		if (!pte_present(*ptep))
-			goto done;
-		tte = pte_val(*ptep);
-		res = PROM_TRUE;
+
+		/* Preemption implicitly disabled by virtue of
+		 * being called from inside OBP.
+		 */
+		ptep = pte_offset_kernel(pmdp, va);
+		if (pte_present(*ptep)) {
+			tte = pte_val(*ptep);
+			res = PROM_TRUE;
+		}
 		goto done;
 	}
@@ -530,7 +539,7 @@ void __init setup_arch(char **cmdline_p)
 	if (!root_flags)
 		root_mountflags &= ~MS_RDONLY;
 	ROOT_DEV = to_kdev_t(root_dev);
-#ifdef CONFIG_BLK_DEV_RAM
+#ifdef CONFIG_BLK_DEV_INITRD
 	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
 	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
 	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
......
@@ -713,9 +713,11 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
 			current->exit_code = signr;
+			preempt_disable();
 			current->state = TASK_STOPPED;
 			notify_parent(current, SIGCHLD);
 			schedule();
+			preempt_enable();
 			if (!(signr = current->exit_code))
 				continue;
 			current->exit_code = 0;
@@ -766,16 +768,20 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
 			if (is_orphaned_pgrp(current->pgrp))
 				continue;
-		case SIGSTOP:
-			if (current->ptrace & PT_PTRACED)
-				continue;
-			current->state = TASK_STOPPED;
+		case SIGSTOP: {
+			struct signal_struct *sig;
+
 			current->exit_code = signr;
-			if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags &
+			sig = current->p_pptr->sig;
+			preempt_disable();
+			current->state = TASK_STOPPED;
+			if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags &
 			      SA_NOCLDSTOP))
 				notify_parent(current, SIGCHLD);
 			schedule();
+			preempt_enable();
 			continue;
+		}
 		case SIGQUIT: case SIGILL: case SIGTRAP:
 		case SIGABRT: case SIGFPE: case SIGSEGV:
......
@@ -776,7 +776,7 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg
 	unsigned long address = ((unsigned long)&(sf->insns[0]));
 	pgd_t *pgdp = pgd_offset(current->mm, address);
 	pmd_t *pmdp = pmd_offset(pgdp, address);
-	pte_t *ptep = pte_offset(pmdp, address);
+	pte_t *ptep;
 
 	regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -785,6 +785,8 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg
 	if (err)
 		goto sigsegv;
 
+	preempt_disable();
+	ptep = pte_offset_map(pmdp, address);
 	if (pte_present(*ptep)) {
 		unsigned long page = (unsigned long) page_address(pte_page(*ptep));
@@ -794,6 +796,8 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg
 			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
 			: "memory");
 	}
+	pte_unmap(ptep);
+	preempt_enable();
 	}
 	return;
@@ -1225,7 +1229,7 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
 	unsigned long address = ((unsigned long)&(sf->insns[0]));
 	pgd_t *pgdp = pgd_offset(current->mm, address);
 	pmd_t *pmdp = pmd_offset(pgdp, address);
-	pte_t *ptep = pte_offset(pmdp, address);
+	pte_t *ptep;
 
 	regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -1237,6 +1241,8 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
 	if (err)
 		goto sigsegv;
 
+	preempt_disable();
+	ptep = pte_offset_map(pmdp, address);
 	if (pte_present(*ptep)) {
 		unsigned long page = (unsigned long) page_address(pte_page(*ptep));
@@ -1246,6 +1252,8 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
 			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
 			: "memory");
 	}
+	pte_unmap(ptep);
+	preempt_enable();
 	}
 	return;
@@ -1379,9 +1387,11 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
 			current->exit_code = signr;
+			preempt_disable();
 			current->state = TASK_STOPPED;
 			notify_parent(current, SIGCHLD);
 			schedule();
+			preempt_enable();
 			if (!(signr = current->exit_code))
 				continue;
 			current->exit_code = 0;
@@ -1432,17 +1442,20 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
 			if (is_orphaned_pgrp(current->pgrp))
 				continue;
-		case SIGSTOP:
-			if (current->ptrace & PT_PTRACED)
-				continue;
-			current->state = TASK_STOPPED;
+		case SIGSTOP: {
+			struct signal_struct *sig;
+
 			current->exit_code = signr;
-			if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags &
+			sig = current->p_pptr->sig;
+			preempt_disable();
+			current->state = TASK_STOPPED;
+			if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags &
 			      SA_NOCLDSTOP))
 				notify_parent(current, SIGCHLD);
 			schedule();
+			preempt_enable();
 			continue;
+		}
 		case SIGQUIT: case SIGILL: case SIGTRAP:
 		case SIGABRT: case SIGFPE: case SIGSEGV:
 		case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
......
@@ -885,48 +885,6 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
 	}
 }
 
-/* Process migration IPIs. */
-extern unsigned long xcall_migrate_task;
-
-static spinlock_t migration_lock = SPIN_LOCK_UNLOCKED;
-static task_t *new_task;
-
-void smp_migrate_task(int cpu, task_t *p)
-{
-	unsigned long mask = 1UL << cpu;
-
-	if (cpu == smp_processor_id())
-		return;
-
-	if (smp_processors_ready && (cpu_present_map & mask) != 0) {
-		u64 data0 = (((u64)&xcall_migrate_task) & 0xffffffff);
-
-		_raw_spin_lock(&migration_lock);
-		new_task = p;
-
-		if (tlb_type == spitfire)
-			spitfire_xcall_deliver(data0, 0, 0, mask);
-		else
-			cheetah_xcall_deliver(data0, 0, 0, mask);
-	}
-}
-
-/* Called at PIL level 1. */
-asmlinkage void smp_task_migration_interrupt(int irq, struct pt_regs *regs)
-{
-	task_t *p;
-
-	if (irq != PIL_MIGRATE)
-		BUG();
-	clear_softint(1 << irq);
-	p = new_task;
-	_raw_spin_unlock(&migration_lock);
-	sched_task_migrated(p);
-}
-
 /* CPU capture. */
 /* #define CAPTURE_DEBUG */
 extern unsigned long xcall_capture;
......
@@ -249,7 +249,7 @@ EXPORT_SYMBOL(_sigpause_common);
 /* Should really be in linux/kernel/ksyms.c */
 EXPORT_SYMBOL(dump_thread);
 EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(pte_alloc_one);
+EXPORT_SYMBOL(pte_alloc_one_kernel);
 #ifndef CONFIG_SMP
 EXPORT_SYMBOL(pgt_quicklists);
 #endif
......
@@ -50,6 +50,7 @@
 #include <linux/icmpv6.h>
 #include <linux/sysctl.h>
 #include <linux/binfmts.h>
+#include <linux/dnotify.h>
 
 #include <asm/types.h>
 #include <asm/ipc.h>
@@ -1067,16 +1068,20 @@ static long do_readv_writev32(int type, struct file *file,
 	/* First get the "struct iovec" from user memory and
 	 * verify all the pointers
 	 */
+	retval = 0;
 	if (!count)
-		return 0;
+		goto out_nofree;
+	retval = -EFAULT;
 	if (verify_area(VERIFY_READ, vector, sizeof(struct iovec32)*count))
-		return -EFAULT;
+		goto out_nofree;
+	retval = -EINVAL;
 	if (count > UIO_MAXIOV)
-		return -EINVAL;
+		goto out_nofree;
 	if (count > UIO_FASTIOV) {
+		retval = -ENOMEM;
 		iov = kmalloc(count*sizeof(struct iovec), GFP_KERNEL);
 		if (!iov)
-			return -ENOMEM;
+			goto out_nofree;
 	}
 
 	tot_len = 0;
@@ -1136,6 +1141,11 @@ static long do_readv_writev32(int type, struct file *file,
 out:
 	if (iov != iovstack)
 		kfree(iov);
+out_nofree:
+	/* VERIFY_WRITE actually means a read, as we write to user space */
+	if ((retval + (type == VERIFY_WRITE)) > 0)
+		dnotify_parent(file->f_dentry,
+			       (type == VERIFY_WRITE) ? DN_MODIFY : DN_ACCESS);
 	return retval;
 }
......
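The dnotify hook added above means 32-bit readv()/writev() now feed the directory-notification machinery just like their 64-bit counterparts. As a hedged illustration (a userspace sketch, not part of this patch), a watcher registered with fcntl(F_NOTIFY) observes the events dnotify_parent() raises:

/* Sketch: watch the current directory for DN_ACCESS/DN_MODIFY
 * events; SIGIO is the default notification signal.  Needs
 * _GNU_SOURCE for the F_NOTIFY and DN_* definitions.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t hits;

static void on_notify(int sig)
{
	hits++;
}

int main(void)
{
	int dir = open(".", O_RDONLY);

	if (dir < 0)
		return 1;
	signal(SIGIO, on_notify);
	fcntl(dir, F_NOTIFY, DN_ACCESS | DN_MODIFY | DN_MULTISHOT);
	pause();		/* returns after the first SIGIO */
	printf("got %d notification(s)\n", (int)hits);
	return 0;
}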
@@ -44,11 +44,7 @@ tl0_stdfmna: TRAP_NOSAVE(do_stdfmna)
 tl0_privact: TRAP_NOSAVE(__do_privact)
 tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d)
 tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
-#ifdef CONFIG_SMP
-tl0_irq1: TRAP_IRQ(smp_task_migration_interrupt, 1)
-#else
 tl0_irq1: BTRAP(0x41)
-#endif
 tl0_irq2: TRAP_IRQ(handler_irq, 2)
 tl0_irq3: TRAP_IRQ(handler_irq, 3) TRAP_IRQ(handler_irq, 4)
 tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6)
......
@@ -160,10 +160,12 @@ static unsigned int get_user_insn(unsigned long tpc)
 	pmdp = pmd_offset(pgdp, tpc);
 	if (pmd_none(*pmdp))
 		goto outret;
-	ptep = pte_offset(pmdp, tpc);
 
+	/* This disables preemption for us as well. */
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 	__asm__ __volatile__("wrpr %0, %1, %%pstate"
 				: : "r" (pstate), "i" (PSTATE_IE));
+	ptep = pte_offset_map(pmdp, tpc);
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto out;
@@ -177,6 +179,7 @@ static unsigned int get_user_insn(unsigned long tpc)
 			: "r" (pa), "i" (ASI_PHYS_USE_EC));
 
 out:
+	pte_unmap(ptep);
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
 outret:
 	return insn;
......
@@ -101,10 +101,11 @@ static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigne
 		end = PGDIR_SIZE;
 	offset -= address;
 	do {
-		pte_t * pte = pte_alloc(current->mm, pmd, address);
+		pte_t * pte = pte_alloc_map(current->mm, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
+		pte_unmap(pte);
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address < end);
......
@@ -65,24 +65,27 @@ struct page *mem_map_zero;
 int bigkernel = 0;
 
-int do_check_pgt_cache(int low, int high)
-{
-	int freed = 0;
-
-	if (pgtable_cache_size > high) {
+/* XXX Tune this... */
+#define PGT_CACHE_LOW	25
+#define PGT_CACHE_HIGH	50
+
+void check_pgt_cache(void)
+{
+	preempt_disable();
+	if (pgtable_cache_size > PGT_CACHE_HIGH) {
 		do {
 #ifdef CONFIG_SMP
 			if (pgd_quicklist)
-				free_pgd_slow(get_pgd_fast()), freed++;
+				free_pgd_slow(get_pgd_fast());
 #endif
 			if (pte_quicklist[0])
-				free_pte_slow(pte_alloc_one_fast(NULL, 0)), freed++;
+				free_pte_slow(pte_alloc_one_fast(NULL, 0));
 			if (pte_quicklist[1])
-				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))), freed++;
-		} while (pgtable_cache_size > low);
+				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
+		} while (pgtable_cache_size > PGT_CACHE_LOW);
 	}
 #ifndef CONFIG_SMP
-	if (pgd_cache_size > high / 4) {
+	if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
 		struct page *page, *page2;
 		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
 			if ((unsigned long)page->pprev_hash == 3) {
@@ -94,12 +97,11 @@ int do_check_pgt_cache(int low, int high)
 				page->pprev_hash = NULL;
 				pgd_cache_size -= 2;
 				__free_page(page);
-				freed++;
 				if (page2)
 					page = page2->next_hash;
 				else
 					page = (struct page *)pgd_quicklist;
-				if (pgd_cache_size <= low / 4)
+				if (pgd_cache_size <= PGT_CACHE_LOW / 4)
 					break;
 				continue;
 			}
@@ -108,7 +110,7 @@ int do_check_pgt_cache(int low, int high)
 		}
 	}
 #endif
-	return freed;
+	preempt_enable();
 }
 
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -143,7 +145,7 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 static __inline__ void set_dcache_dirty(struct page *page)
 {
 	unsigned long mask = smp_processor_id();
-	unsigned long non_cpu_bits = (1UL << 24UL) - 1UL;
+	unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
 
 	mask = (mask << 24) | (1UL << PG_dcache_dirty);
 	__asm__ __volatile__("1:\n\t"
 			     "ldx [%2], %%g7\n\t"
@@ -166,6 +168,7 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 			     "1:\n\t"
 			     "ldx [%2], %%g7\n\t"
 			     "srlx %%g7, 24, %%g5\n\t"
+			     "and %%g5, %3, %%g5\n\t"
 			     "cmp %%g5, %0\n\t"
 			     "bne,pn %%icc, 2f\n\t"
 			     " andn %%g7, %1, %%g5\n\t"
@@ -175,7 +178,8 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 			     " membar #StoreLoad | #StoreStore\n"
 			     "2:"
 			     : /* no outputs */
-			     : "r" (cpu), "r" (mask), "r" (&page->flags)
+			     : "r" (cpu), "r" (mask), "r" (&page->flags),
+			       "i" (NR_CPUS - 1UL)
 			     : "g5", "g7");
 }
@@ -189,7 +193,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	if (VALID_PAGE(page) &&
 	    page->mapping &&
 	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
-		int cpu = (pg_flags >> 24);
+		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
 
 		/* This is just to optimize away some function calls
 		 * in the SMP case.
@@ -212,8 +216,8 @@ void flush_dcache_page(struct page *page)
 	int dirty_cpu = dcache_dirty_cpu(page);
 
 	if (page->mapping &&
-	    page->mapping->i_mmap == NULL &&
-	    page->mapping->i_mmap_shared == NULL) {
+	    list_empty(&page->mapping->i_mmap) &&
+	    list_empty(&page->mapping->i_mmap_shared)) {
 		if (dirty) {
 			if (dirty_cpu == smp_processor_id())
 				return;
@@ -244,7 +248,7 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
 	if (pmd_none(*pmd))
 		return;
-	ptep = pte_offset(pmd, address);
+	ptep = pte_offset_map(pmd, address);
 	offset = address & ~PMD_MASK;
 	if (offset + size > PMD_SIZE)
 		size = PMD_SIZE - offset;
@@ -267,6 +271,7 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
 				flush_dcache_page_all(mm, page);
 		}
 	}
+	pte_unmap(ptep - 1);
 }
 
 static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
@@ -389,7 +394,7 @@ unsigned long prom_virt_to_phys(unsigned long promva, int *error)
 			*error = 1;
 		return(0);
 	}
-	ptep = (pte_t *)pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
+	ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
 	if (!pte_present(*ptep)) {
 		if (error)
 			*error = 1;
@@ -466,7 +471,7 @@ static void inherit_prom_mappings(void)
 			memset(ptep, 0, BASE_PAGE_SIZE);
 			pmd_set(pmdp, ptep);
 		}
-		ptep = (pte_t *)pmd_page(*pmdp) +
+		ptep = (pte_t *)__pmd_page(*pmdp) +
 				((vaddr >> 13) & 0x3ff);
 
 		val = trans[i].data;
@@ -1133,11 +1138,20 @@ struct pgtable_cache_struct pgt_quicklists;
 #else
 #define DC_ALIAS_SHIFT	0
 #endif
-pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	struct page *page = alloc_pages(GFP_KERNEL, DC_ALIAS_SHIFT);
-	unsigned long color = VPTE_COLOR(address);
+	struct page *page;
+	unsigned long color;
+
+	{
+		pte_t *ptep = pte_alloc_one_fast(mm, address);
+
+		if (ptep)
+			return ptep;
+	}
+
+	color = VPTE_COLOR(address);
+	page = alloc_pages(GFP_KERNEL, DC_ALIAS_SHIFT);
 	if (page) {
 		unsigned long *to_free;
 		unsigned long paddr;
@@ -1159,9 +1173,11 @@ pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 #if (L1DCACHE_SIZE > PAGE_SIZE)			/* is there D$ aliasing problem */
 		/* Now free the other one up, adjust cache size. */
+		preempt_disable();
 		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
 		pte_quicklist[color ^ 0x1] = to_free;
 		pgtable_cache_size++;
+		preempt_enable();
 #endif
 
 		return pte;
......
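The reworked pte_alloc_one_kernel() above follows the usual quicklist discipline: recycle a previously freed, correctly colored page-table page when one is cached, and only fall back to alloc_pages() on a miss; check_pgt_cache() then trims the cache between fixed watermarks with preemption disabled so the per-CPU lists stay consistent. A minimal sketch of that pattern, with hypothetical names standing in for the real helpers:

/* Sketch of the watermarked free-list pattern (names hypothetical).
 * cache_alloc() prefers a recycled page; cache_trim() drains the
 * list back to the low watermark once it grows past the high one.
 */
#define CACHE_LOW	25
#define CACHE_HIGH	50

static unsigned long *freelist;		/* linked through word 0 */
static unsigned long cache_size;

static void *cache_alloc(void)
{
	unsigned long *page = freelist;

	if (page) {			/* fast path: pop a cached page */
		freelist = (unsigned long *)*page;
		cache_size--;
		return page;
	}
	return alloc_fresh_page();	/* hypothetical slow path */
}

static void cache_trim(void)
{
	if (cache_size <= CACHE_HIGH)
		return;
	do {
		free_page_slow(cache_alloc());	/* hypothetical */
	} while (cache_size > CACHE_LOW);
}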
@@ -684,11 +684,4 @@ xcall_call_function:
 	b,pt	%xcc, rtrap_irq
 	 nop
 
-	.globl	xcall_migrate_task
-xcall_migrate_task:
-	mov	1, %g2
-	sllx	%g2, (PIL_MIGRATE), %g2
-	wr	%g2, 0x0, %set_softint
-	retry
-
 #endif /* CONFIG_SMP */
@@ -7,6 +7,7 @@
 #ifndef _SPARC64_BITOPS_H
 #define _SPARC64_BITOPS_H
 
+#include <linux/compiler.h>
 #include <asm/byteorder.h>
 
 extern long ___test_and_set_bit(unsigned long nr, volatile void *addr);
@@ -101,6 +102,23 @@ static __inline__ unsigned long __ffs(unsigned long word)
 #ifdef __KERNEL__
 
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(((unsigned int)b[1])))
+		return __ffs(b[1]) + 64;
+	if (b[1] >> 32)
+		return __ffs(b[1] >> 32) + 96;
+	return __ffs(b[2]) + 128;
+}
+
 /*
  * ffs: find first bit set. This is defined the same way as
  * the libc and compiler builtin ffs routines, therefore
......
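The three-word search above exploits the priority layout: per the comment, the first 100 bits (the real-time queues) are rarely populated, so the checks on b[0] and the low half of b[1] are tagged unlikely() and the common case falls through to the normal-priority bits. A hedged usage sketch (the example() wrapper is hypothetical):

/* Sketch: a 140-bit priority map in three 64-bit words.  Bit n set
 * means run queue n is non-empty; the lowest set index wins.
 */
static void example(void)
{
	unsigned long bitmap[3] = { 0UL, 0UL, 0UL };

	bitmap[120 / 64] |= 1UL << (120 % 64);	/* queue 120 busy */
	bitmap[138 / 64] |= 1UL << (138 % 64);	/* queue 138 busy */

	/* b[0] is empty and so is the low half of b[1], so the
	 * "+ 96" branch fires and this prints 120.
	 */
	printk("next priority: %d\n", sched_find_first_bit(bitmap));
}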
@@ -27,25 +27,6 @@
 #include <asm/system.h>
 #include <asm/spitfire.h>
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 168-bit bitmap where the first 128 bits are
- * unlikely to be set. It's guaranteed that at least one of the 168
- * bits is cleared.
- */
-#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
-# error update this function.
-#endif
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
-}
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
 {
 }
......
@@ -10,95 +10,6 @@
 #include <asm/spitfire.h>
 #include <asm/pgtable.h>
 
-/* Cache and TLB flush operations. */
-
-/* These are the same regardless of whether this is an SMP kernel or not. */
-#define flush_cache_mm(__mm) \
-	do { if ((__mm) == current->mm) flushw_user(); } while(0)
-extern void flush_cache_range(struct vm_area_struct *, unsigned long, unsigned long);
-#define flush_cache_page(vma, page) \
-	flush_cache_mm((vma)->vm_mm)
-
-/* This is unnecessary on the SpitFire since D-CACHE is write-through. */
-#define flush_page_to_ram(page)		do { } while (0)
-
-/*
- * On spitfire, the icache doesn't snoop local stores and we don't
- * use block commit stores (which invalidate icache lines) during
- * module load, so we need this.
- */
-extern void flush_icache_range(unsigned long start, unsigned long end);
-
-extern void __flush_dcache_page(void *addr, int flush_icache);
-extern void __flush_icache_page(unsigned long);
-extern void flush_dcache_page_impl(struct page *page);
-#ifdef CONFIG_SMP
-extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
-extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
-#else
-#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
-#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
-#endif
-extern void flush_dcache_page(struct page *page);
-
-extern void __flush_dcache_range(unsigned long start, unsigned long end);
-extern void __flush_cache_all(void);
-
-extern void __flush_tlb_all(void);
-extern void __flush_tlb_mm(unsigned long context, unsigned long r);
-extern void __flush_tlb_range(unsigned long context, unsigned long start,
-			      unsigned long r, unsigned long end,
-			      unsigned long pgsz, unsigned long size);
-extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
-
-#ifndef CONFIG_SMP
-
-#define flush_cache_all()	__flush_cache_all()
-#define flush_tlb_all()		__flush_tlb_all()
-
-#define flush_tlb_mm(__mm) \
-do { if(CTX_VALID((__mm)->context)) \
-	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
-} while(0)
-
-#define flush_tlb_range(__vma, start, end) \
-do { if(CTX_VALID((__vma)->vm_mm->context)) { \
-	unsigned long __start = (start)&PAGE_MASK; \
-	unsigned long __end = PAGE_ALIGN(end); \
-	__flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
-			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
-			  (__end - __start)); \
-} \
-} while(0)
-
-#define flush_tlb_page(vma, page) \
-do { struct mm_struct *__mm = (vma)->vm_mm; \
-	if(CTX_VALID(__mm->context)) \
-		__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
-				 SECONDARY_CONTEXT); \
-} while(0)
-
-#else /* CONFIG_SMP */
-
-extern void smp_flush_cache_all(void);
-extern void smp_flush_tlb_all(void);
-extern void smp_flush_tlb_mm(struct mm_struct *mm);
-extern void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-				unsigned long end);
-extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
-
-#define flush_cache_all()	smp_flush_cache_all()
-#define flush_tlb_all()		smp_flush_tlb_all()
-#define flush_tlb_mm(mm)	smp_flush_tlb_mm(mm)
-#define flush_tlb_range(vma, start, end) \
-	smp_flush_tlb_range(vma, start, end)
-#define flush_tlb_page(vma, page) \
-	smp_flush_tlb_page((vma)->vm_mm, page)
-
-#endif /* ! CONFIG_SMP */
-
 #define VPTE_BASE_SPITFIRE	0xfffffffe00000000
 #if 1
 #define VPTE_BASE_CHEETAH	VPTE_BASE_SPITFIRE
@@ -106,7 +17,7 @@ extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
 #define VPTE_BASE_CHEETAH	0xffe0000000000000
 #endif
 
-extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
+static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
 					  unsigned long end)
 {
 	/* Note the signed type. */
@@ -154,7 +65,7 @@ extern struct pgtable_cache_struct {
 #ifndef CONFIG_SMP
 
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
+static __inline__ void free_pgd_fast(pgd_t *pgd)
 {
 	struct page *page = virt_to_page(pgd);
@@ -169,7 +80,7 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
 	preempt_enable();
 }
 
-extern __inline__ pgd_t *get_pgd_fast(void)
+static __inline__ pgd_t *get_pgd_fast(void)
 {
 	struct page *ret;
#else /* CONFIG_SMP */ #else /* CONFIG_SMP */
extern __inline__ void free_pgd_fast(pgd_t *pgd) static __inline__ void free_pgd_fast(pgd_t *pgd)
{ {
preempt_disable(); preempt_disable();
*(unsigned long *)pgd = (unsigned long) pgd_quicklist; *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
@@ -221,7 +132,7 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
 	preempt_enable();
 }
 
-extern __inline__ pgd_t *get_pgd_fast(void)
+static __inline__ pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;
@@ -240,7 +151,7 @@ extern __inline__ pgd_t *get_pgd_fast(void)
 	return (pgd_t *)ret;
 }
 
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
+static __inline__ void free_pgd_slow(pgd_t *pgd)
 {
 	free_page((unsigned long)pgd);
 }
@@ -257,23 +168,15 @@ extern __inline__ void free_pgd_slow(pgd_t *pgd)
 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
 
-extern __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
-	if (pmd)
-		memset(pmd, 0, PAGE_SIZE);
-	return pmd;
-}
-
-extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
 {
 	unsigned long *ret;
 	int color = 0;
 
+	preempt_disable();
 	if (pte_quicklist[color] == NULL)
 		color = 1;
-	preempt_disable();
 	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
 		pte_quicklist[color] = (unsigned long *)(*ret);
 		ret[0] = 0;
@@ -284,7 +187,20 @@ extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long
 	return (pmd_t *)ret;
 }
 
-extern __inline__ void free_pmd_fast(pmd_t *pmd)
+static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	pmd_t *pmd;
+
+	pmd = pmd_alloc_one_fast(mm, address);
+	if (!pmd) {
+		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
+		if (pmd)
+			memset(pmd, 0, PAGE_SIZE);
+	}
+	return pmd;
+}
+
+static __inline__ void free_pmd_fast(pmd_t *pmd)
 {
 	unsigned long color = DCACHE_COLOR((unsigned long)pmd);
@@ -295,16 +211,19 @@ extern __inline__ void free_pmd_fast(pmd_t *pmd)
 	preempt_enable();
 }
 
-extern __inline__ void free_pmd_slow(pmd_t *pmd)
+static __inline__ void free_pmd_slow(pmd_t *pmd)
 {
 	free_page((unsigned long)pmd);
 }
 
-#define pmd_populate(MM, PMD, PTE)	pmd_set(PMD, PTE)
+#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
+#define pmd_populate(MM,PMD,PTE_PAGE)		\
+	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
 
-extern pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+#define pte_alloc_one(MM,ADDR)	virt_to_page(pte_alloc_one_kernel(MM,ADDR))
 
-extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
 {
 	unsigned long color = VPTE_COLOR(address);
 	unsigned long *ret;
@@ -319,7 +238,7 @@ extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long
 	return (pte_t *)ret;
 }
 
-extern __inline__ void free_pte_fast(pte_t *pte)
+static __inline__ void free_pte_fast(pte_t *pte)
 {
 	unsigned long color = DCACHE_COLOR((unsigned long)pte);
@@ -330,16 +249,15 @@ extern __inline__ void free_pte_fast(pte_t *pte)
 	preempt_enable();
 }
 
-extern __inline__ void free_pte_slow(pte_t *pte)
+static __inline__ void free_pte_slow(pte_t *pte)
 {
 	free_page((unsigned long)pte);
 }
 
-#define pte_free(pte)		free_pte_fast(pte)
+#define pte_free_kernel(pte)	free_pte_fast(pte)
+#define pte_free(pte)		free_pte_fast(page_address(pte))
 #define pmd_free(pmd)		free_pmd_fast(pmd)
 #define pgd_free(pgd)		free_pgd_fast(pgd)
 #define pgd_alloc(mm)		get_pgd_fast()
 
-extern int do_check_pgt_cache(int, int);
-
 #endif /* _SPARC64_PGALLOC_H */
@@ -36,6 +36,95 @@
 #define LOW_OBP_ADDRESS		0x00000000f0000000
 #define HI_OBP_ADDRESS		0x0000000100000000
 
+#ifndef __ASSEMBLY__
+
+/* Cache and TLB flush operations. */
+
+/* These are the same regardless of whether this is an SMP kernel or not. */
+#define flush_cache_mm(__mm) \
+	do { if ((__mm) == current->mm) flushw_user(); } while(0)
+extern void flush_cache_range(struct vm_area_struct *, unsigned long, unsigned long);
+#define flush_cache_page(vma, page) \
+	flush_cache_mm((vma)->vm_mm)
+
+/*
+ * On spitfire, the icache doesn't snoop local stores and we don't
+ * use block commit stores (which invalidate icache lines) during
+ * module load, so we need this.
+ */
+extern void flush_icache_range(unsigned long start, unsigned long end);
+
+extern void __flush_dcache_page(void *addr, int flush_icache);
+extern void __flush_icache_page(unsigned long);
+extern void flush_dcache_page_impl(struct page *page);
+#ifdef CONFIG_SMP
+extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
+extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
+#else
+#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
+#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
+#endif
+
+extern void __flush_dcache_range(unsigned long start, unsigned long end);
+extern void __flush_cache_all(void);
+
+extern void __flush_tlb_all(void);
+extern void __flush_tlb_mm(unsigned long context, unsigned long r);
+extern void __flush_tlb_range(unsigned long context, unsigned long start,
+			      unsigned long r, unsigned long end,
+			      unsigned long pgsz, unsigned long size);
+extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
+
+#ifndef CONFIG_SMP
+
+#define flush_cache_all()	__flush_cache_all()
+#define flush_tlb_all()		__flush_tlb_all()
+
+#define flush_tlb_mm(__mm) \
+do { if(CTX_VALID((__mm)->context)) \
+	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
+} while(0)
+
+#define flush_tlb_range(__vma, start, end) \
+do { if(CTX_VALID((__vma)->vm_mm->context)) { \
+	unsigned long __start = (start)&PAGE_MASK; \
+	unsigned long __end = PAGE_ALIGN(end); \
+	__flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
+			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
+			  (__end - __start)); \
+} \
+} while(0)
+
+#define flush_tlb_page(vma, page) \
+do { struct mm_struct *__mm = (vma)->vm_mm; \
+	if(CTX_VALID(__mm->context)) \
+		__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
+				 SECONDARY_CONTEXT); \
+} while(0)
+
+#else /* CONFIG_SMP */
+
+extern void smp_flush_cache_all(void);
+extern void smp_flush_tlb_all(void);
+extern void smp_flush_tlb_mm(struct mm_struct *mm);
+extern void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
+
+#define flush_cache_all()	smp_flush_cache_all()
+#define flush_tlb_all()		smp_flush_tlb_all()
+#define flush_tlb_mm(mm)	smp_flush_tlb_mm(mm)
+#define flush_tlb_range(vma, start, end) \
+	smp_flush_tlb_range(vma, start, end)
+#define flush_tlb_page(vma, page) \
+	smp_flush_tlb_page((vma)->vm_mm, page)
+
+#endif /* ! CONFIG_SMP */
+
+#endif /* ! __ASSEMBLY__ */
+
 /* XXX All of this needs to be rethought so we can take advantage
  * XXX cheetah's full 64-bit virtual address space, ie. no more hole
  * XXX in the middle like on spitfire. -DaveM
@@ -215,7 +304,8 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
 #define pgd_set(pgdp, pmdp)	\
 	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
-#define pmd_page(pmd)		((unsigned long) __va((pmd_val(pmd)<<11UL)))
+#define __pmd_page(pmd)		((unsigned long) __va((pmd_val(pmd)<<11UL)))
+#define pmd_page(pmd)		virt_to_page((void *)__pmd_page(pmd))
 #define pgd_page(pgd)		((unsigned long) __va((pgd_val(pgd)<<11UL)))
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
@@ -264,8 +354,13 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 	((address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))
 
 /* Find an entry in the third-level page table.. */
-#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
+#define __pte_offset(dir, address) ((pte_t *) __pmd_page(*(dir)) + \
 	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+#define pte_offset_kernel		__pte_offset
+#define pte_offset_map			__pte_offset
+#define pte_offset_map_nested		__pte_offset
+#define pte_unmap(pte)			do { } while (0)
+#define pte_unmap_nested(pte)		do { } while (0)
 
 extern pgd_t swapper_pg_dir[1];
@@ -312,10 +407,10 @@ sun4u_get_pte (unsigned long addr)
 		return addr & _PAGE_PADDR;
 	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
 		return prom_virt_to_phys(addr, 0);
-	pgdp = pgd_offset_k (addr);
-	pmdp = pmd_offset (pgdp, addr);
-	ptep = pte_offset (pmdp, addr);
-	return pte_val (*ptep) & _PAGE_PADDR;
+	pgdp = pgd_offset_k(addr);
+	pmdp = pmd_offset(pgdp, addr);
+	ptep = pte_offset_kernel(pmdp, addr);
+	return pte_val(*ptep) & _PAGE_PADDR;
 }
 
 extern __inline__ unsigned long
@@ -350,11 +445,18 @@ extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, u
 extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, unsigned long, unsigned long, unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-#endif /* !(__ASSEMBLY__) */
-
 /*
  * No page table caches to initialise
 */
 #define pgtable_cache_init()	do { } while (0)
 
+extern void check_pgt_cache(void);
+
+extern void flush_dcache_page(struct page *page);
+
+/* This is unnecessary on the SpitFire since D-CACHE is write-through. */
+#define flush_page_to_ram(page)		do { } while (0)
+
+#endif /* !(__ASSEMBLY__) */
+
 #endif /* !(_SPARC64_PGTABLE_H) */
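pte_offset_map()/pte_unmap() (and the _nested variants) are the PTE-highmem API: on architectures whose PTE pages can live in highmem they temporarily map the page-table page around the access, while on sparc64 they compile down to __pte_offset and a no-op. Callers must still pair them, which is exactly the shape of the prom_callback() and get_user_insn() changes earlier in this commit. A minimal sketch of the walk, mirroring that pattern (probe_pte() is a hypothetical helper):

/* Sketch of the 2.5-era three-level walk using the new macros.
 * pte_offset_map() may map the PTE page; pte_unmap() releases it
 * (both no-ops on sparc64, but required for portable code).
 */
static unsigned long probe_pte(struct mm_struct *mm, unsigned long va)
{
	pgd_t *pgdp = pgd_offset(mm, va);
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long val = 0;

	if (pgd_none(*pgdp))
		return 0;
	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp))
		return 0;
	ptep = pte_offset_map(pmdp, va);
	if (pte_present(*ptep))
		val = pte_val(*ptep);
	pte_unmap(ptep);
	return val;
}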
@@ -12,10 +12,11 @@
  * XXX happily sit at the same PIL. We would then need only two
  * XXX PILs, one for devices and one for the CPU local timer tick.
  */
-#define PIL_MIGRATE	1
+/* None currently allocated, '1' is available for use. */
+#define PIL_SMP_1	1
 
 #ifndef __ASSEMBLY__
-#define PIL_RESERVED(PIL)	((PIL) == PIL_MIGRATE)
+#define PIL_RESERVED(PIL)	((PIL) == PIL_SMP_1)
 #endif
 
 #endif /* !(_SPARC64_PIL_H) */
@@ -172,7 +172,7 @@ if ((PREV)->thread.smp_lock_count) { \
  * not preserve it's value. Hairy, but it lets us remove 2 loads
  * and 2 stores in this critical code path. -DaveM
  */
-#define switch_to(prev, next, last) \
+#define switch_to(prev, next) \
 do {	CHECK_LOCKS(prev); \
 	if (test_thread_flag(TIF_PERFCTR)) { \
 		unsigned long __tmp; \
@@ -193,16 +193,16 @@ do { CHECK_LOCKS(prev); \
 	"stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
 	"stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
 	"rdpr %%wstate, %%o5\n\t" \
-	"stx %%o6, [%%g6 + %3]\n\t" \
-	"stb %%o5, [%%g6 + %2]\n\t" \
+	"stx %%o6, [%%g6 + %2]\n\t" \
+	"stb %%o5, [%%g6 + %1]\n\t" \
 	"rdpr %%cwp, %%o5\n\t" \
-	"stb %%o5, [%%g6 + %5]\n\t" \
-	"mov %1, %%g6\n\t" \
-	"ldub [%1 + %5], %%g1\n\t" \
+	"stb %%o5, [%%g6 + %4]\n\t" \
+	"mov %0, %%g6\n\t" \
+	"ldub [%0 + %4], %%g1\n\t" \
 	"wrpr %%g1, %%cwp\n\t" \
-	"ldx [%%g6 + %3], %%o6\n\t" \
-	"ldub [%%g6 + %2], %%o5\n\t" \
-	"ldx [%%g6 + %4], %%o7\n\t" \
+	"ldx [%%g6 + %2], %%o6\n\t" \
+	"ldub [%%g6 + %1], %%o5\n\t" \
+	"ldx [%%g6 + %3], %%o7\n\t" \
 	"mov %%g6, %%l2\n\t" \
 	"wrpr %%o5, 0x0, %%wstate\n\t" \
 	"ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
@@ -210,13 +210,13 @@ do { CHECK_LOCKS(prev); \
 	"wrpr %%g0, 0x94, %%pstate\n\t" \
 	"mov %%l2, %%g6\n\t" \
 	"wrpr %%g0, 0x96, %%pstate\n\t" \
-	"andcc %%o7, %6, %%g0\n\t" \
+	"andcc %%o7, %5, %%g0\n\t" \
 	"bne,pn %%icc, ret_from_syscall\n\t" \
-	" ldx [%%g5 + %7], %0\n\t" \
-	: "=&r" (last) \
+	" nop\n\t" \
+	: /* no outputs */ \
 	: "r" (next->thread_info), \
 	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \
-	  "i" (_TIF_NEWCHILD), "i" (TI_TASK) \
+	  "i" (_TIF_NEWCHILD) \
 	: "cc", "g1", "g2", "g3", "g5", "g7", \
 	  "l2", "l3", "l4", "l5", "l6", "l7", \
 	  "i0", "i1", "i2", "i3", "i4", "i5", \
......