Commit fdf53efc authored by Linus Torvalds

Import 2.3.11pre4

parent 5c2f9737
......@@ -105,7 +105,7 @@ O_OBJS += sys_takara.o
endif
# Device support
ifdef CONFIG_ALPHA_MIATA
ifneq ($(CONFIG_ALPHA_MIATA)$(CONFIG_ALPHA_DP264),)
O_OBJS += es1888.o
endif
ifneq ($(CONFIG_ALPHA_SX164)$(CONFIG_ALPHA_MIATA)$(CONFIG_ALPHA_DP264),)
......
......@@ -48,8 +48,6 @@ extern void __divqu (void);
extern void __remqu (void);
EXPORT_SYMBOL(alpha_mv);
EXPORT_SYMBOL(local_bh_count);
EXPORT_SYMBOL(local_irq_count);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
......@@ -178,6 +176,9 @@ EXPORT_SYMBOL(debug_spin_trylock);
EXPORT_SYMBOL(write_lock);
EXPORT_SYMBOL(read_lock);
#endif
#else /* __SMP__ */
EXPORT_SYMBOL(__local_bh_count);
EXPORT_SYMBOL(__local_irq_count);
#endif /* __SMP__ */
/*
......
......@@ -425,19 +425,24 @@ pyxis_native_window_setup(void)
{
/*
* Set up the PCI->physical memory translation windows.
* For now, windows 1,2 and 3 are disabled. In the future, we may
* For now, windows 2 and 3 are disabled. In the future, we may
* want to use them to do scatter/gather DMA.
*
* Window 0 goes at 1 GB and is 1 GB large.
* Window 0 goes at 2 GB and is 1 GB large.
* Window 1 goes at 3 GB and is 1 GB large.
*/
*(vuip)PYXIS_W0_BASE = 1U | (PYXIS_DMA_WIN_BASE_DEFAULT & 0xfff00000U);
*(vuip)PYXIS_W0_BASE = PYXIS_DMA_WIN_BASE_DEFAULT | 1UL;
*(vuip)PYXIS_W0_MASK = (PYXIS_DMA_WIN_SIZE_DEFAULT - 1) & 0xfff00000U;
*(vuip)PYXIS_T0_BASE = 0;
*(vuip)PYXIS_W1_BASE = 0x0 ;
*(vuip)PYXIS_W2_BASE = 0x0 ;
*(vuip)PYXIS_W3_BASE = 0x0 ;
*(vuip)PYXIS_W1_BASE = (PYXIS_DMA_WIN_BASE_DEFAULT +
PYXIS_DMA_WIN_SIZE_DEFAULT) | 1U;
*(vuip)PYXIS_W1_MASK = (PYXIS_DMA_WIN_SIZE_DEFAULT - 1) & 0xfff00000U;
*(vuip)PYXIS_T1_BASE = PYXIS_DMA_WIN_SIZE_DEFAULT;
*(vuip)PYXIS_W2_BASE = 0x0;
*(vuip)PYXIS_W3_BASE = 0x0;
mb();
}
......
......@@ -32,6 +32,7 @@ es1888_init(void)
continue;
inb(0x022a); /* pause */
outb(0xc6, 0x022c); /* enable extended mode */
inb(0x022a); /* pause, also forces the write */
while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
continue;
outb(0xb1, 0x022c); /* setup for write to Interrupt CR */
......@@ -44,4 +45,5 @@ es1888_init(void)
while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
continue;
outb(0x18, 0x022c); /* set DMA channel 1 */
inb(0x022c); /* force the write */
}
......@@ -35,9 +35,12 @@
#define vulp volatile unsigned long *
#define vuip volatile unsigned int *
unsigned int local_irq_count[NR_CPUS];
unsigned int local_bh_count[NR_CPUS];
unsigned long hardirq_no[NR_CPUS];
/* Only uniprocessor needs this IRQ/BH locking depth, on SMP it lives
in the per-cpu structure for cache reasons. */
#ifndef __SMP__
int __local_irq_count;
int __local_bh_count;
#endif
#if NR_IRQS > 64
# error Unable to handle more than 64 irq levels.
......@@ -384,6 +387,8 @@ static void *previous_irqholder = NULL;
static void show(char * str, void *where);
#define SYNC_OTHER_CPUS(x) udelay((x)+1);
static inline void
wait_on_irq(int cpu, void *where)
{
......@@ -397,8 +402,8 @@ wait_on_irq(int cpu, void *where)
* already executing in one..
*/
if (!atomic_read(&global_irq_count)) {
if (local_bh_count[cpu] ||
!atomic_read(&global_bh_count))
if (local_bh_count(cpu)
|| !atomic_read(&global_bh_count))
break;
}
......@@ -412,19 +417,15 @@ wait_on_irq(int cpu, void *where)
count = MAXCOUNT;
}
__sti();
#if 0
SYNC_OTHER_CORES(cpu);
#else
udelay(cpu+1);
#endif
SYNC_OTHER_CPUS(cpu);
__cli();
if (atomic_read(&global_irq_count))
continue;
if (global_irq_lock.lock)
if (spin_is_locked(&global_irq_lock))
continue;
if (!local_bh_count[cpu] &&
atomic_read(&global_bh_count))
if (!local_bh_count(cpu)
&& atomic_read(&global_bh_count))
continue;
if (spin_trylock(&global_irq_lock))
break;
......@@ -469,14 +470,14 @@ get_irqlock(int cpu, void* where)
void
__global_cli(void)
{
int cpu;
int cpu = smp_processor_id();
void *where = __builtin_return_address(0);
/*
* Maximize ipl. If ipl was previously 0 and if this thread
* is not in an irq, then take global_irq_lock.
*/
if ((swpipl(7) == 0) && !local_irq_count[cpu = smp_processor_id()])
if (swpipl(7) == 0 && !local_irq_count(cpu))
get_irqlock(cpu, where);
}
......@@ -485,9 +486,8 @@ __global_sti(void)
{
int cpu = smp_processor_id();
if (!local_irq_count[cpu]) {
if (!local_irq_count(cpu))
release_irqlock(cpu);
}
__sti();
}
......@@ -512,7 +512,7 @@ __global_save_flags(void)
retval = 2 + local_enabled;
/* Check for global flags if we're not in an interrupt. */
if (!local_irq_count[cpu]) {
if (!local_irq_count(cpu)) {
if (local_enabled)
retval = 1;
if (global_irq_holder == cpu)
......@@ -550,7 +550,7 @@ __global_restore_flags(unsigned long flags)
#define STUCK \
if (!--stuck) { \
printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n", \
irq, cpu,global_irq_holder); \
irq, cpu, global_irq_holder); \
stuck = INIT_STUCK; \
}
......@@ -566,11 +566,11 @@ irq_enter(int cpu, int irq)
hardirq_enter(cpu, irq);
barrier();
while (global_irq_lock.lock) {
while (spin_is_locked(&global_irq_lock)) {
if (cpu == global_irq_holder) {
int globl_locked = global_irq_lock.lock;
int globl_locked = spin_is_locked(&global_irq_lock);
int globl_icount = atomic_read(&global_irq_count);
int local_count = local_irq_count[cpu];
int local_count = local_irq_count(cpu);
/* It is very important that we load the state
variables before we do the first call to
......@@ -609,19 +609,16 @@ show(char * str, void *where)
#endif
int cpu = smp_processor_id();
int global_count = atomic_read(&global_irq_count);
int local_count0 = local_irq_count[0];
int local_count1 = local_irq_count[1];
long hardirq_no0 = hardirq_no[0];
long hardirq_no1 = hardirq_no[1];
printk("\n%s, CPU %d: %p\n", str, cpu, where);
printk("irq: %d [%d(0x%016lx) %d(0x%016lx)]\n", global_count,
local_count0, hardirq_no0, local_count1, hardirq_no1);
printk("irq: %d [%d %d]\n",
atomic_read(&global_irq_count),
cpu_data[0].irq_count,
cpu_data[1].irq_count);
printk("bh: %d [%d %d]\n",
atomic_read(&global_bh_count), local_bh_count[0],
local_bh_count[1]);
atomic_read(&global_bh_count),
cpu_data[0].bh_count,
cpu_data[1].bh_count);
#if 0
stack = (unsigned long *) &str;
for (i = 40; i ; i--) {
......@@ -644,6 +641,7 @@ wait_on_bh(void)
count = ~0;
}
/* nothing .. wait for the other bh's to go away */
barrier();
} while (atomic_read(&global_bh_count) != 0);
}
......@@ -658,12 +656,8 @@ wait_on_bh(void)
void
synchronize_bh(void)
{
if (atomic_read(&global_bh_count)) {
int cpu = smp_processor_id();
if (!local_irq_count[cpu] && !local_bh_count[cpu]) {
wait_on_bh();
}
}
if (atomic_read(&global_bh_count) && !in_interrupt())
wait_on_bh();
}
/*
......@@ -680,6 +674,8 @@ synchronize_bh(void)
void
synchronize_irq(void)
{
#if 0
/* Joe's version. */
int cpu = smp_processor_id();
int local_count;
int global_count;
......@@ -688,7 +684,7 @@ synchronize_irq(void)
mb();
do {
local_count = local_irq_count[cpu];
local_count = local_irq_count(cpu);
global_count = atomic_read(&global_irq_count);
if (DEBUG_SYNCHRONIZE_IRQ && (--countdown == 0)) {
printk("%d:%d/%d\n", cpu, local_count, global_count);
......@@ -696,12 +692,19 @@ synchronize_irq(void)
break;
}
} while (global_count != local_count);
#else
/* Jay's version. */
if (atomic_read(&global_irq_count)) {
cli();
sti();
}
#endif
}
#else /* !__SMP__ */
#define irq_enter(cpu, irq) (++local_irq_count[cpu])
#define irq_exit(cpu, irq) (--local_irq_count[cpu])
#define irq_enter(cpu, irq) (++local_irq_count(cpu))
#define irq_exit(cpu, irq) (--local_irq_count(cpu))
#endif /* __SMP__ */
......@@ -868,31 +871,23 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
unsigned long a3, unsigned long a4, unsigned long a5,
struct pt_regs regs)
{
unsigned long flags;
switch (type) {
case 0:
#ifdef __SMP__
__save_and_cli(flags);
handle_ipi(&regs);
__restore_flags(flags);
return;
#else
printk("Interprocessor interrupt? You must be kidding\n");
#endif
break;
case 1:
__save_and_cli(flags);
handle_irq(RTC_IRQ, -1, &regs);
__restore_flags(flags);
return;
case 2:
alpha_mv.machine_check(vector, la_ptr, &regs);
return;
case 3:
__save_and_cli(flags);
alpha_mv.device_interrupt(vector, &regs);
__restore_flags(flags);
return;
case 4:
perf_irq(vector, &regs);
......
......@@ -31,3 +31,19 @@ extern void handle_irq(int irq, int ack, struct pt_regs * regs);
#define TIMER_IRQ RTC_IRQ /* timer is the rtc */
#endif
extern char _stext;
/*
 * Record a kernel-mode program counter sample into the profiling
 * histogram (read via /proc/profile).  Called from the timer tick
 * when the interrupted context was in the kernel.
 *
 * pc: the interrupted program counter (a kernel text address).
 *
 * NOTE(review): assumes prof_buffer/prof_shift/prof_len were set up
 * by the generic profiling init code when "profile=" was given on
 * the command line — prof_buffer is NULL otherwise, which is why it
 * is tested first.  The current->pid test skips samples taken while
 * the idle task (pid 0) is running.
 */
static inline void alpha_do_profile (unsigned long pc)
{
	if (prof_buffer && current->pid) {
		/* Convert the PC into a histogram slot index: offset from
		   the start of kernel text, scaled down by prof_shift. */
		pc -= (unsigned long) &_stext;
		pc >>= prof_shift;
		/*
		 * Don't ignore out-of-bounds PC values silently,
		 * put them into the last histogram slot, so if
		 * present, they will show up as a sharp peak.
		 */
		if (pc > prof_len - 1)
			pc = prof_len - 1;
		/* Atomic increment: this can race with the same slot being
		   bumped from another CPU's timer interrupt. */
		atomic_inc((atomic_t *)&prof_buffer[pc]);
	}
}
......@@ -927,6 +927,7 @@ asmlinkage unsigned long osf_getsysinfo(unsigned long op, void *buffer,
return -EINVAL;
cpu = (struct percpu_struct*)
((char*)hwrpb + hwrpb->processor_offset);
w = cpu->type;
if (put_user(w, (unsigned long *)buffer))
return -EFAULT;
return 1;
......
......@@ -76,7 +76,7 @@ sys_sethae(unsigned long hae, unsigned long a1, unsigned long a2,
}
#ifdef __SMP__
void
int
cpu_idle(void *unused)
{
/* An endless idle loop with no priority at all. */
......@@ -329,7 +329,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
p->tss.ksp = (unsigned long) childstack;
p->tss.pal_flags = 1; /* set FEN, clear everything else */
p->tss.flags = current->tss.flags;
p->tss.mm_context = p->tss.asn = 0;
return 0;
}
......
......@@ -193,7 +193,7 @@ extern void entDbg(void);
/* process.c */
extern void generic_kill_arch (int mode, char *reboot_cmd);
extern void cpu_idle(void *) __attribute__((noreturn));
extern int cpu_idle(void *) __attribute__((noreturn));
/* ptrace.c */
extern int ptrace_set_bpt (struct task_struct *child);
......
......@@ -231,7 +231,6 @@ sys_ptrace(long request, long pid, long addr, long data,
int a4, int a5, struct pt_regs regs)
{
struct task_struct *child;
unsigned long tmp;
long ret;
lock_kernel();
......
......@@ -97,6 +97,8 @@ smp_store_cpu_info(int cpuid)
cpu_data[cpuid].loops_per_sec = loops_per_sec;
cpu_data[cpuid].last_asn
= (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION;
cpu_data[cpuid].irq_count = 0;
cpu_data[cpuid].bh_count = 0;
}
/*
......@@ -107,12 +109,6 @@ smp_setup_percpu_timer(int cpuid)
{
cpu_data[cpuid].prof_counter = 1;
cpu_data[cpuid].prof_multiplier = 1;
#ifdef NOT_YET_PROFILING
load_profile_irq(mid_xlate[cpu], lvl14_resolution);
if (cpu == smp_boot_cpuid)
enable_pil_irq(14);
#endif
}
/*
......@@ -586,14 +582,12 @@ void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
int cpu = smp_processor_id();
int user = user_mode(regs);
unsigned long user = user_mode(regs);
struct cpuinfo_alpha *data = &cpu_data[cpu];
#ifdef NOT_YET_PROFILING
clear_profile_irq(mid_xlate[cpu]);
/* Record kernel PC. */
if (!user)
alpha_do_profile(regs->pc);
#endif
if (!--data->prof_counter) {
/* We need to make like a normal interrupt -- otherwise
......@@ -630,28 +624,7 @@ smp_percpu_timer_interrupt(struct pt_regs *regs)
int __init
setup_profiling_timer(unsigned int multiplier)
{
#ifdef NOT_YET_PROFILING
int i;
unsigned long flags;
/* Prevent level14 ticker IRQ flooding. */
if((!multiplier) || (lvl14_resolution / multiplier) < 500)
return -EINVAL;
save_and_cli(flags);
for (i = 0; i < NR_CPUS; i++) {
if (cpu_present_mask & (1L << i)) {
load_profile_irq(mid_xlate[i],
lvl14_resolution / multiplier);
prof_multiplier[i] = multiplier;
}
}
restore_flags(flags);
return 0;
#else
return -EINVAL;
#endif
}
......@@ -893,9 +866,11 @@ ipi_flush_tlb_mm(void *x)
void
flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->mm)
if (mm == current->mm) {
flush_tlb_current(mm);
else
if (atomic_read(&mm->count) == 1)
return;
} else
flush_tlb_other(mm);
if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
......@@ -923,15 +898,17 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
struct flush_tlb_page_struct data;
struct mm_struct *mm = vma->vm_mm;
if (mm == current->mm) {
flush_tlb_current_page(mm, vma, addr);
if (atomic_read(&mm->count) == 1)
return;
} else
flush_tlb_other(mm);
data.vma = vma;
data.mm = mm;
data.addr = addr;
if (mm == current->mm)
flush_tlb_current_page(mm, vma, addr);
else
flush_tlb_other(mm);
if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
printk(KERN_CRIT "flush_tlb_page: timed out\n");
}
......
......@@ -393,8 +393,8 @@ monet_pci_fixup(void)
{
layout_all_busses(DEFAULT_IO_BASE, DEFAULT_MEM_BASE);
common_pci_fixup(monet_map_irq, monet_swizzle);
/* es1888_init(); */ /* later? */
SMC669_Init(1);
es1888_init();
}
static void __init
......
......@@ -94,6 +94,10 @@ void timer_interrupt(int irq, void *dev, struct pt_regs * regs)
smp_percpu_timer_interrupt(regs);
if (smp_processor_id() != smp_boot_cpuid)
return;
#else
/* Not SMP, do kernel PC profiling here. */
if (!user_mode(regs))
alpha_do_profile(regs->pc);
#endif
write_lock(&xtime_lock);
......
......@@ -704,20 +704,21 @@ ieee_CVTQS (int f, unsigned long a, unsigned long *b)
* FPCR_INV if invalid operation occurred, etc.
*/
unsigned long
ieee_CVTQT (int f, unsigned long a, unsigned long *b)
ieee_CVTQT (int f, long a, unsigned long *b)
{
EXTENDED op_b;
op_b.s = 0;
op_b.f[0] = a;
op_b.f[1] = 0;
if (sign(a) < 0) {
op_b.s = 1;
op_b.f[0] = -a;
if (a != 0) {
op_b.s = (a < 0 ? 1 : 0);
op_b.f[0] = (a < 0 ? -a : a);
op_b.f[1] = 0;
op_b.e = 55;
normalize(&op_b);
return round_t_ieee(f, &op_b, b);
} else {
*b = 0;
return 0;
}
op_b.e = 55;
normalize(&op_b);
return round_t_ieee(f, &op_b, b);
}
......
......@@ -20,7 +20,7 @@
extern unsigned long ieee_CVTST (int rm, unsigned long a, unsigned long *b);
extern unsigned long ieee_CVTTS (int rm, unsigned long a, unsigned long *b);
extern unsigned long ieee_CVTQS (int rm, unsigned long a, unsigned long *b);
extern unsigned long ieee_CVTQT (int rm, unsigned long a, unsigned long *b);
extern unsigned long ieee_CVTQT (int rm, long a, unsigned long *b);
extern unsigned long ieee_CVTTQ (int rm, unsigned long a, unsigned long *b);
extern unsigned long ieee_CMPTEQ (unsigned long a, unsigned long b,
......
......@@ -41,7 +41,7 @@ void
get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
{
unsigned long new = __get_new_mmu_context(p, mm);
p->tss.mm_context = new;
mm->context = new;
p->tss.asn = new & HARDWARE_ASN_MASK;
}
......
......@@ -173,7 +173,7 @@ show_mem(void)
extern unsigned long free_area_init(unsigned long, unsigned long);
static inline struct thread_struct *
static inline unsigned long
load_PCB(struct thread_struct * pcb)
{
register unsigned long sp __asm__("$30");
......@@ -192,7 +192,7 @@ paging_init(unsigned long start_mem, unsigned long end_mem)
unsigned long newptbr;
struct memclust_struct * cluster;
struct memdesc_struct * memdesc;
struct thread_struct *original_pcb_ptr;
unsigned long original_pcb_ptr;
/* initialize mem_map[] */
start_mem = free_area_init(start_mem, end_mem);
......@@ -246,11 +246,11 @@ paging_init(unsigned long start_mem, unsigned long end_mem)
since KSEG values also happen to work, folks get confused.
Check this here. */
if ((unsigned long)original_pcb_ptr < PAGE_OFFSET) {
original_pcb_ptr = (struct thread_struct *)
phys_to_virt((unsigned long) original_pcb_ptr);
if (original_pcb_ptr < PAGE_OFFSET) {
original_pcb_ptr = (unsigned long)
phys_to_virt(original_pcb_ptr);
}
original_pcb = *original_pcb_ptr;
original_pcb = *(struct thread_struct *) original_pcb_ptr;
return start_mem;
}
......
......@@ -2,7 +2,7 @@
* linux/kernel/ldt.c
*
* Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
* Copyright (C) 1998 Ingo Molnar
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#include <linux/errno.h>
......
......@@ -539,7 +539,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
:"=&a" (retval), "=&S" (d0)
:"0" (__NR_clone), "i" (__NR_exit),
"r" (arg), "r" (fn),
"b" (flags | CLONE_VM)
"b" (flags | CLONE_VM | CLONE_TLB)
: "memory");
return retval;
}
......@@ -762,7 +762,7 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
unsigned long new_cr3 = next->cr3;
tss->cr3 = new_cr3;
if (new_cr3 != prev->cr3)
if (new_cr3 != prev->cr3)
asm volatile("movl %0,%%cr3": :"r" (new_cr3));
}
......
......@@ -65,8 +65,7 @@ endif
checks:
@$(MAKE) -C arch/$(ARCH)/kernel checks
BOOT_TARGETS = netboot znetboot zImage floppy install \
vmlinux.coff znetboot.initrd zImage.initrd vmlinux.coff.initrd
BOOT_TARGETS = zImage znetboot.initrd zImage.initrd
ifdef CONFIG_MBX
$(BOOT_TARGETS): $(CHECKS) vmlinux
......@@ -77,6 +76,24 @@ $(BOOT_TARGETS): $(CHECKS) vmlinux
@$(MAKECOFFBOOT) $@
@$(MAKEBOOT) $@
@$(MAKECHRPBOOT) $@
znetboot: $(CHECKS) vmlinux
ifdef CONFIG_SMP
ifdef CONFIG_PPC64
cp -f vmlinux /tftpboot/vmlinux.smp.64
else
cp -f vmlinux /tftpboot/vmlinux.smp
endif
else
ifdef CONFIG_PPC64
cp -f vmlinux /tftpboot/vmlinux.64
else
cp -f vmlinux /tftpboot/vmlinux
endif
endif
@$(MAKECOFFBOOT) $@
@$(MAKEBOOT) $@
@$(MAKECHRPBOOT) $@
endif
pmac_config:
......
......@@ -108,7 +108,7 @@ __initfunc(void amiga_init_IRQ(void))
custom.intreq = 0x7fff;
#ifdef CONFIG_APUS
/* Clear any inter-CPU interrupt requests. Circumvents bug in
/* Clear any inter-CPU interupt requests. Circumvents bug in
Blizzard IPL emulation HW (or so it appears). */
APUS_WRITE(APUS_INT_LVL, INTLVL_SETRESET | INTLVL_MASK);
......
......@@ -30,6 +30,7 @@ void (*kbd_reset_setup) (char *, int) __initdata = 0;
#include <linux/kd.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/setup.h>
......
......@@ -71,7 +71,7 @@ chrpboot(int a1, int a2, void *prom)
sa = *(unsigned long *)PROG_START+PROG_START;
printf("start address = 0x%x\n\r", sa);
(*(void (*)())sa)(a1, a2, prom, 0, 0);
(*(void (*)())sa)(0, 0, prom, a1, a2);
printf("returned?\n\r");
......
# $Id: config.in,v 1.94 1999/06/25 11:00:07 davem Exp $
# $Id: config.in,v 1.95 1999/07/03 08:57:06 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
......
......@@ -97,7 +97,7 @@ int gg2_pcibios_write_config_dword(unsigned char bus, unsigned char dev_fn,
#define python_config_data(bus) ((0xfef00000+0xf8010)-(bus*0x100000))
#define PYTHON_CFA(b, d, o) (0x80 | ((b<<6) << 8) | ((d) << 16) \
| (((o) & ~3) << 24))
unsigned int python_busnr = 1;
unsigned int python_busnr = 0;
int python_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn,
unsigned char offset, unsigned char *val)
......@@ -347,7 +347,7 @@ chrp_setup_pci_ptrs(void)
} else if ( !strncmp("IBM,7043-260",
get_property(find_path_device("/"), "name", NULL),12) )
{
pci_dram_offset = 0x80000000;
pci_dram_offset = 0x0;
isa_mem_base = 0xc0000000;
isa_io_base = 0xf8000000;
}
......
......@@ -71,6 +71,8 @@ void chrp_calibrate_decr(void);
void chrp_time_init(void);
void chrp_setup_pci_ptrs(void);
extern void chrp_progress(char *, unsigned short);
void chrp_event_scan(void);
extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
extern int pckbd_getkeycode(unsigned int scancode);
......@@ -589,10 +591,10 @@ __initfunc(void
chrp_setup_pci_ptrs();
#ifdef CONFIG_BLK_DEV_INITRD
/* take care of initrd if we have one */
if ( r3 )
if ( r6 )
{
initrd_start = r3 + KERNELBASE;
initrd_end = r3 + r4 + KERNELBASE;
initrd_start = r6 + KERNELBASE;
initrd_end = r6 + r7 + KERNELBASE;
}
#endif /* CONFIG_BLK_DEV_INITRD */
......@@ -658,6 +660,8 @@ __initfunc(void
ppc_md.ppc_kbd_sysrq_xlate = pckbd_sysrq_xlate;
SYSRQ_KEY = 0x54;
#endif
if ( rtas_data )
ppc_md.progress = chrp_progress;
#endif
#endif
......@@ -678,16 +682,33 @@ __initfunc(void
* Print the banner, then scroll down so boot progress
* can be printed. -- Cort
*/
chrp_progress("Linux/PPC "UTS_RELEASE"\n");
if ( ppc_md.progress ) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
}
void chrp_progress(char *s)
void chrp_progress(char *s, unsigned short hex)
{
extern unsigned int rtas_data;
unsigned long width;
struct device_node *root;
char *os = s;
if ( (root = find_path_device("/rtas")) )
width = *(unsigned long *)get_property(root, "ibm,display-line-length", NULL);
else
width = 0x10;
if ( (_machine != _MACH_chrp) || !rtas_data )
return;
call_rtas( "display-character", 1, 1, NULL, '\r' );
while ( *s )
call_rtas( "display-character", 1, 1, NULL, *s++ );
if ( call_rtas( "display-character", 1, 1, NULL, '\r' ) )
{
/* assume no display-character RTAS method - use hex display */
return;
}
while ( *os )
call_rtas( "display-character", 1, 1, NULL, *os++ );
/* scan back for the last newline or carriage return */
for ( os-- ; (*os != '\n') && (*os != '\r') && (os > s) ; os--, width-- )
/* nothing */ ;
/*while ( width-- )*/
call_rtas( "display-character", 1, 1, NULL, ' ' );
}
/*
* arch/ppc/kernel/head.S
*
* $Id: head.S,v 1.133 1999/05/20 05:13:08 cort Exp $
* $Id: head.S,v 1.134 1999/06/30 05:05:52 paulus Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
......@@ -300,7 +300,7 @@ __secondary_start:
oris r21,r11,(KERNELBASE+0x20000000)@h
mtspr DBAT2L,r18 /* N.B. 6xx (not 601) have valid */
mtspr DBAT2U,r21 /* bit in upper BAT register */
mtspr IBAT2L,r28
mtspr IBAT2L,r18
mtspr IBAT2U,r21
#endif /* CONFIG_PPC64 */
#endif
......@@ -1978,7 +1978,13 @@ DoSyscall:
lwz r6,GPR4(r1)
lwz r7,GPR5(r1)
lwz r8,GPR6(r1)
mr r9,r2
lwz r9,GPR7(r1)
bl printk
lis r3,77f@ha
addi r3,r3,77f@l
lwz r4,GPR8(r1)
lwz r5,GPR9(r1)
mr r6,r2
bl printk
lwz r0,GPR0(r1)
lwz r3,GPR3(r1)
......@@ -2081,7 +2087,8 @@ syscall_ret_2:
66: li r3,ENOSYS
b 52b
#ifdef SHOW_SYSCALLS
7: .string "syscall %d(%x, %x, %x, %x), current=%p\n"
7: .string "syscall %d(%x, %x, %x, %x, %x, "
77: .string "%x, %x), current=%p\n"
79: .string " -> %x\n"
.align 2
#endif
......
......@@ -50,6 +50,7 @@
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/gg2.h>
#include <asm/cache.h>
#include <asm/prom.h>
......
......@@ -182,6 +182,8 @@ __initfunc(void openpic_init(int main_pic))
if (!OpenPIC)
panic("No OpenPIC found");
if ( ppc_md.progress ) ppc_md.progress("openpic enter",0x122);
t = openpic_read(&OpenPIC->Global.Feature_Reporting0);
switch (t & OPENPIC_FEATURE_VERSION_MASK) {
case 1:
......@@ -201,6 +203,7 @@ __initfunc(void openpic_init(int main_pic))
OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT) + 1;
NumSources = ((t & OPENPIC_FEATURE_LAST_SOURCE_MASK) >>
OPENPIC_FEATURE_LAST_SOURCE_SHIFT) + 1;
printk("OpenPIC Version %s (%d CPUs and %d IRQ sources) at %p\n", version,
NumProcessors, NumSources, OpenPIC);
timerfreq = openpic_read(&OpenPIC->Global.Timer_Frequency);
......@@ -212,6 +215,8 @@ __initfunc(void openpic_init(int main_pic))
if ( main_pic )
{
if ( ppc_md.progress ) ppc_md.progress("openpic main",0x3ff);
/* Initialize timer interrupts */
for (i = 0; i < OPENPIC_NUM_TIMERS; i++) {
/* Disabled, Priority 0 */
......@@ -226,9 +231,12 @@ __initfunc(void openpic_init(int main_pic))
openpic_initipi(i, 0, OPENPIC_VEC_IPI+i);
}
if ( ppc_md.progress ) ppc_md.progress("openpic initirq",0x3bb);
/* Initialize external interrupts */
/* SIOint (8259 cascade) is special */
openpic_initirq(0, 8, OPENPIC_VEC_SOURCE, 1, 1);
if ( ppc_md.progress ) ppc_md.progress("openpic map",0x3cc);
/* Processor 0 */
openpic_mapirq(0, 1<<0);
for (i = 1; i < NumSources; i++) {
......@@ -248,6 +256,7 @@ __initfunc(void openpic_init(int main_pic))
openpic_set_priority(0, 0);
openpic_disable_8259_pass_through();
}
if ( ppc_md.progress ) ppc_md.progress("openpic exit",0x222);
}
......
......@@ -9,6 +9,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/config.h>
#include <linux/pci.h>
#include <linux/openpic.h>
#include <asm/processor.h>
......
......@@ -58,6 +58,7 @@
#include <asm/ide.h>
#include <asm/machdep.h>
#include <asm/keyboard.h>
#include <asm/dma.h>
#include "time.h"
#include "local_irq.h"
......@@ -204,11 +205,12 @@ kdev_t sd_find_target(void *host, int tgt)
{
Scsi_Disk *dp;
int i;
#ifdef CONFIG_BLK_DEV_SD
for (dp = rscsi_disks, i = 0; i < sd_template.dev_max; ++i, ++dp)
if (dp->device != NULL && dp->device->host == host
&& dp->device->id == tgt)
return MKDEV_SD(i);
#endif /* CONFIG_BLK_DEV_SD */
return 0;
}
#endif
......@@ -517,13 +519,13 @@ pmac_halt(void)
void
pmac_ide_insw(ide_ioreg_t port, void *buf, int ns)
{
_insw_ns(port+_IO_BASE, buf, ns);
_insw_ns((unsigned short *)(port+_IO_BASE), buf, ns);
}
void
pmac_ide_outsw(ide_ioreg_t port, void *buf, int ns)
{
_outsw_ns(port+_IO_BASE, buf, ns);
_outsw_ns((unsigned short *)(port+_IO_BASE), buf, ns);
}
int
......
......@@ -107,6 +107,7 @@
#include <asm/system.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/kgdb.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
......
......@@ -40,6 +40,7 @@
#include <asm/residual.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <linux/ide.h>
#include <asm/ide.h>
#include <asm/cache.h>
#include <asm/dma.h>
......
/*
* $Id: process.c,v 1.86 1999/06/17 21:53:46 cort Exp $
* $Id: process.c,v 1.87 1999/07/03 08:57:07 davem Exp $
*
* linux/arch/ppc/kernel/process.c
*
......
/*
* $Id: prom.c,v 1.61 1999/06/17 06:05:52 paulus Exp $
* $Id: prom.c,v 1.62 1999/07/02 19:59:31 cort Exp $
*
* Procedures for interfacing to the Open Firmware PROM on
* Power Macintosh computers.
......@@ -502,8 +502,8 @@ prom_init(int r3, int r4, prom_entry pp)
return;
/* copy the holding pattern code to someplace safe (8M) */
memcpy( (void *)(8<<20), RELOC(__secondary_hold), 0x10000 );
for (i = 8<<20; i < ((8<<20)+0x10000); i += 32)
memcpy( (void *)(8<<20), RELOC(__secondary_hold), 0x100 );
for (i = 8<<20; i < ((8<<20)+0x100); i += 32)
{
asm volatile("dcbf 0,%0" : : "r" (i) : "memory");
asm volatile("icbi 0,%0" : : "r" (i) : "memory");
......
......@@ -81,6 +81,7 @@ clear_single_step(struct task_struct *task)
regs->msr &= ~MSR_SE;
}
#if 0
/*
* This routine gets a long from any process space by following the page
* tables. NOTE! You should check that the long isn't on a page boundary,
......@@ -283,11 +284,13 @@ static int write_long(struct task_struct * tsk, unsigned long addr,
put_long(tsk, vma,addr,data);
return 0;
}
#endif
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
int ret = -EPERM;
unsigned long flags;
lock_kernel();
if (request == PTRACE_TRACEME) {
......@@ -302,7 +305,10 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
if (pid == 1) /* you may not mess with init */
goto out;
ret = -ESRCH;
if (!(child = find_task_by_pid(pid)))
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
read_unlock(&tasklist_lock); /* FIXME!!! */
if ( !child )
goto out;
ret = -EPERM;
if (request == PTRACE_ATTACH) {
......@@ -322,11 +328,15 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
if (child->flags & PF_PTRACED)
goto out;
child->flags |= PF_PTRACED;
write_lock_irqsave(&tasklist_lock, flags);
if (child->p_pptr != current) {
REMOVE_LINKS(child);
child->p_pptr = current;
SET_LINKS(child);
}
write_unlock_irqrestore(&tasklist_lock, flags);
send_sig(SIGSTOP, child, 1);
ret = 0;
goto out;
......@@ -342,22 +352,19 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
goto out;
switch (request) {
/* If I and D space are separate, these will need to be fixed. */
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
unsigned long tmp;
int copied;
down(&child->mm->mmap_sem);
ret = read_long(child, addr, &tmp);
up(&child->mm->mmap_sem);
if (ret < 0)
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
goto out;
ret = verify_area(VERIFY_WRITE, (void *) data, sizeof(long));
if (!ret)
put_user(tmp, (unsigned long *) data);
ret = put_user(tmp,(unsigned long *) data);
goto out;
}
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
......@@ -391,11 +398,11 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
/* If I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
down(&child->mm->mmap_sem);
ret = write_long(child,addr,data);
up(&child->mm->mmap_sem);
ret = 0;
if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
goto out;
ret = -EIO;
goto out;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & 3) || addr < 0 || addr >= ((PT_FPR0 + 64) << 2))
......@@ -459,9 +466,9 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
goto out;
child->flags &= ~PF_TRACESYS;
set_single_step(child);
wake_up_process(child);
child->exit_code = data;
/* give it a chance to run. */
wake_up_process(child);
ret = 0;
goto out;
}
......@@ -473,9 +480,11 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
child->flags &= ~(PF_PTRACED|PF_TRACESYS);
wake_up_process(child);
child->exit_code = data;
write_lock_irqsave(&tasklist_lock, flags);
REMOVE_LINKS(child);
child->p_pptr = child->p_opptr;
SET_LINKS(child);
write_unlock_irqrestore(&tasklist_lock, flags);
/* make sure the single step bit is not set. */
clear_single_step(child);
ret = 0;
......@@ -493,7 +502,6 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
asmlinkage void syscall_trace(void)
{
lock_kernel();
if ((current->flags & (PF_PTRACED|PF_TRACESYS))
!= (PF_PTRACED|PF_TRACESYS))
goto out;
......@@ -511,5 +519,4 @@ asmlinkage void syscall_trace(void)
current->exit_code = 0;
}
out:
unlock_kernel();
}
/*
* $Id: setup.c,v 1.136 1999/06/18 07:11:35 cort Exp $
* $Id: setup.c,v 1.138 1999/07/11 16:32:21 cort Exp $
* Common prep/pmac/chrp boot and setup code.
*/
......@@ -32,6 +32,7 @@
#endif
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/ide.h>
extern void pmac_init(unsigned long r3,
unsigned long r4,
......@@ -331,6 +332,7 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5,
#ifdef __SMP__
if ( first_cpu_booted ) return 0;
#endif /* __SMP__ */
if ( ppc_md.progress ) ppc_md.progress("id mach(): start", 0x100);
#ifndef CONFIG_MACH_SPECIFIC
/* boot loader will tell us if we're APUS */
......@@ -477,13 +479,13 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5,
default:
printk("Unknown machine type in identify_machine!\n");
}
/* Check for nobats option (used in mapin_ram). */
if (strstr(cmd_line, "nobats")) {
extern int __map_without_bats;
__map_without_bats = 1;
}
if ( ppc_md.progress ) ppc_md.progress("id mach(): done", 0x200);
return 0;
}
......@@ -539,6 +541,8 @@ __initfunc(void setup_arch(char **cmdline_p,
*memory_end_p = (unsigned long) end_of_DRAM;
ppc_md.setup_arch(memory_start_p, memory_end_p);
/* clear the progress line */
if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
}
void ppc_generic_ide_fix_driveid(struct hd_driveid *id)
......
/*
* $Id: smp.c,v 1.54 1999/06/24 17:13:34 cort Exp $
* $Id: smp.c,v 1.55 1999/07/03 08:57:09 davem Exp $
*
* Smp support for ppc.
*
......
/*
* $Id: init.c,v 1.170 1999/06/29 12:33:51 davem Exp $
* $Id: init.c,v 1.171 1999/07/08 23:20:14 cort Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
......@@ -50,6 +50,7 @@
#include <asm/mbx.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
/* APUS includes */
#include <asm/setup.h>
#include <asm/amigahw.h>
......@@ -961,7 +962,6 @@ static void *MMU_get_page(void)
} else {
p = find_mem_piece(PAGE_SIZE, PAGE_SIZE);
}
/*memset(p, 0, PAGE_SIZE);*/
__clear_user(p, PAGE_SIZE);
return p;
}
......@@ -1027,7 +1027,7 @@ __initfunc(void MMU_init(void))
#ifdef __SMP__
if ( first_cpu_booted ) return;
#endif /* __SMP__ */
if ( ppc_md.progress ) ppc_md.progress("MMU:enter", 0x111);
#ifndef CONFIG_8xx
if (have_of)
end_of_DRAM = pmac_find_end_of_memory();
......@@ -1038,10 +1038,12 @@ __initfunc(void MMU_init(void))
else /* prep */
end_of_DRAM = prep_find_end_of_memory();
if ( ppc_md.progress ) ppc_md.progress("MMU:hash init", 0x300);
hash_init();
_SDR1 = __pa(Hash) | (Hash_mask >> 10);
ioremap_base = 0xf8000000;
if ( ppc_md.progress ) ppc_md.progress("MMU:mapin", 0x301);
/* Map in all of RAM starting at KERNELBASE */
mapin_ram();
......@@ -1050,6 +1052,7 @@ __initfunc(void MMU_init(void))
* the io areas. RAM was mapped by mapin_ram().
* -- Cort
*/
if ( ppc_md.progress ) ppc_md.progress("MMU:setbat", 0x302);
switch (_machine) {
case _MACH_prep:
setbat(0, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
......@@ -1102,6 +1105,7 @@ __initfunc(void MMU_init(void))
ioremap(0x80000000, 0x4000);
ioremap(0x81000000, 0x4000);
#endif /* CONFIG_8xx */
if ( ppc_md.progress ) ppc_md.progress("MMU:exit", 0x211);
}
/*
......@@ -1310,7 +1314,7 @@ __initfunc(unsigned long *pmac_find_end_of_memory(void))
int i;
/* max amount of RAM we allow -- Cort */
#define RAM_LIMIT (768<<20)
#define RAM_LIMIT (256<<20)
memory_node = find_devices("memory");
if (memory_node == NULL) {
......@@ -1509,6 +1513,7 @@ __initfunc(static void hash_init(void))
extern unsigned int hash_page_patch_A[], hash_page_patch_B[],
hash_page_patch_C[], hash_page[];
if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
/*
* Allow 64k of hash table for every 16MB of memory,
* up to a maximum of 2MB.
......@@ -1542,6 +1547,7 @@ __initfunc(static void hash_init(void))
}
#endif /* NO_RELOAD_HTAB */
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
/* Find some memory for the hash table. */
if ( Hash_size )
Hash = find_mem_piece(Hash_size, Hash_size);
......@@ -1557,11 +1563,11 @@ __initfunc(static void hash_init(void))
#else
#define b(x) (x)
#endif
/*memset(Hash, 0, Hash_size);*/
__clear_user(Hash, Hash_size);
Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
__clear_user(Hash, Hash_size);
if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
/*
* Patch up the instructions in head.S:hash_page
*/
......@@ -1601,5 +1607,6 @@ __initfunc(static void hash_init(void))
flush_icache_range((unsigned long) b(hash_page),
(unsigned long) b(hash_page + 1));
}
if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
#endif /* ndef CONFIG_8xx */
......@@ -375,7 +375,7 @@ static int loop_set_fd(struct loop_device *lo, kdev_t dev, unsigned int arg)
a file structure */
lo->lo_backing_file = NULL;
} else if (S_ISREG(inode->i_mode)) {
if (!inode->i_op->bmap) {
if (!inode->i_op->get_block) {
printk(KERN_ERR "loop: device has no block access/not implemented\n");
goto out_putf;
}
......
......@@ -13,7 +13,7 @@
* This driver is for PCnet32 and PCnetPCI based ethercards
*/
static const char *version = "pcnet32.c:v1.21 31.3.99 tsbogend@alpha.franken.de\n";
static const char *version = "pcnet32.c:v1.23 6.7.1999 tsbogend@alpha.franken.de\n";
#include <linux/config.h>
#include <linux/module.h>
......@@ -149,6 +149,13 @@ static int full_duplex[MAX_UNITS] = {0, };
* rewritten PCI card detection
* added dwio mode to get driver working on some PPC machines
* v1.21: added mii selection and mii ioctl
* v1.22: changed pci scanning code to make PPC people happy
* fixed switching to 32bit mode in pcnet32_open() (thanks
* to Michael Richard <mcr@solidum.com> for noticing this one)
* added sub vendor/device id matching (thanks again to
* Michael Richard <mcr@solidum.com>)
* added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
* v1.23 fixed small bug, when manual selecting MII speed/duplex
*/
......@@ -185,6 +192,16 @@ static int full_duplex[MAX_UNITS] = {0, };
#define PCNET32_TOTAL_SIZE 0x20
/* some PCI ids */
#ifndef PCI_DEVICE_ID_AMD_LANCE
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#endif
#ifndef PCI_DEVICE_ID_AMD_PCNETHOME
#define PCI_DEVICE_ID_AMD_PCNETHOME 0x2001
#endif
#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
/* The PCNET32 Rx and Tx ring descriptors. */
......@@ -272,14 +289,19 @@ enum pci_flags_bit {
struct pcnet32_pci_id_info {
const char *name;
u16 vendor_id, device_id, device_id_mask, flags;
u16 vendor_id, device_id, svid, sdid, flags;
int io_size;
int (*probe1) (struct device *, unsigned long, unsigned char, int, int);
};
static struct pcnet32_pci_id_info pcnet32_tbl[] = {
{ "AMD PCnetPCI series",
0x1022, 0x2000, 0xfffe, PCI_USES_IO|PCI_USES_MASTER, PCNET32_TOTAL_SIZE,
PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, 0, 0,
PCI_USES_IO|PCI_USES_MASTER, PCNET32_TOTAL_SIZE,
pcnet32_probe1},
{ "AMD PCnetHome series",
PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_PCNETHOME, 0, 0,
PCI_USES_IO|PCI_USES_MASTER, PCNET32_TOTAL_SIZE,
pcnet32_probe1},
{0,}
};
......@@ -418,30 +440,27 @@ int __init pcnet32_probe (struct device *dev)
#if defined(CONFIG_PCI)
if (pci_present()) {
struct pci_dev *pdev;
unsigned char pci_bus, pci_device_fn;
int pci_index;
struct pci_dev *pdev = NULL;
printk("pcnet32.c: PCI bios is present, checking for devices...\n");
for (pci_index = 0; pci_index < 0xff; pci_index++) {
u16 vendor, device, pci_command;
while ((pdev = pci_find_class (PCI_CLASS_NETWORK_ETHERNET<<8, pdev))) {
u16 pci_command;
int chip_idx;
u16 sdid,svid;
if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
pci_index, &pci_bus, &pci_device_fn) != PCIBIOS_SUCCESSFUL)
break;
pcibios_read_config_word(pci_bus, pci_device_fn, PCI_VENDOR_ID, &vendor);
pcibios_read_config_word(pci_bus, pci_device_fn, PCI_DEVICE_ID, &device);
pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &sdid);
pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &svid);
for (chip_idx = 0; pcnet32_tbl[chip_idx].vendor_id; chip_idx++)
if (vendor == pcnet32_tbl[chip_idx].vendor_id &&
(device & pcnet32_tbl[chip_idx].device_id_mask) == pcnet32_tbl[chip_idx].device_id)
if ((pdev->vendor == pcnet32_tbl[chip_idx].vendor_id) &&
(pdev->device == pcnet32_tbl[chip_idx].device_id) &&
(pcnet32_tbl[chip_idx].svid == 0 ||
(svid == pcnet32_tbl[chip_idx].svid)) &&
(pcnet32_tbl[chip_idx].sdid == 0 ||
(sdid == pcnet32_tbl[chip_idx].sdid)))
break;
if (pcnet32_tbl[chip_idx].vendor_id == 0)
continue;
pdev = pci_find_slot(pci_bus, pci_device_fn);
ioaddr = pdev->base_address[0] & PCI_BASE_ADDRESS_IO_MASK;
#if defined(ADDR_64BITS) && defined(__alpha__)
ioaddr |= ((long)pdev->base_address[1]) << 32;
......@@ -541,6 +560,10 @@ pcnet32_probe1(struct device *dev, unsigned long ioaddr, unsigned char irq_line,
chipname = "PCnet/FAST+ 79C972";
fdx = 1; mii = 1;
break;
case 0x2625:
chipname = "PCnet/FAST III 79C973";
fdx = 1; mii = 1;
break;
case 0x2626:
chipname = "PCnet/Home 79C978";
fdx = 1;
......@@ -561,6 +584,9 @@ pcnet32_probe1(struct device *dev, unsigned long ioaddr, unsigned char irq_line,
printk("pcnet32: pcnet32 media reset to %#x.\n", media);
a->write_bcr (ioaddr, 49, media);
break;
case 0x2627:
chipname = "PCnet/FAST III 79C975";
fdx = 1; mii = 1;
default:
printk("pcnet32: PCnet version %#x, no PCnet32 chip.\n",chip_version);
return ENODEV;
......@@ -693,7 +719,7 @@ pcnet32_open(struct device *dev)
lp->a.reset (ioaddr);
/* switch pcnet32 to 32bit mode */
lp->a.write_csr (ioaddr, 20, 2);
lp->a.write_bcr (ioaddr, 20, 2);
if (pcnet32_debug > 1)
printk("%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
......@@ -725,7 +751,7 @@ pcnet32_open(struct device *dev)
val |= 0x10;
lp->a.write_csr (ioaddr, 124, val);
if (lp->mii & (lp->options & PORT_ASEL)) {
if (lp->mii & !(lp->options & PORT_ASEL)) {
val = lp->a.read_bcr (ioaddr, 32) & ~0x38; /* disable Auto Negotiation, set 10Mpbs, HD */
if (lp->options & PORT_FD)
val |= 0x10;
......@@ -952,7 +978,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
struct device *dev = (struct device *)dev_id;
struct pcnet32_private *lp;
unsigned long ioaddr;
u16 csr0;
u16 csr0,rap;
int boguscnt = max_interrupt_work;
int must_restart;
......@@ -968,6 +994,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
dev->interrupt = 1;
rap = lp->a.read_rap(ioaddr);
while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8600 && --boguscnt >= 0) {
/* Acknowledge all of the current interrupt sources ASAP. */
lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f);
......@@ -1069,6 +1096,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
/* Clear any other interrupt, and set interrupt enable. */
lp->a.write_csr (ioaddr, 0, 0x7940);
lp->a.write_rap(ioaddr,rap);
if (pcnet32_debug > 4)
printk("%s: exiting interrupt, csr0=%#4.4x.\n",
......
......@@ -757,9 +757,9 @@ static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
free = test_and_clear_bit(PG_free_after, &page->flags);
if (page->owner != -1)
if (page->owner != (void *)-1)
PAGE_BUG(page);
page->owner = (int)current;
page->owner = current;
UnlockPage(page);
if (free)
......@@ -1195,7 +1195,7 @@ static int create_page_buffers(int rw, struct page *page, kdev_t dev, int b[], i
if (!PageLocked(page))
BUG();
if (page->owner != (int)current)
if (page->owner != current)
PAGE_BUG(page);
/*
* Allocate async buffer heads pointing to this page, just for I/O.
......@@ -1557,7 +1557,7 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
}
if (!page->buffers)
BUG();
page->owner = -1;
page->owner = (void *)-1;
head = page->buffers;
bh = head;
......@@ -1606,7 +1606,7 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
} else {
if (!nr && rw == READ) {
SetPageUptodate(page);
page->owner = (int)current;
page->owner = current;
UnlockPage(page);
}
if (nr && (rw == WRITE))
......@@ -1640,7 +1640,7 @@ int block_read_full_page(struct file * file, struct page * page)
blocks = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
iblock = page->offset >> inode->i_sb->s_blocksize_bits;
page->owner = -1;
page->owner = (void *)-1;
head = page->buffers;
bh = head;
nr = 0;
......@@ -1675,7 +1675,7 @@ int block_read_full_page(struct file * file, struct page * page)
* uptodate as well.
*/
SetPageUptodate(page);
page->owner = (int)current;
page->owner = current;
UnlockPage(page);
}
return 0;
......@@ -1969,8 +1969,25 @@ asmlinkage int sys_bdflush(int func, long data)
goto out;
if (func == 1) {
error = sync_old_buffers();
goto out;
struct mm_struct *user_mm;
/*
* bdflush will spend all of it's time in kernel-space,
* without touching user-space, so we can switch it into
* 'lazy TLB mode' to reduce the cost of context-switches
* to and from bdflush.
*/
user_mm = current->mm;
mmget(user_mm);
current->flags |= PF_LAZY_TLB;
error = sync_old_buffers();
current->flags &= ~PF_LAZY_TLB;
SET_PAGE_DIR(current, user_mm->pgd);
mmput(current->mm);
current->mm = user_mm;
goto out;
}
/* Basically func 1 means read param 1, 2 means write param 1, etc */
......
......@@ -31,6 +31,7 @@ static inline int dupfd(struct file *file, unsigned int arg)
return error;
out_putf:
write_unlock(&files->file_lock);
fput(file);
goto out;
}
......
......@@ -182,7 +182,7 @@ int fs_may_remount_ro(struct super_block *sb)
/* Writable file? */
if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
return 0;
goto too_bad;
}
file_list_unlock();
return 1; /* Tis' cool bro. */
......
......@@ -74,8 +74,8 @@
#define PYXIS_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
#define PYXIS_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
#define PYXIS_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
#define PYXIS_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
#define PYXIS_DMA_WIN_BASE_DEFAULT (2UL*1024*1024*1024)
#define PYXIS_DMA_WIN_SIZE_DEFAULT (1UL*1024*1024*1024)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
#define PYXIS_DMA_WIN_BASE alpha_mv.dma_win_base
......
......@@ -5,8 +5,12 @@
#include <linux/tasks.h>
extern unsigned int local_irq_count[NR_CPUS];
extern unsigned long hardirq_no[NR_CPUS];
#ifndef __SMP__
extern int __local_irq_count;
#define local_irq_count(cpu) ((void)(cpu), __local_irq_count)
#else
#define local_irq_count(cpu) (cpu_data[cpu].irq_count)
#endif
/*
* Are we in an interrupt context? Either doing bottom half
......@@ -16,16 +20,16 @@ extern unsigned long hardirq_no[NR_CPUS];
#define in_interrupt() \
({ \
int __cpu = smp_processor_id(); \
(local_irq_count[__cpu] + local_bh_count[__cpu]) != 0; \
(local_irq_count(__cpu) + local_bh_count(__cpu)) != 0; \
})
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) ((void) 0)
#define hardirq_enter(cpu, irq) (local_irq_count[cpu]++)
#define hardirq_exit(cpu, irq) (local_irq_count[cpu]--)
#define hardirq_enter(cpu, irq) (local_irq_count(cpu)++)
#define hardirq_exit(cpu, irq) (local_irq_count(cpu)--)
#define synchronize_irq() barrier()
......@@ -50,21 +54,20 @@ static inline void release_irqlock(int cpu)
static inline void hardirq_enter(int cpu, int irq)
{
++local_irq_count[cpu];
++local_irq_count(cpu);
atomic_inc(&global_irq_count);
hardirq_no[cpu] |= 1L << irq; /* debugging only */
}
static inline void hardirq_exit(int cpu, int irq)
{
hardirq_no[cpu] &= ~(1L << irq); /* debugging only */
atomic_dec(&global_irq_count);
--local_irq_count[cpu];
--local_irq_count(cpu);
}
static inline int hardirq_trylock(int cpu)
{
return !atomic_read(&global_irq_count) && !global_irq_lock.lock;
return (!atomic_read(&global_irq_count)
&& !spin_is_locked(&global_irq_lock));
}
#define hardirq_endlock(cpu) ((void)0)
......
......@@ -66,7 +66,11 @@ extern unsigned long last_asn;
#endif /* __SMP__ */
#define WIDTH_HARDWARE_ASN 8
#ifdef __SMP__
#define WIDTH_THIS_PROCESSOR 5
#else
#define WIDTH_THIS_PROCESSOR 0
#endif
#define ASN_FIRST_VERSION (1UL << (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN))
#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
......@@ -95,12 +99,11 @@ __get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
unsigned long asn = cpu_last_asn(smp_processor_id());
unsigned long next = asn + 1;
if ((next ^ asn) & ~MAX_ASN) {
if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
tbiap();
next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
}
cpu_last_asn(smp_processor_id()) = next;
mm->context = next; /* full version + asn */
return next;
}
......@@ -110,6 +113,12 @@ ev4_get_mmu_context(struct task_struct *p)
/* As described, ASN's are broken. But we can optimize for
switching between threads -- if the mm is unchanged from
current we needn't flush. */
/* ??? May not be needed because EV4 PALcode recognizes that
ASN's are broken and does a tbiap itself on swpctx, under
the "Must set ASN or flush" rule. At least this is true
for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
I'm going to leave this here anyway, just to Be Sure. -- r~ */
if (current->mm != p->mm)
tbiap();
}
......@@ -119,17 +128,23 @@ ev5_get_mmu_context(struct task_struct *p)
{
/* Check if our ASN is of an older version, or on a different CPU,
and thus invalid. */
/* ??? If we have two threads on different cpus, we'll continually
fight over the context. Find a way to record a per-mm, per-cpu
value for the asn. */
long asn = cpu_last_asn(smp_processor_id());
unsigned long asn = cpu_last_asn(smp_processor_id());
struct mm_struct *mm = p->mm;
long mmc = mm->context;
unsigned long mmc = mm->context;
if ((p->tss.mm_context ^ asn) & ~HARDWARE_ASN_MASK) {
if ((mmc ^ asn) & ~HARDWARE_ASN_MASK)
mmc = __get_new_mmu_context(p, mm);
p->tss.mm_context = mmc;
p->tss.asn = mmc & HARDWARE_ASN_MASK;
if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
mmc = __get_new_mmu_context(p, mm);
mm->context = mmc;
}
/* Always update the PCB ASN. Another thread may have allocated
a new mm->context (via flush_tlb_mm) without the ASN serial
number wrapping. We have no way to detect when this is needed. */
p->tss.asn = mmc & HARDWARE_ASN_MASK;
}
#ifdef CONFIG_ALPHA_GENERIC
......
#ifndef _ASM_ALPHA_PARAM_H
#define _ASM_ALPHA_PARAM_H
/* ??? Gross. I don't want to parameterize this, and supposedly the
hardware ignores reprogramming. We also need userland buy-in to the
change in HZ, since this is visible in the wait4 resources etc. */
#ifndef HZ
# define HZ 1024
# ifndef CONFIG_ALPHA_RAWHIDE
# define HZ 1024
# else
# define HZ 1200
# endif
#endif
#define EXEC_PAGESIZE 8192
......
......@@ -49,7 +49,6 @@ ev4_flush_tlb_other(struct mm_struct *mm)
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
mm->context = 0;
get_new_mmu_context(current, mm);
reload_context(current);
}
......
......@@ -61,15 +61,6 @@ struct thread_struct {
*/
unsigned long flags;
/* The full version of the ASN including serial number.
Two threads running on two different processors must of necessity
have different serial numbers. Having this duplicated from
mm->context allows them to be slightly out of sync preventing
the asn from incrementing each and every time the two threads
are scheduled. */
unsigned long mm_context;
/* Perform syscall argument validation (get/set_fs). */
mm_segment_t fs;
......@@ -86,7 +77,7 @@ struct thread_struct {
0, 0, 0, \
0, 0, 0, \
0, 0, 0, \
0, 0, \
0, \
KERNEL_DS \
}
......
......@@ -67,7 +67,7 @@ struct switch_stack {
};
#ifdef __KERNEL__
#define user_mode(regs) ((regs)->ps & 8)
#define user_mode(regs) (((regs)->ps & 8) != 0)
#define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
#endif
......
......@@ -4,7 +4,6 @@
#ifdef __SMP__
#include <linux/tasks.h>
#include <asm/init.h>
#include <asm/pal.h>
struct cpuinfo_alpha {
......@@ -16,7 +15,8 @@ struct cpuinfo_alpha {
unsigned long ipi_count;
unsigned long prof_multiplier;
unsigned long prof_counter;
} __cacheline_aligned;
int irq_count, bh_count;
} __attribute__((aligned(64)));
extern struct cpuinfo_alpha cpu_data[NR_CPUS];
......
......@@ -5,28 +5,33 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>
extern unsigned int local_bh_count[NR_CPUS];
#ifndef __SMP__
extern int __local_bh_count;
#define local_bh_count(cpu) ((void)(cpu), __local_bh_count)
#else
#define local_bh_count(cpu) (cpu_data[cpu].bh_count)
#endif
extern inline void cpu_bh_disable(int cpu)
{
local_bh_count[cpu]++;
local_bh_count(cpu)++;
mb();
}
extern inline void cpu_bh_enable(int cpu)
{
mb();
local_bh_count[cpu]--;
local_bh_count(cpu)--;
}
extern inline int cpu_bh_trylock(int cpu)
{
return local_bh_count[cpu] ? 0 : (local_bh_count[cpu] = 1);
return local_bh_count(cpu) ? 0 : (local_bh_count(cpu) = 1);
}
extern inline void cpu_bh_endlock(int cpu)
{
local_bh_count[cpu] = 0;
local_bh_count(cpu) = 0;
}
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
......
......@@ -65,11 +65,12 @@
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif
#define spin_lock_init(lock) ((void) 0)
#define spin_lock(lock) ((void) 0)
#define spin_trylock(lock) (1)
#define spin_unlock_wait(lock) ((void) 0)
#define spin_unlock(lock) ((void) 0)
#define spin_lock_init(lock) ((void)(lock))
#define spin_lock(lock) ((void)(lock))
#define spin_trylock(lock) ((void)(lock), 1)
#define spin_unlock_wait(lock) ((void)(lock))
#define spin_unlock(lock) ((void)(lock))
#define spin_is_locked(lock) ((void)(lock), 0)
/*
* Read-write spinlocks, allowing multiple readers
......@@ -91,10 +92,10 @@
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif
#define read_lock(lock) ((void) 0)
#define read_unlock(lock) ((void) 0)
#define write_lock(lock) ((void) 0)
#define write_unlock(lock) ((void) 0)
#define read_lock(lock) ((void)(lock))
#define read_unlock(lock) ((void)(lock))
#define write_lock(lock) ((void)(lock))
#define write_unlock(lock) ((void)(lock))
#else /* __SMP__ */
......@@ -131,8 +132,8 @@ typedef struct {
#define spin_lock_init(x) ((x)->lock = 0)
#endif
#define spin_unlock_wait(x) \
({ do { barrier(); } while(((volatile spinlock_t *)x)->lock); })
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); })
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
......
......@@ -5,9 +5,23 @@
/*
* get a new mmu context.. x86's don't know much about contexts,
* but we have to reload the new LDT in exec().
* but we have to reload the new LDT in exec().
*
* We implement lazy MMU context-switching on x86 to optimize context
* switches done to/from kernel threads. Kernel threads 'inherit' the
* previous MM, so Linux doesnt have to flush the TLB. In most cases
* we switch back to the same process so we preserve the TLB cache.
* This all means that kernel threads have about as much overhead as
* a function call ...
*/
#define get_mmu_context(tsk) do { } while(0)
#define get_mmu_context(prev, next) \
do { if (next->flags & PF_LAZY_TLB) \
{ mmget(prev->mm); next->mm = prev->mm; \
next->thread.cr3 = prev->thread.cr3; } } while(0)
#define put_mmu_context(prev, next) \
do { if (prev->flags & PF_LAZY_TLB) \
{ mmput(prev->mm); } } while(0)
#define init_new_context(mm) do { } while(0)
/*
......
......@@ -307,6 +307,9 @@ extern pte_t * __bad_pagetable(void);
do { \
unsigned long __pgdir = __pa(pgdir); \
(tsk)->thread.cr3 = __pgdir; \
/* do not inherit lazy-TLB after exec() */ \
if ((pgdir != swapper_pg_dir) && ((tsk)->flags & PF_LAZY_TLB)) \
(tsk)->flags &= ~PF_LAZY_TLB; \
if ((tsk) == current) \
__asm__ __volatile__("movl %0,%%cr3": :"r" (__pgdir)); \
} while (0)
......
......@@ -39,6 +39,8 @@ struct machdep_calls {
unsigned long heartbeat_reset;
unsigned long heartbeat_count;
void (*progress)(char *, unsigned short);
unsigned char (*nvram_read_val)(int addr);
void (*nvram_write_val)(int addr, unsigned char val);
......
......@@ -75,8 +75,6 @@ extern void enable_kernel_fp(void);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void chrp_progress(char *);
void chrp_event_scan(void);
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
......
......@@ -128,7 +128,7 @@ typedef struct page {
wait_queue_head_t wait;
struct page **pprev_hash;
struct buffer_head * buffers;
int owner; /* temporary debugging check */
void *owner; /* temporary debugging check */
} mem_map_t;
#define get_page(p) do { atomic_inc(&(p)->count); \
......@@ -167,11 +167,11 @@ typedef struct page {
do { int _ret = test_and_set_bit(PG_locked, &(page)->flags); \
if (_ret) PAGE_BUG(page); \
if (page->owner) PAGE_BUG(page); \
page->owner = (int)current; } while (0)
page->owner = current; } while (0)
#define TryLockPage(page) ({ int _ret = test_and_set_bit(PG_locked, &(page)->flags); \
if (!_ret) page->owner = (int)current; _ret; })
if (!_ret) page->owner = current; _ret; })
#define UnlockPage(page) do { \
if (page->owner != (int)current) { \
if (page->owner != current) { \
BUG(); } page->owner = 0; \
if (!test_and_clear_bit(PG_locked, &(page)->flags)) { \
PAGE_BUG(page); } wake_up(&page->wait); } while (0)
......
......@@ -35,6 +35,7 @@ extern unsigned long event;
#define CLONE_PID 0x00001000 /* set if pid shared */
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
#define CLONE_TLB 0x00008000 /* system thread does lazy TLB flushing (kernel-internal only!) */
/*
* These are the constant used to fake the fixed-point load-average
......@@ -333,6 +334,7 @@ struct task_struct {
#define PF_SIGNALED 0x00000400 /* killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_VFORK 0x00001000 /* Wake up parent in mm_release */
#define PF_LAZY_TLB 0x00002000 /* thread does lazy TLB switching */
#define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */
#define PF_DTRACE 0x00200000 /* delayed trace (used on m68k, i386) */
......
......@@ -536,6 +536,8 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
new_flags &= ~(PF_PTRACED|PF_TRACESYS);
if (clone_flags & CLONE_VFORK)
new_flags |= PF_VFORK;
if ((clone_flags & CLONE_TLB) && capable(CAP_SYS_ADMIN))
new_flags |= PF_LAZY_TLB;
p->flags = new_flags;
}
......
......@@ -772,8 +772,18 @@ asmlinkage void schedule(void)
#endif /* __SMP__ */
kstat.context_swtch++;
get_mmu_context(next);
/*
* there are 3 processes which are affected by a context switch:
*
* prev == .... ==> (last => next)
*
* It's the 'much more previous' 'prev' that is on next's stack,
* but prev is set to (the just run) 'last' process by switch_to().
* This might sound slightly confusing but makes tons of sense.
*/
get_mmu_context(prev, next);
switch_to(prev, next, prev);
put_mmu_context(prev, next);
__schedule_tail(prev);
same_process:
......@@ -1921,6 +1931,10 @@ static void show_task(struct task_struct * p)
printk("%5d ", p->p_cptr->pid);
else
printk(" ");
if (p->flags & PF_LAZY_TLB)
printk(" (L-TLB) ");
else
printk(" (NOTLB) ");
if (p->p_ysptr)
printk("%7d", p->p_ysptr->pid);
else
......@@ -2010,5 +2024,11 @@ void __init sched_init(void)
init_bh(TIMER_BH, timer_bh);
init_bh(TQUEUE_BH, tqueue_bh);
init_bh(IMMEDIATE_BH, immediate_bh);
/*
* The boot idle thread does lazy MMU switching as well:
*/
mmget(&init_mm);
current->flags |= PF_LAZY_TLB;
}
......@@ -463,7 +463,7 @@ static inline void __add_to_page_cache(struct page * page,
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
page->flags = flags | ((1 << PG_locked) | (1 << PG_referenced));
page->owner = (int)current; /* REMOVEME */
page->owner = current; /* REMOVEME */
get_page(page);
page->offset = offset;
add_page_to_inode_queue(inode, page);
......@@ -1855,7 +1855,7 @@ generic_file_write(struct file *file, const char *buf,
if (!PageLocked(page)) {
PAGE_BUG(page);
} else {
if (page->owner != (int)current) {
if (page->owner != current) {
PAGE_BUG(page);
}
}
......
......@@ -118,7 +118,7 @@ static int move_page_tables(struct mm_struct * mm,
flush_cache_range(mm, new_addr, new_addr + len);
while ((offset += PAGE_SIZE) < len)
move_one_page(mm, new_addr + offset, old_addr + offset);
zap_page_range(mm, new_addr, new_addr + len);
zap_page_range(mm, new_addr, len);
flush_tlb_range(mm, new_addr, new_addr + len);
return -1;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment