Commit ae0a09f7 authored by Anton Altaparmakov

Merge cantab.net:/home/src/bklinux-2.6

into cantab.net:/home/src/ntfs-2.6
parents 12b00b39 a063296d
......@@ -119,7 +119,7 @@ Linux kernel mailing list:
Kernel traffic:
Weekly summary of kernel list activity (much easier to read)
[http://kt.zork.net/kernel-traffic]
http://www.kerneltraffic.org/kernel-traffic/
Linux USB project:
http://sourceforge.net/projects/linux-usb/
......
......@@ -694,7 +694,7 @@
produced during the week. Published every Thursday.
* Name: "Kernel Traffic"
URL: http://kt.zork.net/kernel-traffic/
URL: http://www.kerneltraffic.org/kernel-traffic/
Keywords: linux-kernel mailing list, weekly kernel news.
Description: Weekly newsletter covering the most relevant
discussions of the linux-kernel mailing list.
......
......@@ -895,21 +895,21 @@ S: Maintained
HEWLETT-PACKARD FIBRE CHANNEL 64-bit/66MHz PCI non-intelligent HBA
P: Chase Maupin
M: Chase Maupin (support@compaq.com)
L: compaqandlinux@cpqlin.van-dijk.net
S: Supported
M: chase.maupin@hp.com
L: iss_storagedev@hp.com
S: Maintained
HEWLETT-PACKARD SMART2 RAID DRIVER
P: Francis Wiran
M: Francis Wiran <support@compaq.com>
L: compaqandlinux@cpqlin.van-dijk.net
S: Supported
M: francis.wiran@hp.com
L: iss_storagedev@hp.com
S: Maintained
HEWLETT-PACKARD SMART CISS RAID DRIVER
P: Mike Miller, Michael Ni
M: Mike Miller, Michael Ni <support@compaq.com>
L: compaqandlinux@cpqlin.van-dijk.net
S: Supported
P: Mike Miller
M: mike.miller@hp.com
L: iss_storagedev@hp.com
S: Supported
HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
P: Jaroslav Kysela
......
......@@ -447,6 +447,16 @@ sys_call_table:
.quad sys_stat64 /* 425 */
.quad sys_lstat64
.quad sys_fstat64
.quad sys_ni_syscall /* sys_vserver */
.quad sys_ni_syscall /* sys_mbind */
.quad sys_ni_syscall /* sys_get_mempolicy */
.quad sys_ni_syscall /* sys_set_mempolicy */
.quad sys_mq_open
.quad sys_mq_unlink
.quad sys_mq_timedsend
.quad sys_mq_timedreceive /* 435 */
.quad sys_mq_notify
.quad sys_mq_getsetattr
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
......
......@@ -636,7 +636,7 @@ dmabounce_unregister_dev(struct device *dev)
}
if (!list_empty(&device_info->safe_buffers)) {
printk(KERN_ERR,
printk(KERN_ERR
"%s: Removing from dmabounce with pending buffers!\n",
dev->bus_id);
BUG();
......
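The dmabounce fix above removes a stray comma after KERN_ERR. The log level is a string literal (roughly "<3>" in 2.6-era kernels) meant to be concatenated with the format string, so passing it as a separate argument makes the level string the format and shifts every later argument by one. A minimal illustration, not part of the commit:

/* Illustrative only: KERN_ERR is a string-literal prefix, not an argument. */
printk(KERN_ERR, "%s: pending buffers!\n", dev->bus_id);  /* wrong: "<3>" becomes the format */
printk(KERN_ERR "%s: pending buffers!\n", dev->bus_id);   /* right: level fused with the format */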
......@@ -219,9 +219,7 @@ static inline void dump_cache(const char *prefix, unsigned int cache)
static void __init dump_cpu_info(void)
{
unsigned int info;
asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (info));
unsigned int info = read_cpuid(CPUID_CACHETYPE);
if (info != processor_id) {
printk("CPU: D %s cache\n", cache_types[CACHE_TYPE(info)]);
......@@ -803,9 +801,7 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);
{
unsigned int cache_info;
asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_info));
unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
if (cache_info != processor_id) {
seq_printf(m, "Cache type\t: %s\n"
"Cache clean\t: %s\n"
......
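Both setup.c hunks above replace a hand-rolled mrc with read_cpuid(CPUID_CACHETYPE). As a rough sketch of what that helper amounts to on 32-bit ARM (the kernel's version is a macro parameterised by the register selector; the function below is illustrative and hard-codes selector 1, the cache type register):

/* Illustrative only: the same coprocessor read the removed lines issued
 * directly.  Selector 0 of c0 is the main ID register (processor_id),
 * selector 1 is the cache type register compared against it above. */
static inline unsigned int read_cache_type(void)
{
	unsigned int val;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (val));
	return val;
}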
......@@ -40,7 +40,7 @@ ENTRY(_find_first_zero_bit_le)
*/
ENTRY(_find_next_zero_bit_le)
teq r1, #0
beq 2b
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ldrb r3, [r0, r2, lsr #3]
......@@ -74,7 +74,7 @@ ENTRY(_find_first_bit_le)
*/
ENTRY(_find_next_bit_le)
teq r1, #0
beq 2b
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
ldrb r3, [r0, r2, lsr #3]
......@@ -101,15 +101,18 @@ ENTRY(_find_first_zero_bit_be)
RETINSTR(mov,pc,lr)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
ldrb r3, [r0, r3, lsr #3]
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
orreq r2, r2, #7 @ if zero, then no bits here
addeq r2, r2, #1 @ align bit pointer
beq 2b @ loop for next bit
bne .found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENTRY(_find_first_bit_be)
teq r1, #0
......@@ -126,14 +129,17 @@ ENTRY(_find_first_bit_be)
RETINSTR(mov,pc,lr)
ENTRY(_find_next_bit_be)
teq r1, #0
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
ldrb r3, [r0, r3, lsr #3]
movs r3, r3, lsr ip @ shift off unused bits
orreq r2, r2, #7 @ if zero, then no bits here
addeq r2, r2, #1 @ align bit pointer
beq 2b @ loop for next bit
bne .found
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
#endif
......
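The rewritten tails of _find_next_zero_bit_be and _find_next_bit_be above also fix the size-zero branch (beq 3b rather than 2b) and drop the conditional execution: a non-zero shifted byte branches straight to .found, while an empty byte rounds the bit index up to the next byte boundary and re-enters the loop. What the orr/add pair computes, in C terms (illustrative only; the entry path has already ruled out byte-aligned indices):

/* Illustrative only: what "orr r2, r2, #7" + "add r2, r2, #1" compute.
 * e.g. bit 13 -> 16, i.e. the first bit of the next byte. */
static unsigned int next_byte_bit(unsigned int bit)
{
	return (bit | 7) + 1;
}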
......@@ -113,7 +113,7 @@ static void __init idp_map_io(void)
MACHINE_START(PXA_IDP, "Accelent Xscale IDP")
MAINTAINER("Accelent Systems Inc.")
BOOT_MEM(0xa0000000, 0x40000000, 0xfc000000)
BOOT_MEM(0xa0000000, 0x40000000, io_p2v(0x40000000))
MAPIO(idp_map_io)
INITIRQ(idp_init_irq)
INIT_MACHINE(idp_init)
......
......@@ -7,12 +7,10 @@
*
* Original (leds-footbridge.c) by Russell King
*
* See leds.h for bit definitions. The first version defines D28 on the
* Lubbock dev board as the heartbeat, and D27 as the Sys_busy led.
* There's plenty more if you're interested in adding them :)
* Major surgery on April 2004 by Nicolas Pitre for less global
* namespace collision. Mostly adapted the Mainstone version.
*/
#include <linux/config.h>
#include <linux/init.h>
......@@ -22,6 +20,21 @@
#include "leds.h"
/*
* 8 discrete leds available for general use:
*
* Note: bits [15-8] are used to enable/blank the 8 7 segment hex displays
* so be sure to not monkey with them here.
*/
#define D28 (1 << 0)
#define D27 (1 << 1)
#define D26 (1 << 2)
#define D25 (1 << 3)
#define D24 (1 << 4)
#define D23 (1 << 5)
#define D22 (1 << 6)
#define D21 (1 << 7)
#define LED_STATE_ENABLED 1
#define LED_STATE_CLAIMED 2
......@@ -37,7 +50,7 @@ void lubbock_leds_event(led_event_t evt)
switch (evt) {
case led_start:
hw_led_state = HEARTBEAT_LED | SYS_BUSY_LED;
hw_led_state = 0;
led_state = LED_STATE_ENABLED;
break;
......@@ -47,30 +60,27 @@ void lubbock_leds_event(led_event_t evt)
case led_claim:
led_state |= LED_STATE_CLAIMED;
hw_led_state = HEARTBEAT_LED | SYS_BUSY_LED;
hw_led_state = 0;
break;
case led_release:
led_state &= ~LED_STATE_CLAIMED;
hw_led_state = HEARTBEAT_LED | SYS_BUSY_LED;
hw_led_state = 0;
break;
#ifdef CONFIG_LEDS_TIMER
case led_timer:
if (!(led_state & LED_STATE_CLAIMED))
hw_led_state ^= HEARTBEAT_LED;
hw_led_state ^= D26;
break;
#endif
#ifdef CONFIG_LEDS_CPU
case led_idle_start:
if (!(led_state & LED_STATE_CLAIMED))
hw_led_state |= SYS_BUSY_LED;
hw_led_state &= ~D27;
break;
case led_idle_end:
if (!(led_state & LED_STATE_CLAIMED))
hw_led_state &= ~SYS_BUSY_LED;
hw_led_state |= D27;
break;
#endif
......@@ -78,29 +88,27 @@ void lubbock_leds_event(led_event_t evt)
break;
case led_green_on:
if (led_state & LED_STATE_CLAIMED)
hw_led_state &= ~HEARTBEAT_LED;
hw_led_state |= D21;;
break;
case led_green_off:
if (led_state & LED_STATE_CLAIMED)
hw_led_state |= HEARTBEAT_LED;
hw_led_state &= ~D21;
break;
case led_amber_on:
hw_led_state |= D22;;
break;
case led_amber_off:
hw_led_state &= ~D22;
break;
case led_red_on:
if (led_state & LED_STATE_CLAIMED)
hw_led_state &= ~SYS_BUSY_LED;
hw_led_state |= D23;;
break;
case led_red_off:
if (led_state & LED_STATE_CLAIMED)
hw_led_state |= SYS_BUSY_LED;
hw_led_state &= ~D23;
break;
default:
......@@ -108,27 +116,9 @@ void lubbock_leds_event(led_event_t evt)
}
if (led_state & LED_STATE_ENABLED)
{
switch (hw_led_state) {
case 0: // all on
HEARTBEAT_LED_ON;
SYS_BUSY_LED_ON;
break;
case 1: // turn off heartbeat, status on:
HEARTBEAT_LED_OFF;
SYS_BUSY_LED_ON;
break;
case 2: // status off, heartbeat on:
HEARTBEAT_LED_ON;
SYS_BUSY_LED_OFF;
break;
case 3: // turn them both off...
HEARTBEAT_LED_OFF;
SYS_BUSY_LED_OFF;
break;
default:
break;
}
}
LUB_DISC_BLNK_LED = (LUB_DISC_BLNK_LED | 0xff) & ~hw_led_state;
else
LUB_DISC_BLNK_LED |= 0xff;
local_irq_restore(flags);
}
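With the switch over hw_led_state gone, the single register write at the end drives all eight discrete LEDs at once: OR-ing in 0xff first leaves the 7-segment enable bits [15:8] untouched, and clearing the bits set in hw_led_state lights those LEDs (the register appears to be active-low). A small worked example with an assumed prior register value, not taken from the commit:

/* Illustrative only: LUB_DISC_BLNK_LED-style update with assumed contents.
 * 0xA500 is an assumed prior value (hex displays configured, LEDs off). */
#include <stdio.h>

int main(void)
{
	unsigned int reg = 0xA500;
	unsigned int hw_led_state = (1 << 2) | (1 << 1);	/* D26 | D27 lit */

	reg = (reg | 0xff) & ~hw_led_state;	/* 0xA5FF & ~0x06 = 0xA5F9 */
	printf("%#x\n", reg);
	return 0;
}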
......@@ -28,7 +28,6 @@
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/arch/irq.h>
#include <asm/arch/udc.h>
#include <asm/hardware/sa1111.h>
......@@ -156,11 +155,7 @@ static void __init lubbock_init(void)
}
static struct map_desc lubbock_io_desc[] __initdata = {
/* virtual physical length type */
{ 0xf0000000, 0x08000000, 0x00100000, MT_DEVICE }, /* CPLD */
{ 0xf1000000, 0x0c000000, 0x00100000, MT_DEVICE }, /* LAN91C96 IO */
{ 0xf1100000, 0x0e000000, 0x00100000, MT_DEVICE }, /* LAN91C96 Attr */
{ 0xf4000000, 0x10000000, 0x00800000, MT_DEVICE }, /* SA1111 */
{ LUBBOCK_FPGA_VIRT, LUBBOCK_FPGA_PHYS, 0x00100000, MT_DEVICE }, /* CPLD */
};
static void __init lubbock_map_io(void)
......@@ -169,7 +164,6 @@ static void __init lubbock_map_io(void)
iotable_init(lubbock_io_desc, ARRAY_SIZE(lubbock_io_desc));
/* This enables the BTUART */
CKEN |= CKEN7_BTUART;
pxa_gpio_mode(GPIO42_BTRXD_MD);
pxa_gpio_mode(GPIO43_BTTXD_MD);
pxa_gpio_mode(GPIO44_BTCTS_MD);
......@@ -188,7 +182,7 @@ static void __init lubbock_map_io(void)
PCFR |= PCFR_OPDE;
}
MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform")
MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
MAINTAINER("MontaVista Software Inc.")
BOOT_MEM(0xa0000000, 0x40000000, io_p2v(0x40000000))
MAPIO(lubbock_map_io)
......
......@@ -3,7 +3,7 @@
#
obj-y := consistent.o extable.o fault-armv.o \
fault-common.o init.o ioremap.o mm-armv.o
fault.o init.o ioremap.o mmap.o mm-armv.o
obj-$(CONFIG_MODULES) += proc-syms.o
......
......@@ -10,124 +10,15 @@
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "fault.h"
/*
* Some section permission faults need to be handled gracefully.
* They can happen due to a __{get,put}_user during an oops.
*/
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
return 0;
}
/*
* This abort handler always returns "fault".
*/
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 1;
}
static struct fsr_info {
int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
const char *name;
} fsr_info[] = {
/*
* The following are the standard ARMv3 and ARMv4 aborts. ARMv5
* defines these to be "precise" aborts.
*/
{ do_bad, SIGSEGV, "vector exception" },
{ do_bad, SIGILL, "alignment exception" },
{ do_bad, SIGKILL, "terminal exception" },
{ do_bad, SIGILL, "alignment exception" },
{ do_bad, SIGBUS, "external abort on linefetch" },
{ do_translation_fault, SIGSEGV, "section translation fault" },
{ do_bad, SIGBUS, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, "page translation fault" },
{ do_bad, SIGBUS, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, "section domain fault" },
{ do_bad, SIGBUS, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, "page domain fault" },
{ do_bad, SIGBUS, "external abort on translation" },
{ do_sect_fault, SIGSEGV, "section permission fault" },
{ do_bad, SIGBUS, "external abort on translation" },
{ do_page_fault, SIGSEGV, "page permission fault" },
/*
* The following are "imprecise" aborts, which are signalled by bit
* 10 of the FSR, and may not be recoverable. These are only
* supported if the CPU abort handler supports bit 10.
*/
{ do_bad, SIGBUS, "unknown 16" },
{ do_bad, SIGBUS, "unknown 17" },
{ do_bad, SIGBUS, "unknown 18" },
{ do_bad, SIGBUS, "unknown 19" },
{ do_bad, SIGBUS, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, "unknown 21" },
{ do_bad, SIGBUS, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, "unknown 23" },
{ do_bad, SIGBUS, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, "unknown 25" },
{ do_bad, SIGBUS, "unknown 26" },
{ do_bad, SIGBUS, "unknown 27" },
{ do_bad, SIGBUS, "unknown 28" },
{ do_bad, SIGBUS, "unknown 29" },
{ do_bad, SIGBUS, "unknown 30" },
{ do_bad, SIGBUS, "unknown 31" }
};
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, const char *name)
{
if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
fsr_info[nr].fn = fn;
fsr_info[nr].sig = sig;
fsr_info[nr].name = name;
}
}
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
if (!inf->fn(addr, fsr, regs))
return;
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
force_sig(inf->sig, current);
show_pte(current->mm, addr);
die_if_kernel("Oops", regs, 0);
}
asmlinkage void
do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
do_translation_fault(addr, 0, regs);
}
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
/*
......
/*
* linux/arch/arm/mm/fault-common.c
* linux/arch/arm/mm/fault.c
*
* Copyright (C) 1995 Linus Torvalds
* Modifications for ARM processor (c) 1995-2001 Russell King
* Modifications for ARM processor (c) 1995-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -11,11 +11,8 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <asm/system.h>
......@@ -25,20 +22,6 @@
#include "fault.h"
#ifdef CONFIG_CPU_26
#define FAULT_CODE_WRITE 0x02
#define FAULT_CODE_FORCECOW 0x01
#define DO_COW(m) ((m) & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW))
#define READ_FAULT(m) (!((m) & FAULT_CODE_WRITE))
#else
/*
* "code" is actually the FSR register. Bit 11 set means the
* instruction was performing a write.
*/
#define DO_COW(code) ((code) & (1 << 11))
#define READ_FAULT(code) (!DO_COW(code))
#endif
/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
......@@ -186,10 +169,10 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* memory access, so we can handle it.
*/
good_area:
if (READ_FAULT(fsr)) /* read? */
mask = VM_READ|VM_EXEC;
else
if (fsr & (1 << 11)) /* write? */
mask = VM_WRITE;
else
mask = VM_READ|VM_EXEC;
fault = VM_FAULT_BADACCESS;
if (!(vma->vm_flags & mask))
......@@ -201,7 +184,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* than endlessly redo the fault.
*/
survive:
fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr));
fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
/*
* Handle the "normal" cases first - successful and sigbus
......@@ -233,7 +216,8 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
return fault;
}
int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk;
struct mm_struct *mm;
......@@ -332,8 +316,9 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* interrupt or a critical region, and should only copy the information
* from the master page table, nothing more.
*/
int do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
struct task_struct *tsk;
unsigned int index;
......@@ -372,3 +357,108 @@ int do_translation_fault(unsigned long addr, unsigned int fsr,
do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
return 0;
}
/*
* Some section permission faults need to be handled gracefully.
* They can happen due to a __{get,put}_user during an oops.
*/
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
return 0;
}
/*
* This abort handler always returns "fault".
*/
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 1;
}
static struct fsr_info {
int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
const char *name;
} fsr_info[] = {
/*
* The following are the standard ARMv3 and ARMv4 aborts. ARMv5
* defines these to be "precise" aborts.
*/
{ do_bad, SIGSEGV, "vector exception" },
{ do_bad, SIGILL, "alignment exception" },
{ do_bad, SIGKILL, "terminal exception" },
{ do_bad, SIGILL, "alignment exception" },
{ do_bad, SIGBUS, "external abort on linefetch" },
{ do_translation_fault, SIGSEGV, "section translation fault" },
{ do_bad, SIGBUS, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, "page translation fault" },
{ do_bad, SIGBUS, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, "section domain fault" },
{ do_bad, SIGBUS, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, "page domain fault" },
{ do_bad, SIGBUS, "external abort on translation" },
{ do_sect_fault, SIGSEGV, "section permission fault" },
{ do_bad, SIGBUS, "external abort on translation" },
{ do_page_fault, SIGSEGV, "page permission fault" },
/*
* The following are "imprecise" aborts, which are signalled by bit
* 10 of the FSR, and may not be recoverable. These are only
* supported if the CPU abort handler supports bit 10.
*/
{ do_bad, SIGBUS, "unknown 16" },
{ do_bad, SIGBUS, "unknown 17" },
{ do_bad, SIGBUS, "unknown 18" },
{ do_bad, SIGBUS, "unknown 19" },
{ do_bad, SIGBUS, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, "unknown 21" },
{ do_bad, SIGBUS, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, "unknown 23" },
{ do_bad, SIGBUS, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, "unknown 25" },
{ do_bad, SIGBUS, "unknown 26" },
{ do_bad, SIGBUS, "unknown 27" },
{ do_bad, SIGBUS, "unknown 28" },
{ do_bad, SIGBUS, "unknown 29" },
{ do_bad, SIGBUS, "unknown 30" },
{ do_bad, SIGBUS, "unknown 31" }
};
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, const char *name)
{
if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
fsr_info[nr].fn = fn;
fsr_info[nr].sig = sig;
fsr_info[nr].name = name;
}
}
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
if (!inf->fn(addr, fsr, regs))
return;
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
force_sig(inf->sig, current);
show_pte(current->mm, addr);
die_if_kernel("Oops", regs, 0);
}
asmlinkage void
do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
do_translation_fault(addr, 0, regs);
}
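do_DataAbort above picks a handler with (fsr & 15) + ((fsr & (1 << 10)) >> 6): the low four FSR bits select one of the sixteen precise-abort entries, and FSR bit 10, where the CPU abort handler supports it, contributes 16 and lands in the imprecise half of the table. A small worked check, outside the commit:

/* Illustrative only: the fsr_info index computation from do_DataAbort. */
#include <stdio.h>

static unsigned int fsr_index(unsigned int fsr)
{
	return (fsr & 15) + ((fsr & (1 << 10)) >> 6);
}

int main(void)
{
	printf("%u\n", fsr_index(0x00d));	/* 13: "section permission fault" */
	printf("%u\n", fsr_index(0x40d));	/* 29: bit 10 set, imprecise half */
	return 0;
}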
......@@ -3,8 +3,4 @@ void do_bad_area(struct task_struct *tsk, struct mm_struct *mm,
void show_pte(struct mm_struct *mm, unsigned long addr);
int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int do_translation_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
unsigned long search_exception_table(unsigned long addr);
......@@ -28,8 +28,9 @@
#include <asm/io.h>
#include <asm/tlbflush.h>
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, pgprot_t pgprot)
static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, pgprot_t pgprot)
{
unsigned long end;
......@@ -37,22 +38,26 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
if (address >= end)
BUG();
BUG_ON(address >= end);
do {
if (!pte_none(*pte)) {
printk("remap_area_pte: page already exists\n");
BUG();
}
if (!pte_none(*pte))
goto bad;
set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pte++;
} while (address && (address < end));
return;
bad:
printk("remap_area_pte: page already exists\n");
BUG();
}
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
pgprot_t pgprot;
......@@ -64,8 +69,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
end = PGDIR_SIZE;
phys_addr -= address;
if (address >= end)
BUG();
BUG_ON(address >= end);
pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
do {
......@@ -79,35 +83,38 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
return 0;
}
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
unsigned long size, unsigned long flags)
static int
remap_area_pages(unsigned long start, unsigned long phys_addr,
unsigned long size, unsigned long flags)
{
int error;
unsigned long address = start;
unsigned long end = start + size;
int err = 0;
pgd_t * dir;
unsigned long end = address + size;
phys_addr -= address;
dir = pgd_offset(&init_mm, address);
flush_cache_all();
if (address >= end)
BUG();
BUG_ON(address >= end);
spin_lock(&init_mm.page_table_lock);
do {
pmd_t *pmd;
pmd = pmd_alloc(&init_mm, dir, address);
error = -ENOMEM;
if (!pmd)
pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
if (!pmd) {
err = -ENOMEM;
break;
}
if (remap_area_pmd(pmd, address, end - address,
phys_addr + address, flags))
phys_addr + address, flags)) {
err = -ENOMEM;
break;
error = 0;
}
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
spin_unlock(&init_mm.page_table_lock);
flush_tlb_all();
return error;
flush_cache_vmap(start, end);
return err;
}
/*
......
/*
* linux/arch/arm/mm/mmap.c
*/
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <asm/system.h>
#define COLOUR_ALIGN(addr,pgoff) \
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
/*
* We need to ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches. We need to ensure that
* a specific page of an object is always mapped at a multiple of
* SHMLBA bytes.
*
* We unconditionally provide this function for all cases, however
* in the VIVT case, we optimise out the alignment rules.
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
#ifdef CONFIG_CPU_V6
unsigned int cache_type;
int do_align = 0, aliasing = 0;
/*
* We only need to do colour alignment if either the I or D
* caches alias. This is indicated by bits 9 and 21 of the
* cache type register.
*/
cache_type = read_cpuid(CPUID_CACHETYPE);
if (cache_type != read_cpuid(CPUID_ID)) {
aliasing = (cache_type | cache_type >> 12) & (1 << 9);
if (aliasing)
do_align = filp || flags & MAP_SHARED;
}
#else
#define do_align 0
#define aliasing 0
#endif
/*
* We should enforce the MAP_FIXED case. However, currently
* the generic kernel code doesn't allow us to handle this.
*/
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (len > TASK_SIZE)
return -ENOMEM;
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
start_addr = addr = mm->free_area_cache;
full_search:
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr) {
/*
* Start a new search - just in case we missed
* some holes.
*/
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = addr = TASK_UNMAPPED_BASE;
goto full_search;
}
return -ENOMEM;
}
if (!vma || addr + len <= vma->vm_start) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
addr = vma->vm_end;
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
}
}
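COLOUR_ALIGN in the new mmap.c rounds the candidate address up to an SHMLBA boundary and then adds the page offset's position within that window, so a given page of a shared object always maps at the same cache colour. A worked example with assumed values (SHMLBA = 0x4000, i.e. four 4 KiB pages, as on aliasing ARM VIPT caches of this era):

/* Illustrative only: COLOUR_ALIGN as in the new mmap.c, evaluated with
 * assumed SHMLBA and PAGE_SHIFT values. */
#include <stdio.h>

#define SHMLBA		0x4000UL
#define PAGE_SHIFT	12

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +		\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	/* 0x40001000 rounds up to 0x40004000; pgoff 3 adds 0x3000,
	 * giving 0x40007000 -- the same colour slot on every mapping. */
	printf("%#lx\n", COLOUR_ALIGN(0x40001000UL, 3UL));
	return 0;
}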
......@@ -20,7 +20,8 @@
#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/interrupt.h>
#include <asm/arch/irqs.h>
#include <asm/irq.h>
#include <asm/system.h>
#include "op_counter.h"
#include "op_arm_model.h"
......@@ -399,8 +400,7 @@ static int xscale_detect_pmu(void)
int ret = 0;
u32 id;
__asm__ __volatile__ ("mrc p15, 0, %0, c0, c0, 0" : "=r" (id));
id = (id >> 13) & 0x7;
id = (read_cpuid(CPUID_ID) >> 13) & 0x7;
switch (id) {
case 1:
......
......@@ -431,11 +431,13 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
"1:"
: "=a" (ret),
"=b" (map),
"+m" (opt)
"=m" (opt)
: "0" (PCIBIOS_GET_ROUTING_OPTIONS),
"1" (0),
"D" ((long) &opt),
"S" (&pci_indirect));
"S" (&pci_indirect),
"m" (opt)
: "memory");
DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
if (ret & 0xff00)
printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
......
......@@ -529,7 +529,11 @@ hub_dmaaddr_drain( vertex_hdl_t vhdl,
void
hub_provider_startup(vertex_hdl_t hubv)
{
hubinfo_t hubinfo;
hubinfo_get(hubv, &hubinfo);
hub_pio_init(hubv);
intr_init_vecblk(nasid_to_cnodeid(hubinfo->h_nasid));
}
/*
......