Commit ec5a5b68 authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5
into samba.org:/scratch/anton/linux-2.5_ppc64
parents 5e8a4a7d 93f8230d
......@@ -18,7 +18,8 @@ KERNELLOAD =0xc000000000000000
LINKFLAGS = -T arch/ppc64/vmlinux.lds -Bstatic \
-e $(KERNELLOAD) -Ttext $(KERNELLOAD)
CFLAGS := $(CFLAGS) -fsigned-char -msoft-float -pipe \
-Wno-uninitialized -mminimal-toc -mtraceback=full
-Wno-uninitialized -mminimal-toc -mtraceback=full \
-Wa,-mpower4
CPP = $(CC) -E $(CFLAGS)
......
......@@ -153,7 +153,7 @@ make_bi_recs(unsigned long addr)
rec = bi_rec_alloc(rec, 2);
rec->tag = BI_MACHTYPE;
rec->data[0] = _MACH_pSeries;
rec->data[0] = PLATFORM_PSERIES;
rec->data[1] = 1;
if ( initrd_size > 0 ) {
......
......@@ -103,8 +103,12 @@ if [ "$CONFIG_SCSI" != "n" ]; then
fi
endmenu
source drivers/message/fusion/Config.in
source drivers/ieee1394/Config.in
source drivers/message/i2o/Config.in
if [ "$CONFIG_NET" = "y" ]; then
mainmenu_option next_comment
comment 'Network device support'
......@@ -181,6 +185,9 @@ if [ "$CONFIG_VIOCD" = "y" ]; then
fi
source drivers/char/Config.in
source drivers/media/Config.in
source fs/Config.in
mainmenu_option next_comment
......@@ -194,6 +201,8 @@ endmenu
source drivers/usb/Config.in
source net/bluetooth/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
......
......@@ -146,3 +146,8 @@ int cpu_idle(void)
}
#endif /* CONFIG_PPC_ISERIES */
void default_idle(void)
{
barrier();
}
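
For context, a hedged sketch of how a barrier()-based idle routine such as the new default_idle() is typically consumed; the loop below is illustrative and not part of this commit. barrier() is a compiler barrier only, so it keeps the reschedule check from being hoisted out of the polling loop.

        /* Illustrative only -- not code from this commit. Assumes a
         * need_resched()-style check is available to the idle loop. */
        static void example_idle_loop(void)
        {
                for (;;) {
                        while (!need_resched())
                                default_idle(); /* barrier() forces a fresh re-read */
                        schedule();
                }
        }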
......@@ -3756,6 +3756,7 @@ COMPATIBLE_IOCTL(TCSETSW),
COMPATIBLE_IOCTL(TCSETSF),
COMPATIBLE_IOCTL(TIOCLINUX),
COMPATIBLE_IOCTL(TIOCSTART),
COMPATIBLE_IOCTL(TIOCSTOP),
/* Little t */
COMPATIBLE_IOCTL(TIOCGETD),
COMPATIBLE_IOCTL(TIOCSETD),
......@@ -4336,8 +4337,6 @@ COMPATIBLE_IOCTL(RNDCLEARPOOL),
COMPATIBLE_IOCTL(HCIDEVUP),
COMPATIBLE_IOCTL(HCIDEVDOWN),
COMPATIBLE_IOCTL(HCIDEVRESET),
COMPATIBLE_IOCTL(HCIRESETSTAT),
COMPATIBLE_IOCTL(HCIGETINFO),
COMPATIBLE_IOCTL(HCIGETDEVLIST),
COMPATIBLE_IOCTL(HCISETRAW),
COMPATIBLE_IOCTL(HCISETSCAN),
......
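
The entries added above (TIOCSTOP, HCIRESETSTAT) are registered as "compatible": their argument layout is identical for 32-bit and 64-bit callers, so the 32-bit ioctl path can forward them to the native handler unchanged. A minimal sketch of that idea follows; the names compat_entry, compat_table and forward_native_ioctl are hypothetical and not the kernel's real symbols.

        /* Hedged sketch: a pass-through table for 32-bit ioctls whose
         * arguments need no translation. All names are illustrative. */
        struct compat_entry {
                unsigned int cmd;
        };

        static const struct compat_entry compat_table[] = {
                { TIOCSTART }, { TIOCSTOP },    /* tty start/stop: no argument   */
                { HCIDEVUP }, { HCIDEVDOWN },   /* HCI dev ioctls: plain integer */
        };

        static long compat_dispatch(unsigned int fd, unsigned int cmd,
                                    unsigned long arg)
        {
                unsigned int i;

                for (i = 0; i < sizeof(compat_table) / sizeof(compat_table[0]); i++)
                        if (compat_table[i].cmd == cmd)
                                return forward_native_ioctl(fd, cmd, arg);
                return -EINVAL;         /* needs translation or is unknown */
        }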
......@@ -359,7 +359,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
/* Invalidate the tlb */
if (!large && local && __is_processor(PV_POWER4)) {
_tlbiel(va, large);
_tlbiel(va);
} else {
spin_lock_irqsave(&pSeries_tlbie_lock, flags);
_tlbie(va, large);
......
......@@ -498,6 +498,8 @@ pcibios_init(void)
}
subsys_initcall(pcibios_init);
int __init
pcibios_assign_all_busses(void)
{
......
Two collapsed diffs in this commit are not shown.
......@@ -22,7 +22,7 @@ int make_ste(unsigned long stab,
unsigned long esid, unsigned long vsid);
void make_slbe(unsigned long esid, unsigned long vsid,
int large);
extern struct Naca *naca;
extern struct naca_struct *naca;
/*
* Build an entry for the base kernel segment and put it into
......
......@@ -53,6 +53,7 @@ extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
#ifdef CONFIG_XMON
#define CONFIG_DEBUGGER
void (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
......@@ -61,6 +62,7 @@ int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#ifdef CONFIG_KGDB
#define CONFIG_DEBUGGER
void (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
......@@ -69,29 +71,46 @@ int (*debugger_dabr_match)(struct pt_regs *regs);
void (*debugger_fault_handler)(struct pt_regs *regs);
#endif
#endif
/*
* Trap & Exception support
*/
void
_exception(int signr, struct pt_regs *regs)
/* Should we panic on bad kernel exceptions or try to recover */
#undef PANIC_ON_ERROR
static spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
void die(const char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs))
{
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("Oops: %s, sig: %ld\n", str, err);
show_regs(regs);
print_backtrace((unsigned long *)regs->gpr[1]);
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
#ifdef PANIC_ON_ERROR
panic(str);
#else
do_exit(SIGSEGV);
#endif
print_backtrace((unsigned long *)regs->gpr[1]);
panic("Exception in kernel pc %lx signal %d",regs->nip,signr);
#if defined(CONFIG_PPCDBG) && (defined(CONFIG_XMON) || defined(CONFIG_KGDB))
/* Allow us to catch SIGILLs for 64-bit app/glibc debugging. -Peter */
} else if (signr == SIGILL) {
ifppcdebug(PPCDBG_SIGNALXMON)
}
static void
_exception(int signr, siginfo_t *info, struct pt_regs *regs)
{
if (!user_mode(regs)) {
#ifdef CONFIG_DEBUGGER
if (debugger)
debugger(regs);
#endif
die("Exception in kernel mode\n", regs, signr);
}
force_sig(signr, current);
force_sig_info(signr, info, current);
}
/* Get the error information for errors coming through the
......@@ -130,9 +149,8 @@ static void FWNMI_release_errinfo(void)
void
SystemResetException(struct pt_regs *regs)
{
char *msg = "System Reset in kernel mode.\n";
udbg_printf(msg); printk(msg);
if (fwnmi_active) {
char *msg;
unsigned long *r3 = __va(regs->gpr[3]); /* for FWNMI debug */
struct rtas_error_log *errlog;
......@@ -140,17 +158,33 @@ SystemResetException(struct pt_regs *regs)
udbg_printf(msg, r3); printk(msg, r3);
errlog = FWNMI_get_errinfo(regs);
}
#if defined(CONFIG_XMON)
xmon(regs);
udbg_printf("leaving xmon...\n");
#ifdef CONFIG_DEBUGGER
if (debugger)
debugger(regs);
#endif
#ifdef PANIC_ON_ERROR
panic("System Reset");
#else
for(;;);
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable System Reset");
#endif
/* What should we do here? We could issue a shutdown or hard reset. */
}
static int power4_handle_mce(struct pt_regs *regs)
{
return 0;
}
void
MachineCheckException(struct pt_regs *regs)
{
siginfo_t info;
if (fwnmi_active) {
struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs);
if (errhdr) {
......@@ -158,117 +192,227 @@ MachineCheckException(struct pt_regs *regs)
}
FWNMI_release_errinfo();
}
if ( !user_mode(regs) )
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (!user_mode(regs)) {
/* Attempt to recover if the interrupt is recoverable */
if (regs->msr & MSR_RI) {
if (__is_processor(PV_POWER4) &&
power4_handle_mce(regs))
return;
}
#ifdef CONFIG_DEBUGGER
if (debugger_fault_handler) {
debugger_fault_handler(regs);
return;
}
if (debugger)
debugger(regs);
#endif
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("Machine check in kernel mode.\n");
printk("Caused by (from SRR1=%lx): ", regs->msr);
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
print_backtrace((unsigned long *)regs->gpr[1]);
panic("machine check");
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
panic("Unrecoverable Machine Check");
}
_exception(SIGSEGV, regs);
}
void
SMIException(struct pt_regs *regs)
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
{
debugger(regs);
return;
}
#endif
show_regs(regs);
print_backtrace((unsigned long *)regs->gpr[1]);
panic("System Management Interrupt");
/*
* XXX we should check RI bit on exception exit and kill the
* task if it was cleared
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)regs->nip;
_exception(SIGSEGV, &info, regs);
}
void
UnknownException(struct pt_regs *regs)
{
siginfo_t info;
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = 0;
info.si_addr = 0;
_exception(SIGTRAP, &info, regs);
}
void
InstructionBreakpointException(struct pt_regs *regs)
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_iabr_match(regs))
siginfo_t info;
#ifdef CONFIG_DEBUGGER
if (debugger_iabr_match && debugger_iabr_match(regs))
return;
#endif
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
}
static void parse_fpe(struct pt_regs *regs)
{
siginfo_t info;
unsigned int *tmp;
unsigned int fpscr;
if (regs->msr & MSR_FP)
giveup_fpu(current);
tmp = &current->thread.fpscr;
fpscr = *tmp;
/* Invalid operation */
if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
info.si_code = FPE_FLTINV;
/* Overflow */
else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
info.si_code = FPE_FLTOVF;
/* Underflow */
else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
info.si_code = FPE_FLTUND;
/* Divide by zero */
else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
info.si_code = FPE_FLTDIV;
/* Inexact result */
else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
info.si_code = FPE_FLTRES;
else
info.si_code = 0;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void *)regs->nip;
_exception(SIGFPE, &info, regs);
}
void
ProgramCheckException(struct pt_regs *regs)
{
siginfo_t info;
if (regs->msr & 0x100000) {
/* IEEE FP exception */
_exception(SIGFPE, regs);
parse_fpe(regs);
} else if (regs->msr & 0x40000) {
/* Privileged instruction */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_PRVOPC;
info.si_addr = (void *)regs->nip;
_exception(SIGILL, &info, regs);
} else if (regs->msr & 0x20000) {
/* trap exception */
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_bpt(regs))
#ifdef CONFIG_DEBUGGER
if (debugger_bpt && debugger_bpt(regs))
return;
#endif
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
} else {
_exception(SIGILL, regs);
/* Illegal instruction */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
info.si_addr = (void *)regs->nip;
_exception(SIGILL, &info, regs);
}
}
void
SingleStepException(struct pt_regs *regs)
{
siginfo_t info;
regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_sstep(regs))
#ifdef CONFIG_DEBUGGER
if (debugger_sstep && debugger_sstep(regs))
return;
#endif
_exception(SIGTRAP, regs);
}
/* Dummy handler for Performance Monitor */
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_TRACE;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
}
void
PerformanceMonitorException(struct pt_regs *regs)
{
_exception(SIGTRAP, regs);
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = 0;
_exception(SIGTRAP, &info, regs);
}
void
AlignmentException(struct pt_regs *regs)
{
int fixed;
siginfo_t info;
fixed = fix_alignment(regs);
if (fixed == 1) {
ifppcdebug(PPCDBG_ALIGNFIXUP)
if (!user_mode(regs))
PPCDBG(PPCDBG_ALIGNFIXUP, "fix alignment at %lx\n", regs->nip);
if (!user_mode(regs))
PPCDBG(PPCDBG_ALIGNFIXUP, "fix alignment at %lx\n",
regs->nip);
regs->nip += 4; /* skip over emulated instruction */
return;
}
/* Operand address was bad */
if (fixed == -EFAULT) {
/* fixed == -EFAULT means the operand address was bad */
if (user_mode(regs))
force_sig(SIGSEGV, current);
else
if (user_mode(regs)) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = SEGV_MAPERR;
info.si_addr = (void *)regs->dar;
force_sig_info(SIGSEGV, &info, current);
} else {
/* Search exception table */
bad_page_fault(regs, regs->dar);
}
return;
}
_exception(SIGBUS, regs);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void *)regs->nip;
_exception(SIGBUS, &info, regs);
}
void __init trap_init(void)
......
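
The net effect of the traps.c rework above is that exceptions now deliver a filled-in siginfo_t (si_code, si_addr) rather than a bare signal. A small userspace sketch of why that matters: a handler installed with SA_SIGINFO can distinguish, say, an FP divide-by-zero from an overflow via si_code. This example is illustrative and not part of the commit.

        #include <signal.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        /* Illustrative consumer of the siginfo_t fields the kernel now
         * fills in (FPE_FLTDIV, FPE_FLTOVF, ... and si_addr). */
        static void fpe_handler(int sig, siginfo_t *info, void *ctx)
        {
                /* fprintf is not async-signal-safe; fine for a demo only. */
                if (info->si_code == FPE_FLTDIV)
                        fprintf(stderr, "FP divide by zero at %p\n", info->si_addr);
                else
                        fprintf(stderr, "SIGFPE, si_code=%d\n", info->si_code);
                _exit(1);
        }

        int main(void)
        {
                struct sigaction sa;

                memset(&sa, 0, sizeof(sa));
                sa.sa_sigaction = fpe_handler;
                sa.sa_flags = SA_SIGINFO;
                sigaction(SIGFPE, &sa, NULL);
                /* ... enable FP trapping and trigger an exception here ... */
                return 0;
        }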
......@@ -45,9 +45,7 @@ extern int (*debugger_dabr_match)(struct pt_regs *);
int debugger_kernel_faults = 1;
#endif
extern void die_if_kernel(char *, struct pt_regs *, long);
void bad_page_fault(struct pt_regs *, unsigned long);
void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
void bad_page_fault(struct pt_regs *, unsigned long, int);
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
......@@ -86,7 +84,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
#endif /* CONFIG_XMON || CONFIG_KGDB */
if (in_interrupt() || mm == NULL) {
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGSEGV);
return;
}
down_read(&mm->mmap_sem);
......@@ -143,7 +141,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
bad_area:
up_read(&mm->mmap_sem);
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
info.si_signo = SIGSEGV;
......@@ -159,7 +157,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
return;
}
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGSEGV);
return;
/*
......@@ -176,7 +174,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
printk("VM: killing process %s\n", current->comm);
if (user_mode(regs))
do_exit(SIGKILL);
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGKILL);
return;
do_sigbus:
......@@ -187,7 +185,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
info.si_addr = (void *)address;
force_sig_info (SIGBUS, &info, current);
if (!user_mode(regs))
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGBUS);
}
/*
......@@ -196,8 +194,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* in traps.c.
*/
void
bad_page_fault(struct pt_regs *regs, unsigned long address)
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
extern void die(const char *, struct pt_regs *, long);
unsigned long fixup;
/* Are we prepared to handle this fault? */
......@@ -207,13 +207,10 @@ bad_page_fault(struct pt_regs *regs, unsigned long address)
}
/* kernel has accessed a bad area */
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_kernel_faults)
debugger(regs);
#endif
print_backtrace( (unsigned long *)regs->gpr[1] );
panic("kernel access of bad area pc %lx lr %lx address %lX tsk %s/%d",
regs->nip,regs->link,address,current->comm,current->pid);
die("Kernel access of bad area", regs, sig);
}
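
For readers unfamiliar with the "Are we prepared to handle this fault?" check elided above: kernel faults in regions covered by the exception table (copy_from_user and friends) are recovered by jumping to a fixup address instead of calling die(). The function below is a generic sketch of that shape, not the file's exact lines.

        /* Generic sketch only: try an exception-table fixup first, and
         * only die() when no fixup exists for the faulting instruction. */
        void sketch_bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
        {
                unsigned long fixup = search_exception_table(regs->nip);

                if (fixup) {
                        regs->nip = fixup;      /* resume at the fixup stub */
                        return;
                }
                die("Kernel access of bad area", regs, sig);
        }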
......@@ -247,6 +247,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
void
flush_tlb_mm(struct mm_struct *mm)
{
spin_lock(&mm->page_table_lock);
if (mm->map_count) {
struct vm_area_struct *mp;
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
......@@ -261,6 +262,7 @@ flush_tlb_mm(struct mm_struct *mm)
/* XXX are there races with checking cpu_vm_mask? - Anton */
mm->cpu_vm_mask = 0;
spin_unlock(&mm->page_table_lock);
}
/*
......@@ -666,6 +668,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
* fault has been handled by updating a PTE in the linux page tables.
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
* This must always be called with the mm->page_table_lock held
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
pte_t pte)
......
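
A short sketch of the locking invariant stated in the new comment: generic code is expected to hold mm->page_table_lock across the PTE update and the update_mmu_cache() call, which is what lets it safely preload the hash-table entry, while flush_tlb_mm() now takes the same lock internally. The caller below is illustrative, not code from this commit.

        /* Illustrative only: how generic mm code is expected to call
         * update_mmu_cache() under mm->page_table_lock. */
        static void sketch_establish_pte(struct mm_struct *mm,
                                         struct vm_area_struct *vma,
                                         unsigned long addr,
                                         pte_t *ptep, pte_t pte)
        {
                spin_lock(&mm->page_table_lock);
                set_pte(ptep, pte);
                update_mmu_cache(vma, addr, pte);       /* preload the HPTE */
                spin_unlock(&mm->page_table_lock);
        }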
......@@ -26,7 +26,7 @@
#define O_DIRECTORY 040000 /* must be a directory */
#define O_NOFOLLOW 0100000 /* don't follow links */
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint - currently ignored */
#define O_DIRECT 0400000 /* direct disk access hint */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
......
......@@ -50,7 +50,7 @@ static inline void isync(void)
#define HMT_LOW "\tor 1,1,1 # low priority\n"
#define HMT_MEDIUM "\tor 2,2,2 # medium priority\n"
#define HMT_MEDIUM "\tor 3,3,3 # high priority\n"
#define HMT_HIGH "\tor 3,3,3 # high priority\n"
#else
#define HMT_low() do { } while(0)
#define HMT_medium() do { } while(0)
......
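
The strings above encode the SMT priority nops ("or 1,1,1" low, "or 2,2,2" medium, "or 3,3,3" high); the change appears to fix a copy-and-paste where HMT_MEDIUM had been defined twice instead of defining HMT_HIGH. A hedged sketch of their typical use, assuming HMT_low()/HMT_medium() wrappers matching the no-op versions shown in the #else branch:

        /* Illustrative busy-wait (not from this commit): lower the SMT
         * thread priority while spinning, restore it before proceeding. */
        static void sketch_spin_on(volatile unsigned long *word)
        {
                while (*word != 0)
                        HMT_low();      /* "or 1,1,1": yield issue slots */
                HMT_medium();           /* back to normal priority */
        }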
......@@ -211,18 +211,11 @@ static inline void _tlbie(unsigned long va, int large)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbiel(unsigned long va, int large)
static inline void _tlbiel(unsigned long va)
{
asm volatile("ptesync": : :"memory");
if (large) {
asm volatile("clrldi %0,%0,16\n\
tlbiel %0,1" : : "r"(va) : "memory");
} else {
asm volatile("clrldi %0,%0,16\n\
tlbiel %0,0" : : "r"(va) : "memory");
}
asm volatile("clrldi %0,%0,16\n\
tlbiel %0" : : "r"(va) : "memory");
asm volatile("ptesync": : :"memory");
}
......
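
Tying this to the pSeries_hpte_invalidate hunk earlier: tlbiel only invalidates the executing CPU's TLB, so it can be issued without the global tlbie lock, and since the local form is only used for normal (non-large) pages, the large parameter could be dropped. A hedged sketch of the resulting call pattern; va, large, local, flags and the lock are assumed to be in scope as in that hunk.

        /* Illustrative only: local invalidate is lock-free, global
         * broadcast still serialises on pSeries_tlbie_lock. */
        if (!large && local && __is_processor(PV_POWER4)) {
                _tlbiel(va);                    /* this CPU only */
        } else {
                spin_lock_irqsave(&pSeries_tlbie_lock, flags);
                _tlbie(va, large);              /* broadcast invalidate */
                spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
        }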
......@@ -215,11 +215,14 @@ static inline int get_order(unsigned long size)
#define __a2p(x) ((void *) absolute_to_phys(x))
#define __a2v(x) ((void *) __va(absolute_to_phys(x)))
#define virt_to_page(kaddr) (mem_map+(__pa((unsigned long)kaddr) >> PAGE_SHIFT))
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(pfn) ((unsigned long)((pfn) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
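
A small illustrative round-trip through the new pfn helpers (not part of the commit), assuming a directly-mapped kernel address kaddr and a flat mem_map:

        /* Illustrative only. */
        struct page *pg = virt_to_page(kaddr);          /* kaddr -> struct page   */
        unsigned long pfn = page_to_pfn(pg);            /* struct page -> frame # */

        if (pfn_valid(pfn) && virt_addr_valid(kaddr))
                BUG_ON(pfn_to_page(pfn) != pg);         /* conversions round-trip */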
......@@ -167,24 +167,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
* mk_pte_phys takes a physical address as input
*
* mk_pte takes a (struct page *) as input
*/
#define mk_pte_phys(physpage,pgprot) \
({ \
pte_t pte; \
pte_val(pte) = (((physpage)<<(PTE_SHIFT-PAGE_SHIFT)) | pgprot_val(pgprot)); \
pte; \
})
#define mk_pte(page,pgprot) \
({ \
pte_t pte; \
pte_val(pte) = ((unsigned long)((page) - mem_map) << PTE_SHIFT) | \
pgprot_val(pgprot); \
pte; \
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#define pfn_pte(pfn,pgprot) \
({ \
pte_t pte; \
pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) | \
pgprot_val(pgprot); \
pte; \
})
#define pte_modify(_pte, newprot) \
......@@ -195,8 +187,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
/* pte_clear moved to later in this file */
#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) (mem_map+pte_pagenr(x))
#define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = (__ba_to_bpn(ptep)))
#define pmd_none(pmd) (!pmd_val(pmd))
......
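
The rework above expresses mk_pte() in terms of pfn_pte() and pte_page() in terms of pte_pfn(), so a PTE built from a page round-trips back to the same page. A brief illustrative use (not from the commit), assuming page and a protection such as PAGE_KERNEL are in scope:

        /* Illustrative only. */
        pte_t pte = mk_pte(page, PAGE_KERNEL);          /* page + prot -> pte  */
        unsigned long pfn = pte_pfn(pte);               /* pte -> frame number */

        BUG_ON(pte_page(pte) != page);                  /* pfn_to_page(pte_pfn()) */
        BUG_ON(pfn_to_page(pfn) != page);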
......@@ -65,6 +65,8 @@ static inline struct thread_info *current_thread_info(void)
#endif /* __ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x4000000
/*
* thread information flag bit numbers
*/
......