Commit 85d4d006 authored by Linus Torvalds

Merge bk://ppc.bkbits.net/for-linus-ppc
into home.transmeta.com:/home/torvalds/v2.5/linux
parents 91a09ae3 f25fcf8b
@@ -1033,6 +1033,16 @@ CONFIG_XMON
   Include in-kernel hooks for the xmon kernel monitor/debugger
   supported by the PPC port.
 
+Include BDI2000 debugger support
+CONFIG_BDI_SWITCH
+  Include in-kernel support for the Abatron BDI2000 debugger.
+
+Add additional CFLAGS to the kernel build
+CONFIG_MORE_COMPILE_OPTIONS
+  If you want to add additional CFLAGS to the kernel build, such as
+  -g for KGDB, XMON or the BDI2000, enable this option and then
+  enter what you would like to add in the next question.
+
 CONFIG_ADVANCED_OPTIONS
   This option will enable prompting for a variety of advanced kernel
   configuration options.  These options can cause the kernel to not
...
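The help text above describes a two-step prompt: the bool only unlocks a follow-up string question whose value the arch Makefile appends to CFLAGS. A sketch of the resulting .config fragment follows; the string symbol CONFIG_COMPILE_OPTIONS and the sample flags are assumptions based on the arch/ppc config of this era, not lines from this hunk:

# Illustrative .config fragment, not part of the patch:
CONFIG_MORE_COMPILE_OPTIONS=y
CONFIG_COMPILE_OPTIONS="-g -ggdb"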
@@ -52,49 +52,45 @@ void power_save(void);
 unsigned long zero_paged_on;
 unsigned long powersave_nap;
 
-int idled(void)
+void default_idle(void)
 {
 	int do_power_save = 0;
 
 	if (cur_cpu_spec[smp_processor_id()]->cpu_features & CPU_FTR_CAN_DOZE)
 		do_power_save = 1;
 
-	/* endless loop with no priority at all */
-	for (;;) {
 #ifdef CONFIG_PPC_ISERIES
 	if (!current->need_resched) {
 		/* Turn off the run light */
 		run_light_on(0);
 		yield_shared_processor();
 	}
 	HMT_low();
 #endif
 #ifdef CONFIG_SMP
 	if (!do_power_save) {
 		if (!need_resched()) {
 			set_thread_flag(TIF_POLLING_NRFLAG);
 			while (!test_thread_flag(TIF_NEED_RESCHED))
 				barrier();
 			clear_thread_flag(TIF_POLLING_NRFLAG);
 		}
 	}
 #endif
 	if (do_power_save && !need_resched())
 		power_save();
 
 	if (need_resched()) {
 		run_light_on(1);
 		schedule();
 	}
 #ifdef CONFIG_PPC_ISERIES
 	else {
 		run_light_on(0);
 		yield_shared_processor();
 		HMT_low();
 	}
 #endif /* CONFIG_PPC_ISERIES */
-	}
-	return 0;
 }
 
 /*
...
@@ -103,7 +99,8 @@ int idled(void)
  */
 int cpu_idle(void)
 {
-	idled();
+	for (;;)
+		default_idle();
 	return 0;
 }
...
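The shape of this change: the endless loop (and the unreachable return) move out of the per-pass routine into cpu_idle(), so default_idle() now performs exactly one idle pass per call. The TIF_POLLING_NRFLAG section is the polling-idle idiom — it advertises that this CPU is watching its reschedule flag, so remote CPUs can skip the wakeup IPI; barrier() is a compiler barrier that forces the flag to be re-read on every spin. A minimal sketch of the resulting control flow, with the pass body elided:

/* Sketch of the post-change structure (bodies elided). */
void default_idle(void)
{
	/* one idle pass: doze or poll, then schedule() if needed */
}

int cpu_idle(void)
{
	for (;;)		/* the endless loop lives here now */
		default_idle();
	return 0;		/* not reached; keeps the old prototype */
}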
@@ -79,6 +79,10 @@ extern unsigned char __res[];
 extern unsigned long ret_to_user_hook;
 extern unsigned long mm_ptov (unsigned long paddr);
 
+extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
+extern void consistent_free(void *vaddr);
+extern void consistent_sync(void *vaddr, size_t size, int direction);
+
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(do_signal);
 EXPORT_SYMBOL(do_syscall_trace);
...
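These declarations make the consistent_* DMA helpers visible for export; the matching EXPORT_SYMBOL lines are presumably in the elided part of the file. A hedged usage sketch (every name not in the hunk — the helper function, the ring names — is illustrative only):

/* Sketch: allocating and freeing a coherent DMA buffer with the
 * helpers declared above.  On non-cache-coherent PPC cores the
 * returned mapping is uncached, and *dma_handle receives the bus
 * address the device should be programmed with.
 */
static int ring_setup_sketch(void)	/* hypothetical driver helper */
{
	dma_addr_t ring_dma;
	void *ring = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &ring_dma);

	if (ring == NULL)
		return -ENOMEM;
	/* ... hand ring_dma to the device, touch ring from the CPU ... */
	consistent_free(ring);
	return 0;
}

/* consistent_sync(vaddr, size, direction) is the companion call for
 * streaming (cached) buffers: flush or invalidate around each DMA. */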
@@ -20,6 +20,7 @@
 #include <linux/sched.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
+#include <asm/errno.h>
 
 /*
  * Atomically update sem->count.
...
@@ -594,8 +594,8 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
 			info.si_signo = signr;
 			info.si_errno = 0;
 			info.si_code = SI_USER;
-			info.si_pid = current->p_pptr->pid;
-			info.si_uid = current->p_pptr->uid;
+			info.si_pid = current->parent->pid;
+			info.si_uid = current->parent->uid;
 		}
 
 		/* If the (new) signal is now blocked, requeue it. */
...
@@ -634,7 +634,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
 		case SIGSTOP:
 			current->state = TASK_STOPPED;
 			current->exit_code = signr;
-			if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+			if (!(current->parent->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
 				notify_parent(current, SIGCHLD);
 			schedule();
 			continue;
...
@@ -22,6 +22,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  */
+#include <asm/tlbflush.h>
 
 extern void mapin_ram(void);
 extern void bat_mapin_ram(void);
...
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
...
@@ -56,6 +57,70 @@ void setbat(int index, unsigned long virt, unsigned long phys,
 #define p_mapped_by_bats(x)	(0UL)
 #endif /* HAVE_BATS */
 
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret;
+
+	if ((ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL)
+		clear_page(ret);
+	return ret;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *pte;
+	extern int mem_init_done;
+	extern void *early_get_page(void);
+	int timeout = 0;
+
+	if (mem_init_done) {
+		while ((pte = (pte_t *) __get_free_page(GFP_KERNEL)) == NULL
+		       && ++timeout < 10) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(HZ);
+		}
+	} else
+		pte = (pte_t *) early_get_page();
+	if (pte != NULL)
+		clear_page(pte);
+	return pte;
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *pte;
+	int timeout = 0;
+#ifdef CONFIG_HIGHPTE
+	int flags = GFP_KERNEL | __GFP_HIGHMEM;
+#else
+	int flags = GFP_KERNEL;
+#endif
+
+	while ((pte = alloc_pages(flags, 0)) == NULL) {
+		if (++timeout >= 10)
+			return NULL;
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(HZ);
+	}
+	clear_highpage(pte);
+	return pte;
+}
+
+void pte_free_kernel(pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+void pte_free(struct page *pte)
+{
+	__free_page(pte);
+}
+
 #ifndef CONFIG_PPC_ISERIES
 void *
 ioremap(unsigned long addr, unsigned long size)
...
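A design note on the allocators just added: rather than failing on the first out-of-memory condition, both pte allocators retry for up to ten one-second sleeps (schedule_timeout(HZ)) so reclaim can make progress, and pte_alloc_one_kernel additionally falls back to early_get_page() before the buddy allocator is initialized (mem_init_done == 0). The retry idiom in isolation, as a hypothetical stand-alone helper:

/* Retry a possibly-failing page allocation, sleeping one second
 * (HZ jiffies) between attempts, and give up after ten tries.
 * The helper name is illustrative, not from the patch.
 */
static struct page *alloc_page_patiently(int flags)
{
	struct page *page;
	int timeout = 0;

	while ((page = alloc_pages(flags, 0)) == NULL) {
		if (++timeout >= 10)
			return NULL;		/* ~10s with no progress */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);		/* sleep one second */
	}
	return page;
}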
/*
 * BK Id: %F% %I% %G% %U% %#%
 */
/*
 * include/asm-ppc/cacheflush.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__
#ifndef _PPC_CACHEFLUSH_H
#define _PPC_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.  -- paulus
 * Also, when SMP we use the coherency (M) bit of the
 * BATs and PTEs.  -- Cort
 */
#define flush_cache_all()		do { } while (0)
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(vma, a, b)	do { } while (0)
#define flush_cache_page(vma, p)	do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)

extern void flush_dcache_page(struct page *page);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
		struct page *page, unsigned long addr, int len);

extern void __flush_dcache_icache(void *page_va);
extern void __flush_dcache_icache_phys(unsigned long physaddr);

#endif /* _PPC_CACHEFLUSH_H */
#endif /* __KERNEL__ */
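The new header keeps the PPC rule that plain mapping changes need no cache flush, while instruction memory is different: PPC cores have split I/D caches, so code stored through the data side must be pushed to memory and the stale icache lines invalidated before execution. A sketch of the classic caller (the function name and the patched opcode are illustrative only):

/* Sketch: self-modifying code must call flush_icache_range() after
 * storing instructions, or the CPU may execute stale icache lines.
 */
static void patch_insn_sketch(unsigned int *insn)
{
	*insn = 0x7fe00008;	/* PPC "trap" opcode, example value */
	flush_icache_range((unsigned long)insn,
			   (unsigned long)(insn + 1));
}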
@@ -28,7 +28,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <asm/kmap_types.h>
-#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 /* undef for production */
 #define HIGHMEM_DEBUG 1
...
@@ -7,24 +7,12 @@
 #include <linux/config.h>
 #include <linux/threads.h>
-#include <linux/highmem.h>
 #include <asm/processor.h>
 
 extern void __bad_pte(pmd_t *pmd);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *ret;
-
-	if ((ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL)
-		clear_page(ret);
-	return ret;
-}
-
-extern __inline__ void pgd_free(pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
-}
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(pgd_t *pgd);
 
 /*
  * We don't have any real pmd's, and this code never triggers because
...
@@ -34,64 +22,17 @@ extern __inline__ void pgd_free(pgd_t *pgd)
 #define pmd_free(x)			do { } while (0)
 #define pgd_populate(mm, pmd, pte)	BUG()
 
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	pte_t *pte;
-	extern int mem_init_done;
-	extern void *early_get_page(void);
-	int timeout = 0;
-
-	if (mem_init_done) {
-		while ((pte = (pte_t *) __get_free_page(GFP_KERNEL)) == NULL
-		       && ++timeout < 10) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(HZ);
-		}
-	} else
-		pte = (pte_t *) early_get_page();
-	if (pte != NULL)
-		clear_page(pte);
-	return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *pte;
-	int timeout = 0;
-#ifdef CONFIG_HIGHPTE
-	int flags = GFP_KERNEL | __GFP_HIGHMEM;
-#else
-	int flags = GFP_KERNEL;
-#endif
-
-	while ((pte = alloc_pages(flags, 0)) == NULL) {
-		if (++timeout >= 10)
-			return NULL;
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(HZ);
-	}
-	clear_highpage(pte);
-	return pte;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-	__free_page(pte);
-}
-
 #define pmd_populate_kernel(mm, pmd, pte)	\
 		(pmd_val(*(pmd)) = __pa(pte))
 #define pmd_populate(mm, pmd, pte)	\
 		(pmd_val(*(pmd)) = ((pte) - mem_map) << PAGE_SHIFT)
 
-extern int do_check_pgt_cache(int, int);
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern void pte_free_kernel(pte_t *pte);
+extern void pte_free(struct page *pte);
+
+#define check_pgt_cache()	do { } while (0)
 
 #endif /* _PPC_PGALLOC_H */
 #endif /* __KERNEL__ */
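The pmd_populate() macro that stays behind deserves a note: a PPC pmd entry holds a physical address, and the struct-page variant computes it without __pa() because, with a flat mem_map[] array, (pte - mem_map) is exactly the page frame number. A hypothetical helper spelling out the arithmetic (not in the patch):

/* Hypothetical helper making pmd_populate()'s arithmetic explicit. */
static unsigned long page_to_phys_sketch(struct page *pte)
{
	extern struct page *mem_map;		/* flat page array */
	unsigned long pfn = pte - mem_map;	/* array index == PFN */

	return pfn << PAGE_SHIFT;	/* e.g. PFN 0x1234 -> 0x01234000 */
}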
@@ -13,95 +13,6 @@
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
-#include <asm/kmap_types.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-#if defined(CONFIG_4xx)
-#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void local_flush_tlb_all(void)
-	{ __tlbia(); }
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
-					unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void local_flush_tlb_range(struct mm_struct *mm,
-					 unsigned long start, unsigned long end)
-	{ __tlbia(); }
-#define update_mmu_cache(vma, addr, pte)	do { } while (0)
-
-#elif defined(CONFIG_8xx)
-#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void local_flush_tlb_all(void)
-	{ __tlbia(); }
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
-					unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void local_flush_tlb_range(struct mm_struct *mm,
-					 unsigned long start, unsigned long end)
-	{ __tlbia(); }
-#define update_mmu_cache(vma, addr, pte)	do { } while (0)
-
-#else	/* 6xx, 7xx, 7xxx cpus */
-struct mm_struct;
-struct vm_area_struct;
-extern void local_flush_tlb_all(void);
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-				  unsigned long end);
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to put a corresponding HPTE into the hash table
- * ahead of time, instead of waiting for the inevitable extra
- * hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-#endif
-
-#define flush_tlb_all local_flush_tlb_all
-#define flush_tlb_mm local_flush_tlb_mm
-#define flush_tlb_page local_flush_tlb_page
-#define flush_tlb_range local_flush_tlb_range
-
-/*
- * This is called in munmap when we have freed up some page-table
- * pages.  We don't need to do anything here, there's nothing special
- * about our page-table pages.  -- paulus
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.  -- paulus
- * Also, when SMP we use the coherency (M) bit of the
- * BATs and PTEs.  -- Cort
- */
-#define flush_cache_all()		do { } while (0)
-#define flush_cache_mm(mm)		do { } while (0)
-#define flush_cache_range(vma, a, b)	do { } while (0)
-#define flush_cache_page(vma, p)	do { } while (0)
-#define flush_page_to_ram(page)		do { } while (0)
-
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-		struct page *page, unsigned long addr, int len);
-extern void flush_icache_range(unsigned long, unsigned long);
-extern void __flush_dcache_icache(void *page_va);
-extern void __flush_dcache_icache_phys(unsigned long physaddr);
-extern void flush_dcache_page(struct page *page);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
...
@@ -68,6 +68,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TI_FLAGS	8
 #define TI_CPU		12
 
+#define PREEMPT_ACTIVE		0x4000000
+
 /*
  * thread information flag bit numbers
  */
...
/*
 * BK Id: %F% %I% %G% %U% %#%
 */
/*
 * include/asm-ppc/tlbflush.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__
#ifndef _PPC_TLBFLUSH_H
#define _PPC_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>

extern void _tlbie(unsigned long address);
extern void _tlbia(void);

#if defined(CONFIG_4xx)
#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")

static inline void local_flush_tlb_all(void)
	{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
	{ _tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{ __tlbia(); }
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

#elif defined(CONFIG_8xx)
#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")

static inline void local_flush_tlb_all(void)
	{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
	{ _tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{ __tlbia(); }
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

#else	/* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				  unsigned long end);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif

#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /* _PPC_TLBFLUSH_H */
#endif /* __KERNEL__ */
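To see the split-out interface in context: a typical caller changes a PTE and then invalidates the stale translation for just that address — on 4xx/8xx the inlines above do this directly with tlbie/tlbia, while 6xx-class CPUs additionally pre-seed the hash table via update_mmu_cache() after a fault. A sketch (the function name is illustrative; pte_clear follows the 2.5-era single-argument form):

/* Sketch: unmap one user page, then flush its stale TLB entry. */
static void unmap_one_sketch(struct vm_area_struct *vma,
			     unsigned long addr, pte_t *ptep)
{
	pte_clear(ptep);		/* remove the translation ... */
	flush_tlb_page(vma, addr);	/* ... and the TLB's cached copy */
}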