Commit 5e2995a5 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.5
into home.osdl.org:/home/torvalds/v2.5/linux

parents 67e6a91e 889be673
@@ -14,6 +14,9 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>

+#include <asm/processor.h>
+#include <asm/spitfire.h>
+
 static struct vm_struct * modvmlist = NULL;

 static void module_unmap(void * addr)
@@ -279,6 +282,16 @@ int module_finalize(const Elf_Ehdr *hdr,
                     const Elf_Shdr *sechdrs,
                     struct module *me)
 {
+        /* Cheetah's I-cache is fully coherent. */
+        if (tlb_type == spitfire) {
+                unsigned long va;
+
+                flushw_all();
+                for (va = 0; va < (PAGE_SIZE << 1); va += 32)
+                        spitfire_put_icache_tag(va, 0x0);
+                __asm__ __volatile__("flush %g6");
+        }
+
         return 0;
 }
...
@@ -118,7 +118,6 @@ void __init smp_callin(void)
         inherit_locked_prom_mappings(0);

-        __flush_cache_all();
         __flush_tlb_all();

         smp_setup_percpu_timer();
@@ -661,7 +660,6 @@ extern unsigned long xcall_flush_tlb_range;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_flush_tlb_all_spitfire;
 extern unsigned long xcall_flush_tlb_all_cheetah;
-extern unsigned long xcall_flush_cache_all_spitfire;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -776,15 +774,6 @@ void smp_report_regs(void)
         smp_cross_call(&xcall_report_regs, 0, 0, 0);
 }

-void smp_flush_cache_all(void)
-{
-        /* Cheetah need do nothing. */
-        if (tlb_type == spitfire) {
-                smp_cross_call(&xcall_flush_cache_all_spitfire, 0, 0, 0);
-                __flush_cache_all();
-        }
-}
-
 void smp_flush_tlb_all(void)
 {
         if (tlb_type == spitfire)
...
@@ -1025,19 +1025,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
         }
 }

-void __flush_cache_all(void)
-{
-        /* Cheetah should be fine here too. */
-        if (tlb_type == spitfire) {
-                unsigned long va;
-
-                flushw_all();
-                for (va = 0; va < (PAGE_SIZE << 1); va += 32)
-                        spitfire_put_icache_tag(va, 0x0);
-                __asm__ __volatile__("flush %g6");
-        }
-}
-
 /* If not locked, zap it. */
 void __flush_tlb_all(void)
 {
...
@@ -721,20 +721,6 @@ xcall_flush_tlb_all_cheetah:
         stxa    %g0, [%g2] ASI_IMMU_DEMAP
         retry

-        .globl xcall_flush_cache_all_spitfire
-xcall_flush_cache_all_spitfire:
-        sethi           %hi(16383), %g2
-        or              %g2, %lo(16383), %g2
-        clr             %g3
-1:      stxa            %g0, [%g3] ASI_IC_TAG
-        membar          #Sync
-        add             %g3, 32, %g3
-        cmp             %g3, %g2
-        bleu,pt         %xcc, 1b
-         nop
-        flush           %g6
-        retry
-
         /* These just get rescheduled to PIL vectors. */
         .globl xcall_call_function
 xcall_call_function:
...
@@ -169,7 +169,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
         }

         /* Create the ELF interpreter info */
-        elf_info = current->mm->saved_auxv;
+        elf_info = (elf_addr_t *) current->mm->saved_auxv;
 #define NEW_AUX_ENT(id, val) \
         do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
@@ -197,8 +197,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
         if (k_platform) {
                 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
         }
-        NEW_AUX_ENT(AT_NULL, 0);
 #undef NEW_AUX_ENT
+
+        /* AT_NULL is zero; clear the rest too */
+        memset(&elf_info[ei_index], 0,
+               sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
+
+        /* And advance past the AT_NULL entry. */
+        ei_index += 2;

         sp = STACK_ADD(p, ei_index);
@@ -1209,6 +1214,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
         elf_fpxregset_t *xfpu = NULL;
 #endif
         int thread_status_size = 0;
+        elf_addr_t *auxv;

         /*
          * We no longer stop all VM operations.
@@ -1290,13 +1296,14 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
         numnote = 3;

+        auxv = (elf_addr_t *) current->mm->saved_auxv;
+
         i = 0;
         do
                 i += 2;
-        while (current->mm->saved_auxv[i - 2] != AT_NULL);
+        while (auxv[i - 2] != AT_NULL);
         fill_note(&notes[numnote++], "CORE", NT_AUXV,
-                  i * sizeof current->mm->saved_auxv[0],
-                  current->mm->saved_auxv);
+                  i * sizeof (elf_addr_t), auxv);

         /* Try to dump the FPU. */
         if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
...
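A note on the binfmt_elf hunks above: the core-dump path now goes through a local auxv pointer and sizes the NT_AUXV note by counting two words per entry until it has consumed the AT_NULL terminator. The following is a minimal stand-alone sketch of that walk in ordinary user-space C over a toy array (illustration only, not part of the commit; the AT_* constants come from glibc's <elf.h>):

/*
 * Stand-alone illustration of how the core-dump hunk sizes the NT_AUXV
 * note: entries are (id, value) word pairs and the vector ends with an
 * AT_NULL id, so the loop advances two words at a time and stops only
 * after it has also counted the terminator pair.
 */
#include <stdio.h>
#include <elf.h>                /* AT_NULL, AT_PAGESZ, AT_PHNUM */

int main(void)
{
        /* Toy auxv, laid out the way create_elf_tables() fills saved_auxv. */
        unsigned long auxv[] = {
                AT_PAGESZ, 4096,
                AT_PHNUM,  7,
                AT_NULL,   0,   /* terminator, included in the note */
        };
        int i = 0;

        do
                i += 2;
        while (auxv[i - 2] != AT_NULL);

        printf("NT_AUXV payload: %d words, %zu bytes\n",
               i, i * sizeof(auxv[0]));
        return 0;
}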
@@ -33,18 +33,6 @@ extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
 extern void __flush_dcache_range(unsigned long start, unsigned long end);

-extern void __flush_cache_all(void);
-
-#ifndef CONFIG_SMP
-
-#define flush_cache_all() __flush_cache_all()
-
-#else /* CONFIG_SMP */
-
-extern void smp_flush_cache_all(void);
-
-#endif /* ! CONFIG_SMP */
-
 #define flush_icache_page(vma, pg)      do { } while(0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
@@ -55,7 +43,7 @@ extern void smp_flush_cache_all(void);
 extern void flush_dcache_page(struct page *page);

-#define flush_cache_vmap(start, end)    flush_cache_all()
-#define flush_cache_vunmap(start, end)  flush_cache_all()
+#define flush_cache_vmap(start, end)    do { } while (0)
+#define flush_cache_vunmap(start, end)  do { } while (0)

 #endif /* _SPARC64_CACHEFLUSH_H */
@@ -70,7 +70,6 @@ extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);

-#define flush_cache_all()       smp_flush_cache_all()
 #define flush_tlb_all()         smp_flush_tlb_all()
 #define flush_tlb_mm(mm)        smp_flush_tlb_mm(mm)
 #define flush_tlb_range(vma, start, end)        \
...