Commit c33725ec authored by Linus Torvalds

Merge bk://kernel.bkbits.net/lord/xfs-2.6

into laptop.osdl.org:/home/torvalds/v2.5/linux
parents 4c2e1cee a37dfa1d
......@@ -172,22 +172,6 @@ bp:; $(Q)$(MAKE) $(build)=$(boot) $(boot)/bootpImage
i:; $(Q)$(MAKE) $(build)=$(boot) install
zi:; $(Q)$(MAKE) $(build)=$(boot) zinstall
#
# Configuration targets. Use these to select a
# configuration for your architecture
%_config:
@( \
CFG=$(@:_config=); \
if [ -f arch/arm/def-configs/$$CFG ]; then \
[ -f .config ] && mv -f .config .config.old; \
cp arch/arm/def-configs/$$CFG .config; \
echo "*** Default configuration for $$CFG installed"; \
echo "*** Next, you may run 'make oldconfig'"; \
else \
echo "$$CFG does not exist"; \
fi; \
)
arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
include/asm-arm/.arch \
include/config/MARKER
......
/*
* linux/arch/arm/boot/bootp/init.S
*
* Copyright (C) 2000-2002 Russell King
* Copyright (C) 2000-2003 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Header file for splitting kernel + initrd. Note that we pass
* "Header" file for splitting kernel + initrd. Note that we pass
* r0-r3 straight through.
*
* This demonstrates how to append code to the start of the kernel
* zImage, and boot the kernel without copying it around. This
* example would be simpler if we didn't have an object of unknown
* size immediately following the kernel; then we could build this into
* a binary blob and concatenate it with the zImage using the cat command.
*/
.section .start,#alloc,#execinstr
.type _start, #function
.globl _start
_start: adr r10, initdata
ldr r11, initdata
sub r11, r10, r11 @ work out exec offset
b splitify
.size _entry,. - _entry
.type initdata, #object
initdata: .word initdata @ compiled address of this
.size initdata,. - initdata
splitify: adr r13, data
ldmia r13!, {r4-r6} @ move the initrd
add r4, r4, r11 @ correction
bl move
_start: adr r12, kernel_start @ offset of kernel zImage
ldr r4, [r12, #0x2c] @ length of zImage
adr r13, data
add r4, r4, r12 @ end of zImage, start of initrd
ldmia r13!, {r5-r6} @ r5 = dest, r6 = length
bl move @ move the initrd
/*
* Setup the initrd parameters to pass to the kernel. This can either be
* passed in via a param_struct or a tag list. We spot the param_struct
* method by looking at the first word; this should indicate a page
* size of 4K, 16K or 32K.
* Setup the initrd parameters to pass to the kernel. This can only be
* passed in via the tagged list.
*/
ldmia r13, {r4-r8} @ get size and addr of initrd
@ r5 = ATAG_INITRD
@ r6 = initrd start
@ r7 = initrd end
@ r8 = param_struct address
ldr r9, [r8, #0] @ no param struct?
teq r9, #0x1000 @ 4K?
teqne r9, #0x4000 @ 16K?
teqne r9, #0x8000 @ 32K?
beq param_struct
ldr r9, [r8, #4] @ get first tag
teq r9, r4
bne taglist @ ok, we have a tag list
ldmia r13, {r5-r9} @ get size and addr of initrd
@ r5 = ATAG_CORE
@ r6 = ATAG_INITRD2
@ r7 = initrd start
@ r8 = initrd end
@ r9 = param_struct address
ldr r10, [r9, #4] @ get first tag
teq r10, r5 @ is it ATAG_CORE?
/*
* We didn't find a valid tag list - create one.
* If we didn't find a valid tag list, create a dummy ATAG_CORE entry.
*/
str r4, [r8, #4]
mov r4, #8
str r4, [r8, #0]
mov r4, #0
str r4, [r8, #8]
movne r10, #0 @ terminator
movne r4, #2 @ Size of this entry (2 words)
stmneia r8, {r4, r5, r10} @ Size, ATAG_CORE, terminator
/*
* find the end of the tag list, and then add an INITRD tag on the end.
* If there is already an INITRD tag, then we ignore it; the last INITRD
* tag takes precedence.
*/
taglist: ldr r9, [r8, #0] @ tag length
teq r9, #0 @ last tag?
addne r8, r8, r9
taglist: ldr r10, [r9, #0] @ tag length
teq r10, #0 @ last tag (zero length)?
addne r9, r9, r10, lsl #2
bne taglist
mov r4, #16 @ length of initrd tag
mov r9, #0 @ end of tag list terminator
stmia r8, {r4, r5, r6, r7, r9}
adr r12, kernel_start
mov r5, #4 @ Size of initrd tag (4 words)
stmia r9, {r5, r6, r7, r8, r10}
mov pc, r12 @ call kernel
/*
* We found a param struct. Modify the param struct for the initrd
* Move the block of memory length r6 from address r4 to address r5
*/
param_struct: add r8, r8, #16*4
stmia r8, {r6,r7} @ save in param_struct
mov pc, r12 @ call kernel
move: ldmia r4!, {r7 - r10} @ move 32-bytes at a time
stmia r5!, {r7 - r10}
ldmia r4!, {r7 - r10}
......@@ -89,15 +72,18 @@ move: ldmia r4!, {r7 - r10} @ move 32-bytes at a time
bcs move
mov pc, lr
data: .word initrd_start
.word initrd_addr
.word initrd_len
.size _start, . - _start
.type data,#object
data: .word initrd_addr @ destination initrd address
.word initrd_len @ initrd size
.word 0x54410001 @ r4 = ATAG_CORE
.word 0x54420005 @ r5 = ATAG_INITRD
.word 0x54420005 @ r5 = ATAG_INITRD2
.word initrd_addr @ r6
.word initrd_len @ r7
.word params @ r8
.size data, . - _data
.type initrd_start,#object
......
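The bootp/init.S change above drops the old param_struct path and passes the initrd location via the ARM tagged list instead: if no valid list is found it fabricates a two-word ATAG_CORE header, then it walks the list to the terminating zero-length tag, appends an ATAG_INITRD2 entry and re-terminates the list before jumping to the zImage. A rough C model of that walk-and-append (struct layout and helper names are invented for the sketch; the tag values are the ones stored by the assembly):

#include <stdint.h>

#define ATAG_NONE	0x00000000
#define ATAG_CORE	0x54410001
#define ATAG_INITRD2	0x54420005

struct tag_header {
	uint32_t size;			/* tag length in 32-bit words, header included */
	uint32_t tag;			/* ATAG_* identifier */
};

static struct tag_header *next_tag(struct tag_header *t)
{
	return (struct tag_header *)((uint32_t *)t + t->size);
}

/* Append an ATAG_INITRD2 entry describing the relocated initrd and
 * re-terminate the list, as the taglist: loop above does. */
static void append_initrd2(struct tag_header *list, uint32_t start, uint32_t size)
{
	struct tag_header *t = list;
	uint32_t *p;

	while (t->size != 0)		/* a zero-length tag ends the list */
		t = next_tag(t);

	p = (uint32_t *)t;
	p[0] = 4;			/* four words: size, tag, start, length */
	p[1] = ATAG_INITRD2;
	p[2] = start;			/* physical address of the initrd */
	p[3] = size;			/* initrd length in bytes */
	p[4] = 0;			/* zero size terminates; the assembly stores just this word */
	p[5] = ATAG_NONE;		/* full ATAG_NONE header, for completeness */
}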
......@@ -729,7 +729,7 @@ __dabt_svc: sub sp, sp, #S_FRAME_SIZE
* This routine must not corrupt r9
*/
#ifdef MULTI_ABORT
ldr r4, .LCprocfns @ pass r0, r3 to
ldr r4, .LCprocfns @ pass r2, r3 to
mov lr, pc @ processor code
ldr pc, [r4] @ call processor specific code
#else
......@@ -871,7 +871,7 @@ __dabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
alignment_trap r7, r7, __temp_abt
zero_fp
#ifdef MULTI_ABORT
ldr r4, .LCprocfns @ pass r0, r3 to
ldr r4, .LCprocfns @ pass r2, r3 to
mov lr, pc @ processor code
ldr pc, [r4] @ call processor specific code
#else
......
......@@ -9,7 +9,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/hardware.h>
.insb_align: rsb ip, ip, #4
cmp ip, r2
......@@ -37,32 +36,36 @@ ENTRY(__raw_readsb)
.insb_16_lp: ldrb r3, [r0]
ldrb r4, [r0]
orr r3, r3, r4, lsl #8
ldrb r4, [r0]
orr r3, r3, r4, lsl #16
ldrb r4, [r0]
orr r3, r3, r4, lsl #24
ldrb r4, [r0]
ldrb r5, [r0]
orr r4, r4, r5, lsl #8
ldrb r5, [r0]
orr r4, r4, r5, lsl #16
ldrb r5, [r0]
orr r4, r4, r5, lsl #24
ldrb r5, [r0]
mov r3, r3, lsl #byte(0)
ldrb r6, [r0]
orr r5, r5, r6, lsl #8
ldrb r6, [r0]
orr r5, r5, r6, lsl #16
orr r3, r3, r4, lsl #byte(1)
ldrb r4, [r0]
orr r3, r3, r5, lsl #byte(2)
ldrb r5, [r0]
orr r3, r3, r6, lsl #byte(3)
ldrb r6, [r0]
orr r5, r5, r6, lsl #24
mov r4, r4, lsl #byte(0)
ldrb ip, [r0]
orr r4, r4, r5, lsl #byte(1)
ldrb r5, [r0]
orr r4, r4, r6, lsl #byte(2)
ldrb r6, [r0]
orr r4, r4, ip, lsl #byte(3)
ldrb ip, [r0]
orr r6, r6, ip, lsl #8
mov r5, r5, lsl #byte(0)
ldrb lr, [r0]
orr r5, r5, r6, lsl #byte(1)
ldrb r6, [r0]
orr r5, r5, ip, lsl #byte(2)
ldrb ip, [r0]
orr r6, r6, ip, lsl #16
orr r5, r5, lr, lsl #byte(3)
ldrb lr, [r0]
mov r6, r6, lsl #byte(0)
orr r6, r6, ip, lsl #byte(1)
ldrb ip, [r0]
orr r6, r6, ip, lsl #24
orr r6, r6, lr, lsl #byte(2)
orr r6, r6, ip, lsl #byte(3)
stmia r1!, {r3 - r6}
subs r2, r2, #16
......@@ -76,18 +79,20 @@ ENTRY(__raw_readsb)
ldrb r3, [r0]
ldrb r4, [r0]
orr r3, r3, r4, lsl #8
ldrb r4, [r0]
orr r3, r3, r4, lsl #16
ldrb r4, [r0]
orr r3, r3, r4, lsl #24
ldrb r4, [r0]
ldrb r5, [r0]
orr r4, r4, r5, lsl #8
ldrb r5, [r0]
orr r4, r4, r5, lsl #16
mov r3, r3, lsl #byte(0)
ldrb r6, [r0]
orr r3, r3, r4, lsl #byte(1)
ldrb r4, [r0]
orr r3, r3, r5, lsl #byte(2)
ldrb r5, [r0]
orr r4, r4, r5, lsl #24
orr r3, r3, r6, lsl #byte(3)
ldrb r6, [r0]
mov r4, r4, lsl #byte(0)
ldrb ip, [r0]
orr r4, r4, r5, lsl #byte(1)
orr r4, r4, r6, lsl #byte(2)
orr r4, r4, ip, lsl #byte(3)
stmia r1!, {r3, r4}
.insb_no_8: tst r2, #4
......@@ -95,11 +100,12 @@ ENTRY(__raw_readsb)
ldrb r3, [r0]
ldrb r4, [r0]
orr r3, r3, r4, lsl #8
ldrb r4, [r0]
orr r3, r3, r4, lsl #16
ldrb r4, [r0]
orr r3, r3, r4, lsl #24
ldrb r5, [r0]
ldrb r6, [r0]
mov r3, r3, lsl #byte(0)
orr r3, r3, r4, lsl #byte(1)
orr r3, r3, r5, lsl #byte(2)
orr r3, r3, r6, lsl #byte(3)
str r3, [r1], #4
.insb_no_4: ands r2, r2, #3
......
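The __raw_readsb rewrite above replaces hard-coded lsl #8/#16/#24 shifts with a byte(N) shift macro so that four successive byte reads are packed into a word that stores back to memory in arrival order on both little- and big-endian kernels. A C sketch of one four-byte step, assuming byte(n) expands to 8*n on little-endian and 8*(3-n) on big-endian builds (that expansion is an assumption here, not quoted from the patch):

#include <stdint.h>

/* Pack four bytes read from a byte-wide port into one 32-bit word so that a
 * plain word store writes them to memory in the order they arrived, whatever
 * the CPU endianness.  'port' is a placeholder for the device register. */
static inline uint32_t pack4(volatile uint8_t *port, int big_endian)
{
	uint32_t b0 = *port, b1 = *port, b2 = *port, b3 = *port;

	if (!big_endian)
		return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);	/* byte(n) = 8*n */
	else
		return (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;	/* byte(n) = 8*(3-n) */
}

The stmia that follows each packed group then reproduces the byte stream in memory regardless of endianness; the outword macro added to io-writesb.S a little further down is the mirror image for the store direction.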
......@@ -9,7 +9,26 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/hardware.h>
.macro outword, rd
#ifndef __ARMEB__
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
mov \rd, \rd, lsr #8
strb \rd, [r0]
#else
mov lr, \rd, lsr #24
strb lr, [r0]
mov lr, \rd, lsr #16
strb lr, [r0]
mov lr, \rd, lsr #8
strb lr, [r0]
strb \rd, [r0]
#endif
.endm
.outsb_align: rsb ip, ip, #4
cmp ip, r2
......@@ -30,86 +49,37 @@ ENTRY(__raw_writesb)
ands ip, r1, #3
bne .outsb_align
.outsb_aligned: stmfd sp!, {r4 - r6, lr}
.outsb_aligned: stmfd sp!, {r4, r5, lr}
subs r2, r2, #16
bmi .outsb_no_16
.outsb_16_lp: ldmia r1!, {r3 - r6}
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
strb r4, [r0]
mov r4, r4, lsr #8
strb r4, [r0]
mov r4, r4, lsr #8
strb r4, [r0]
mov r4, r4, lsr #8
strb r4, [r0]
strb r5, [r0]
mov r5, r5, lsr #8
strb r5, [r0]
mov r5, r5, lsr #8
strb r5, [r0]
mov r5, r5, lsr #8
strb r5, [r0]
strb r6, [r0]
mov r6, r6, lsr #8
strb r6, [r0]
mov r6, r6, lsr #8
strb r6, [r0]
mov r6, r6, lsr #8
strb r6, [r0]
.outsb_16_lp: ldmia r1!, {r3, r4, r5, ip}
outword r3
outword r4
outword r5
outword ip
subs r2, r2, #16
bpl .outsb_16_lp
tst r2, #15
LOADREGS(eqfd, sp!, {r4 - r6, pc})
LOADREGS(eqfd, sp!, {r4, r5, pc})
.outsb_no_16: tst r2, #8
beq .outsb_no_8
ldmia r1!, {r3, r4}
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
strb r4, [r0]
mov r4, r4, lsr #8
strb r4, [r0]
mov r4, r4, lsr #8
strb r4, [r0]
mov r4, r4, lsr #8
strb r4, [r0]
outword r3
outword r4
.outsb_no_8: tst r2, #4
beq .outsb_no_4
ldr r3, [r1], #4
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
mov r3, r3, lsr #8
strb r3, [r0]
outword r3
.outsb_no_4: ands r2, r2, #3
LOADREGS(eqfd, sp!, {r4 - r6, pc})
LOADREGS(eqfd, sp!, {r4, r5, pc})
cmp r2, #2
ldrb r3, [r1], #1
......@@ -119,4 +89,4 @@ ENTRY(__raw_writesb)
ldrgtb r3, [r1]
strgtb r3, [r0]
LOADREGS(fd, sp!, {r4 - r6, pc})
LOADREGS(fd, sp!, {r4, r5, pc})
......@@ -17,10 +17,15 @@ ENTRY(__raw_writesl)
ands ip, r1, #3
bne 2f
1: ldr r3, [r1], #4
str r3, [r0]
subs r2, r2, #1
bne 1b
tst r2, #1
ldrne r3, [r1], #4
strne r3, [r0, #0]
1: subs r2, r2, #2
ldrcs r3, [r1], #4
ldrcs ip, [r1], #4
strcs r3, [r0, #0]
strcs ip, [r0, #0]
bcs 1b
mov pc, lr
2: bic r1, r1, #3
......@@ -31,25 +36,25 @@ ENTRY(__raw_writesl)
3: mov ip, r3, lsr #16
ldr r3, [r1], #4
orr ip, ip, r3, lsl #16
str ip, [r0]
subs r2, r2, #1
orr ip, ip, r3, lsl #16
str ip, [r0, #0]
bne 3b
mov pc, lr
4: mov ip, r3, lsr #24
ldr r3, [r1], #4
orr ip, ip, r3, lsl #8
str ip, [r0]
subs r2, r2, #1
orr ip, ip, r3, lsl #8
str ip, [r0, #0]
bne 4b
mov pc, lr
5: mov ip, r3, lsr #8
ldr r3, [r1], #4
orr ip, ip, r3, lsl #24
str ip, [r0]
subs r2, r2, #1
orr ip, ip, r3, lsl #24
str ip, [r0, #0]
bne 5b
mov pc, lr
......
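The io-writesl.S hunk handles a source buffer that is not word-aligned by assembling each output word from two adjacent aligned loads, shifted by the misalignment (the 3:, 4: and 5: paths correspond to offsets of 2, 3 and 1 bytes), and it also unrolls the aligned fast path to two words per iteration. A little-endian C model of the unaligned case (names invented; the big-endian variant would use the opposite shifts):

#include <stddef.h>
#include <stdint.h>

/* Write 'count' 32-bit words to a fixed port from a byte pointer that may be
 * misaligned: each output word takes its low bytes from the current aligned
 * word and its high bytes from the next one. */
static void writesl_unaligned(volatile uint32_t *port,
			      const uint8_t *from, size_t count)
{
	unsigned int off = (uintptr_t)from & 3;			/* 1, 2 or 3; 0 goes to the aligned path */
	const uint32_t *p = (const uint32_t *)(from - off);	/* round down to a word boundary */
	uint32_t cur = *p++;

	while (count--) {
		uint32_t next = *p++;
		*port = (cur >> (off * 8)) | (next << (32 - off * 8));
		cur = next;
	}
}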
......@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
......@@ -107,6 +108,9 @@ ENTRY(v3_dma_flush_range)
ENTRY(v3_dma_clean_range)
mov pc, lr
__INITDATA
.type v3_cache_fns, #object
ENTRY(v3_cache_fns)
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
......@@ -116,3 +120,4 @@ ENTRY(v3_cache_fns)
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
.size v3_cache_fns, . - v3_cache_fns
......@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
......@@ -109,6 +110,9 @@ ENTRY(v4_dma_flush_range)
ENTRY(v4_dma_clean_range)
mov pc, lr
__INITDATA
.type v4_cache_fns, #object
ENTRY(v4_cache_fns)
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
......@@ -118,3 +122,4 @@ ENTRY(v4_cache_fns)
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
.size v4_cache_fns, . - v4_cache_fns
......@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
......@@ -185,6 +186,9 @@ ENTRY(v4wb_dma_clean_range)
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
__INITDATA
.type v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
......@@ -194,3 +198,4 @@ ENTRY(v4wb_cache_fns)
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
.size v4wb_cache_fns, . - v4wb_cache_fns
......@@ -12,6 +12,7 @@
* We assume that the write buffer is not enabled.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
......@@ -158,6 +159,9 @@ ENTRY(v4wt_dma_clean_range)
.globl v4wt_dma_flush_range
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
__INITDATA
.type v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
......@@ -167,4 +171,4 @@ ENTRY(v4wt_cache_fns)
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
.size v4wt_cache_fns, . - v4wt_cache_fns
......@@ -58,8 +58,10 @@ ENTRY(v3_clear_user_page)
bne 1b @ 1
ldr pc, [sp], #4
__INIT
__INITDATA
.type v3_user_fns, #object
ENTRY(v3_user_fns)
.long v3_clear_user_page
.long v3_copy_user_page
.size v3_user_fns, . - v3_user_fns
......@@ -71,9 +71,10 @@ ENTRY(v4_mc_clear_user_page)
bne 1b @ 1
ldr pc, [sp], #4
__INIT
__INITDATA
.type v4_mc_user_fns, #object
ENTRY(v4_mc_user_fns)
.long v4_mc_clear_user_page
.long v4_mc_copy_user_page
.size v4_mc_user_fns, . - v4_mc_user_fns
......@@ -70,9 +70,10 @@ ENTRY(v4wb_clear_user_page)
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
ldr pc, [sp], #4
__INIT
__INITDATA
.type v4wb_user_fns, #object
ENTRY(v4wb_user_fns)
.long v4wb_clear_user_page
.long v4wb_copy_user_page
.size v4wb_user_fns, . - v4wb_user_fns
......@@ -64,9 +64,10 @@ ENTRY(v4wt_clear_user_page)
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
ldr pc, [sp], #4
__INIT
__INITDATA
.type v4wt_user_fns, #object
ENTRY(v4wt_user_fns)
.long v4wt_clear_user_page
.long v4wt_copy_user_page
.size v4wt_user_fns, . - v4wt_user_fns
......@@ -104,8 +104,10 @@ ENTRY(xscale_mc_clear_user_page)
bne 1b
mov pc, lr
__INIT
__INITDATA
.type xscale_mc_user_fns, #object
ENTRY(xscale_mc_user_fns)
.long xscale_mc_clear_user_page
.long xscale_mc_copy_user_page
.size xscale_mc_user_fns, . - xscale_mc_user_fns
......@@ -409,31 +409,9 @@ ENTRY(cpu_arm1020_set_pte)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(cpu_arm1020_name)
.ascii "ARM1020"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.align
__INIT
.type __arm1020_setup, #function
__arm1020_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
......@@ -467,8 +445,9 @@ __arm1020_setup:
orr r0, r0, #0x1000 @ I Cache on
#endif
mov pc, lr
.size __arm1020_setup, . - __arm1020_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -484,9 +463,10 @@ arm1020_processor_functions:
.word cpu_arm1020_dcache_clean_area
.word cpu_arm1020_switch_mm
.word cpu_arm1020_set_pte
.size arm1020_processor_functions, . - arm1020_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5t"
......@@ -496,6 +476,30 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm1020_name, #object
cpu_arm1020_name:
.ascii "ARM1020"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.size cpu_arm1020_name, . - cpu_arm1020_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -392,31 +392,9 @@ ENTRY(cpu_arm1020e_set_pte)
#endif
mov pc, lr
ENTRY(cpu_arm1020e_name)
.ascii "ARM1020E"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.align
__INIT
.type __arm1020e_setup, #function
__arm1020e_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
......@@ -450,8 +428,9 @@ __arm1020e_setup:
orr r0, r0, #0x1000 @ I Cache on
#endif
mov pc, lr
.size __arm1020e_setup, . - __arm1020e_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -467,9 +446,10 @@ arm1020e_processor_functions:
.word cpu_arm1020e_dcache_clean_area
.word cpu_arm1020e_switch_mm
.word cpu_arm1020e_set_pte
.size arm1020e_processor_functions, . - arm1020e_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5te"
......@@ -479,6 +459,30 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm1020e_name, #object
cpu_arm1020e_name:
.ascii "ARM1020E"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.size cpu_arm1020e_name, . - cpu_arm1020e_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -372,31 +372,9 @@ ENTRY(cpu_arm1022_set_pte)
#endif
mov pc, lr
ENTRY(cpu_arm1022_name)
.ascii "arm1022"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.align
__INIT
.type __arm1022_setup, #function
__arm1022_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
......@@ -430,8 +408,9 @@ __arm1022_setup:
orr r0, r0, #0x1000 @ ...I............
#endif
mov pc, lr
.size __arm1022_setup, . - __arm1022_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -447,9 +426,10 @@ arm1022_processor_functions:
.word cpu_arm1022_dcache_clean_area
.word cpu_arm1022_switch_mm
.word cpu_arm1022_set_pte
.size arm1022_processor_functions, . - arm1022_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5te"
......@@ -459,6 +439,30 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm1022_name, #object
cpu_arm1022_name:
.ascii "arm1022"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.size cpu_arm1022_name, . - cpu_arm1022_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -365,6 +365,7 @@ ENTRY(cpu_arm1026_set_pte)
__INIT
.type __arm1026_setup, #function
__arm1026_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
......@@ -402,6 +403,9 @@ __arm1026_setup:
orr r0, r0, #0x1000 @ ...I............
#endif
mov pc, lr
.size __arm1026_setup, . - __arm1026_setup
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -409,7 +413,7 @@ __arm1026_setup:
*/
.type arm1026_processor_functions, #object
arm1026_processor_functions:
.word ev5t_early_abort
.word v5t_early_abort
.word cpu_arm1026_proc_init
.word cpu_arm1026_proc_fin
.word cpu_arm1026_reset
......@@ -417,7 +421,6 @@ arm1026_processor_functions:
.word cpu_arm1026_dcache_clean_area
.word cpu_arm1026_switch_mm
.word cpu_arm1026_set_pte
.size arm1026_processor_functions, . - arm1026_processor_functions
.section .rodata
......@@ -434,7 +437,7 @@ cpu_elf_name:
.align
.type cpu_arm1026_name, #object
ENTRY(cpu_arm1026_name)
cpu_arm1026_name:
.ascii "ARM1026EJ-S"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
......@@ -454,9 +457,10 @@ ENTRY(cpu_arm1026_name)
.ascii "RR"
#endif
.ascii "\0"
.align
.size cpu_arm1026_name, . - cpu_arm1026_name
.align
.section ".proc.info", #alloc, #execinstr
.type __arm1026_proc_info,#object
......
......@@ -247,16 +247,9 @@ ENTRY(cpu_arm7_reset)
mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
mov pc, r0
cpu_arm6_name: .asciz "ARM6"
cpu_arm610_name:
.asciz "ARM610"
cpu_arm7_name: .asciz "ARM7"
cpu_arm710_name:
.asciz "ARM710"
.align
__INIT
.type __arm6_setup, #function
__arm6_setup: mov r0, #0
mcr p15, 0, r0, c7, c0 @ flush caches on v3
mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
......@@ -266,7 +259,9 @@ __arm6_setup: mov r0, #0
mov r0, #0x3d @ . ..RS BLDP WCAM
orr r0, r0, #0x100 @ . ..01 0011 1101
mov pc, lr
.size __arm6_setup, . - __arm6_setup
.type __arm7_setup, #function
__arm7_setup: mov r0, #0
mcr p15, 0, r0, c7, c0 @ flush caches on v3
mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
......@@ -276,6 +271,9 @@ __arm7_setup: mov r0, #0
mov r0, #0x7d @ . ..RS BLDP WCAM
orr r0, r0, #0x100 @ . ..01 0111 1101
mov pc, lr
.size __arm7_setup, . - __arm7_setup
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -309,6 +307,8 @@ ENTRY(arm7_processor_functions)
.word cpu_arm7_set_pte
.size arm7_processor_functions, . - arm7_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name: .asciz "armv3"
.size cpu_arch_name, . - cpu_arch_name
......@@ -316,6 +316,25 @@ cpu_arch_name: .asciz "armv3"
.type cpu_elf_name, #object
cpu_elf_name: .asciz "v3"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm6_name, #object
cpu_arm6_name: .asciz "ARM6"
.size cpu_arm6_name, . - cpu_arm6_name
.type cpu_arm610_name, #object
cpu_arm610_name:
.asciz "ARM610"
.size cpu_arm610_name, . - cpu_arm610_name
.type cpu_arm7_name, #object
cpu_arm7_name: .asciz "ARM7"
.size cpu_arm7_name, . - cpu_arm7_name
.type cpu_arm710_name, #object
cpu_arm710_name:
.asciz "ARM710"
.size cpu_arm710_name, . - cpu_arm710_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -124,13 +124,9 @@ ENTRY(cpu_arm720_reset)
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
cpu_arm720_name:
.asciz "ARM720T"
.align
__INIT
.type __arm720_setup, #function
__arm720_setup: mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ invalidate caches
mcr p15, 0, r0, c8, c7, 0 @ flush TLB (v4)
......@@ -143,6 +139,9 @@ __arm720_setup: mov r0, #0
orr r0, r0, #0x2100 @ .... .... .111 .... (old)
orr r0, r0, #0x003d @ ..1. ..01 ..11 1101 (new)
mov pc, lr @ __ret (head-armv.S)
.size __arm720_setup, . - __arm720_setup
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -160,6 +159,8 @@ ENTRY(arm720_processor_functions)
.word cpu_arm720_set_pte
.size arm720_processor_functions, . - arm720_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name: .asciz "armv4t"
.size cpu_arch_name, . - cpu_arch_name
......@@ -167,6 +168,12 @@ cpu_arch_name: .asciz "armv4t"
.type cpu_elf_name, #object
cpu_elf_name: .asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm720_name, #object
cpu_arm720_name:
.asciz "ARM720T"
.size cpu_arm720_name, . - cpu_arm720_name
.align
/*
......
......@@ -360,25 +360,9 @@ ENTRY(cpu_arm920_set_pte)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(cpu_arm920_name)
.ascii "ARM920T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
.ascii "\0"
.align
__INIT
.type __arm920_setup, #function
__arm920_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
......@@ -409,8 +393,9 @@ __arm920_setup:
orr r0, r0, #0x1000 @ ...1 .... .... ....
#endif
mov pc, lr
.size __arm920_setup, . - __arm920_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -428,6 +413,8 @@ arm920_processor_functions:
.word cpu_arm920_set_pte
.size arm920_processor_functions, . - arm920_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4t"
......@@ -437,6 +424,24 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm920_name, #object
cpu_arm920_name:
.ascii "ARM920T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
.ascii "\0"
.size cpu_arm920_name, . - cpu_arm920_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -364,25 +364,9 @@ ENTRY(cpu_arm922_set_pte)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(cpu_arm922_name)
.ascii "ARM922T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
.ascii "\0"
.align
__INIT
.type __arm922_setup, #function
__arm922_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
......@@ -413,8 +397,9 @@ __arm922_setup:
orr r0, r0, #0x1000 @ ...1 .... .... ....
#endif
mov pc, lr
.size __arm922_setup, . - __arm922_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -432,6 +417,8 @@ arm922_processor_functions:
.word cpu_arm922_set_pte
.size arm922_processor_functions, . - arm922_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4t"
......@@ -441,6 +428,24 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm922_name, #object
cpu_arm922_name:
.ascii "ARM922T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
.ascii "\0"
.size cpu_arm922_name, . - cpu_arm922_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -366,28 +366,9 @@ ENTRY(cpu_arm926_set_pte)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(cpu_arm926_name)
.ascii "ARM926EJ-S"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
#endif
.ascii "\0"
.align
__INIT
.type __arm926_setup, #function
__arm926_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
......@@ -428,8 +409,9 @@ __arm926_setup:
orr r0, r0, #0x1000 @ ...1 .... .... ....
#endif
mov pc, lr
.size __arm926_setup, . - __arm926_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -447,6 +429,8 @@ arm926_processor_functions:
.word cpu_arm926_set_pte
.size arm926_processor_functions, . - arm926_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5tej"
......@@ -456,6 +440,27 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm926_name, #object
cpu_arm926_name:
.ascii "ARM926EJ-S"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
#endif
.ascii "\0"
.size cpu_arm926_name, . - cpu_arm926_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -182,13 +182,9 @@ ENTRY(cpu_sa110_set_pte)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
cpu_sa110_name:
.asciz "StrongARM-110"
.align
__INIT
.type __sa110_setup, #function
__sa110_setup:
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x2e00 @ ..VI ZFRS BLDP WCAM
......@@ -203,8 +199,9 @@ __sa110_setup:
mov r10, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r10, c3, c0 @ load domain access register
mov pc, lr
.size __sa110_setup, . - __sa110_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -218,16 +215,13 @@ ENTRY(sa110_processor_functions)
.word cpu_sa110_proc_fin
.word cpu_sa110_reset
.word cpu_sa110_do_idle
/* dcache */
.word cpu_sa110_dcache_clean_area
/* pgtable */
.word cpu_sa110_switch_mm
.word cpu_sa110_set_pte
.size sa110_processor_functions, . - sa110_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4"
......@@ -237,6 +231,12 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_sa110_name, #object
cpu_sa110_name:
.asciz "StrongARM-110"
.size cpu_sa110_name, . - cpu_sa110_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -206,15 +206,9 @@ ENTRY(cpu_sa1100_set_pte)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
cpu_sa1100_name:
.asciz "StrongARM-1100"
cpu_sa1110_name:
.asciz "StrongARM-1110"
.align
__INIT
.type __sa1100_setup, #function
__sa1100_setup:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
......@@ -229,8 +223,9 @@ __sa1100_setup:
orr r0, r0, #0x003d
orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
mov pc, lr
.size __sa1100_setup, . - __sa1100_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -252,6 +247,8 @@ ENTRY(sa1100_processor_functions)
.word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4"
......@@ -261,6 +258,17 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_sa1100_name, #object
cpu_sa1100_name:
.asciz "StrongARM-1100"
.size cpu_sa1100_name, . - cpu_sa1100_name
.type cpu_sa1110_name, #object
cpu_sa1110_name:
.asciz "StrongARM-1110"
.size cpu_sa1110_name, . - cpu_sa1110_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -571,22 +571,11 @@ ENTRY(cpu_xscale_set_pte)
.ltorg
cpu_80200_name:
.asciz "XScale-80200"
cpu_80321_name:
.asciz "XScale-IOP80321"
cpu_pxa250_name:
.asciz "XScale-PXA250"
cpu_pxa210_name:
.asciz "XScale-PXA210"
.align
__INIT
.type __xscale_setup, #function
__xscale_setup:
mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r0
......@@ -605,8 +594,9 @@ __xscale_setup:
orr r0, r0, #0x0005 @ .... .... .... .C.M
orr r0, r0, #0x3900 @ ..VI Z..S .... ....
mov pc, lr
.size __xscale_setup, . - __xscale_setup
.text
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
......@@ -625,6 +615,8 @@ ENTRY(xscale_processor_functions)
.word cpu_xscale_set_pte
.size xscale_processor_functions, . - xscale_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5te"
......@@ -634,6 +626,27 @@ cpu_arch_name:
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_80200_name, #object
cpu_80200_name:
.asciz "XScale-80200"
.size cpu_80200_name, . - cpu_80200_name
.type cpu_80321_name, #object
cpu_80321_name:
.asciz "XScale-IOP80321"
.size cpu_80321_name, . - cpu_80321_name
.type cpu_pxa250_name, #object
cpu_pxa250_name:
.asciz "XScale-PXA250"
.size cpu_pxa250_name, . - cpu_pxa250_name
.type cpu_pxa210_name, #object
cpu_pxa210_name:
.asciz "XScale-PXA210"
.size cpu_pxa210_name, . - cpu_pxa210_name
.align
.section ".proc.info", #alloc, #execinstr
......
......@@ -42,7 +42,7 @@ ENTRY(v3_flush_kern_tlb_range)
blo 1b
mov pc, lr
__INIT
__INITDATA
.type v3_tlb_fns, #object
ENTRY(v3_tlb_fns)
......
......@@ -55,7 +55,7 @@ ENTRY(v4_flush_user_tlb_range)
.globl v4_flush_kern_tlb_range
.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
__INIT
__INITDATA
.type v4_tlb_fns, #object
ENTRY(v4_tlb_fns)
......
......@@ -67,7 +67,7 @@ ENTRY(v4wb_flush_kern_tlb_range)
blo 1b
mov pc, lr
__INIT
__INITDATA
.type v4wb_tlb_fns, #object
ENTRY(v4wb_tlb_fns)
......
......@@ -58,7 +58,7 @@ ENTRY(v4wbi_flush_kern_tlb_range)
blo 1b
mov pc, lr
__INIT
__INITDATA
.type v4wbi_tlb_fns, #object
ENTRY(v4wbi_tlb_fns)
......
......@@ -6,7 +6,7 @@
# To add an entry into this database, please see Documentation/arm/README,
# or contact rmk@arm.linux.org.uk
#
# Last update: Sat Sep 13 00:22:34 2003
# Last update: Thu Sep 18 17:15:55 2003
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
......@@ -384,3 +384,4 @@ prayoglite ARCH_PRAYOGLITE PRAYOGLITE 372
gumstik ARCH_GUMSTIK GUMSTIK 373
rcube ARCH_RCUBE RCUBE 374
rea_olv ARCH_REA_OLV REA_OLV 375
pxa_iphone ARCH_PXA_IPHONE PXA_IPHONE 376
......@@ -196,6 +196,7 @@ static void change_FID(int fid)
if (fidvidctl.bits.FID != fid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.FID = fid;
fidvidctl.bits.VIDC = 0;
fidvidctl.bits.FIDC = 1;
wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
}
......@@ -208,7 +209,9 @@ static void change_VID(int vid)
rdmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.VID != vid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.VID = vid;
fidvidctl.bits.FIDC = 0;
fidvidctl.bits.VIDC = 1;
wrmsrl (MSR_K7_FID_VID_CTL, fidvidctl.val);
}
......@@ -298,8 +301,14 @@ static int powernow_decode_bios (int maxfid, int startvid)
dprintk (" voltage regulator)\n");
latency = psb->settlingtime;
if (latency < 100) {
printk (KERN_INFO PFX "BIOS set settling time to %d microseconds. "
"Should be at least 100. Correcting.\n", latency);
latency = 100;
}
dprintk (KERN_INFO PFX "Settling Time: %d microseconds.\n", psb->settlingtime);
dprintk (KERN_INFO PFX "Has %d PST tables. (Only dumping ones relevant to this CPU).\n", psb->numpst);
latency *= 100; /* SGTC needs to be in units of 10ns */
p += sizeof (struct psb_s);
......
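The powernow-k7 hunk clamps the BIOS-reported settling time to at least 100 microseconds and then multiplies it by 100 because the SGTC field counts in 10 ns units (1 us = 100 ticks of 10 ns). The conversion in isolation, with an invented function name:

/* BIOS settling time (microseconds) -> SGTC ticks of 10 ns, with the
 * 100 us floor applied by the patch above. */
static unsigned int settling_us_to_sgtc(unsigned int us)
{
	if (us < 100)
		us = 100;	/* implausibly small BIOS value: raise it */
	return us * 100;	/* 1 us = 1000 ns = 100 ticks of 10 ns */
}

So a reported settling time of 100 microseconds programs SGTC with 10000 ticks.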
......@@ -413,6 +413,16 @@ config IA64_PALINFO
To use this option, you have to ensure that the "/proc file system
support" (CONFIG_PROC_FS) is enabled, too.
config IA64_SALINFO
tristate "/proc/sal support"
help
The /proc/sal directory exports the SAL (system abstraction layer)
feature bits, like whether the platform is subject to ITC drift. It
is intended to be used by user programs that care about such things.
To use this option, you have to ensure that the "/proc file system
support" (CONFIG_PROC_FS) is enabled, too.
config EFI_VARS
tristate "/proc/efi/vars support"
help
......@@ -424,9 +434,15 @@ config EFI_VARS
support" (CONFIG_PROC_FS) is enabled, too.
config NR_CPUS
int "Maximum number of CPUs (2-64)"
int "Maximum number of CPUs"
depends on SMP
default "64"
help
You should set this to the number of CPUs in your system, but
keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but
only use 2 CPUs on a >2 CPU system. Setting this to a value larger
than 64 will cause the use of a CPU mask array, causing a small
performance hit.
source "fs/Kconfig.binfmt"
......
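The new NR_CPUS help text explains the cost it alludes to: while the CPU count fits in one machine word, the online mask is effectively a single unsigned long and "first online CPU" is one bit-scan; above that it becomes an array that has to be walked, which is the small performance hit mentioned. The same transition shows up later in this diff, where mca.c switches from __ffs(cpu_online_map) to first_cpu(cpu_online_map) and the topology/profile declarations move to cpumask_t. A rough sketch of the representation, not the kernel's actual cpumask code:

#include <stdint.h>

#define NR_CPUS		128
#define BITS_PER_LONG	64
#define MASK_WORDS	((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long bits[MASK_WORDS]; } cpu_mask;

/* Index of the lowest set bit, or NR_CPUS if the mask is empty.  With
 * MASK_WORDS == 1 this collapses to a single ffs-style scan, which is why
 * __ffs() on a bare unsigned long used to be good enough. */
static int first_cpu_bit(const cpu_mask *m)
{
	for (int w = 0; w < MASK_WORDS; w++)
		if (m->bits[w])
			return w * BITS_PER_LONG + __builtin_ctzl(m->bits[w]);
	return NR_CPUS;
}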
......@@ -54,6 +54,11 @@
*/
#define ALLOW_IOV_BYPASS
#ifdef CONFIG_PROC_FS
/* turn it off for now; without per-CPU counters, it's too much of a scalability bottleneck: */
# define SBA_PROC_FS 0
#endif
/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
......@@ -193,7 +198,7 @@ struct ioc {
} saved[DELAYED_RESOURCE_CNT];
#endif
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
#define SBA_SEARCH_SAMPLE 0x100
unsigned long avg_search[SBA_SEARCH_SAMPLE];
unsigned long avg_idx; /* current index into avg_search */
......@@ -517,7 +522,7 @@ static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
unsigned long itc_start = ia64_get_itc();
#endif
unsigned long pide;
......@@ -551,7 +556,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
ioc->res_bitshift );
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
{
unsigned long itc_end = ia64_get_itc();
unsigned long tmp = itc_end - itc_start;
......@@ -593,7 +598,7 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
__FUNCTION__, (uint) iova, size,
bits_not_wanted, m, pide, res_ptr, *res_ptr);
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
ioc->used_pages -= bits_not_wanted;
#endif
......@@ -785,7 +790,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
** Device is bit capable of DMA'ing to the buffer...
** just return the PCI address of ptr
*/
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
spin_lock_irqsave(&ioc->res_lock, flags);
ioc->msingle_bypass++;
spin_unlock_irqrestore(&ioc->res_lock, flags);
......@@ -811,7 +816,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
panic("Sanity check failed");
#endif
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
ioc->msingle_calls++;
ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
......@@ -870,7 +875,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
/*
** Address does not fall w/in IOVA, must be bypassing
*/
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
spin_lock_irqsave(&ioc->res_lock, flags);
ioc->usingle_bypass++;
spin_unlock_irqrestore(&ioc->res_lock, flags);
......@@ -895,7 +900,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
size = ROUNDUP(size, IOVP_SIZE);
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
ioc->usingle_calls++;
ioc->usingle_pages += size >> IOVP_SHIFT;
#endif
......@@ -957,20 +962,20 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
/**
* sba_alloc_consistent - allocate/map shared mem for DMA
* @hwdev: instance of PCI owned by the driver that's asking.
* sba_alloc_coherent - allocate/map shared mem for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
* See Documentation/DMA-mapping.txt
*/
void *
sba_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags)
{
struct ioc *ioc;
void *addr;
addr = (void *) __get_free_pages(flags, get_order(size));
addr = (void *) __get_free_pages(flags, get_order(size));
if (!addr)
return NULL;
......@@ -978,7 +983,7 @@ sba_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, i
* REVISIT: if sba_map_single starts needing more than dma_mask from the
* device, this needs to be updated.
*/
ioc = GET_IOC(hwdev);
ioc = GET_IOC(dev);
ASSERT(ioc);
*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
......@@ -988,17 +993,17 @@ sba_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, i
/**
* sba_free_consistent - free/unmap shared mem for DMA
* @hwdev: instance of PCI owned by the driver that's asking.
* sba_free_coherent - free/unmap shared mem for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @vaddr: virtual address IOVA of "consistent" buffer.
* @dma_handler: IO virtual address of "consistent" buffer.
*
* See Documentation/DMA-mapping.txt
*/
void sba_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
sba_unmap_single(hwdev, dma_handle, size, 0);
sba_unmap_single(dev, dma_handle, size, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
......@@ -1078,7 +1083,7 @@ sba_fill_pdir(
cnt += dma_offset;
dma_offset=0; /* only want offset on first chunk */
cnt = ROUNDUP(cnt, IOVP_SIZE);
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
ioc->msg_pages += cnt >> IOVP_SHIFT;
#endif
do {
......@@ -1268,7 +1273,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
sg->dma_length = sg->length;
sg->dma_address = virt_to_phys(sba_sg_address(sg));
}
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
spin_lock_irqsave(&ioc->res_lock, flags);
ioc->msg_bypass++;
spin_unlock_irqrestore(&ioc->res_lock, flags);
......@@ -1281,7 +1286,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
sglist->dma_length = sglist->length;
sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length,
dir);
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
/*
** Should probably do some stats counting, but trying to
** be precise quickly starts wasting CPU time.
......@@ -1300,7 +1305,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
}
#endif
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
ioc->msg_calls++;
#endif
......@@ -1363,7 +1368,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
ioc = GET_IOC(dev);
ASSERT(ioc);
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
ioc->usg_calls++;
#endif
......@@ -1376,7 +1381,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
while (nents && sglist->dma_length) {
sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
/*
** This leaves inconsistent data in the stats, but we can't
** tell which sg lists were mapped by map_single and which
......@@ -1704,7 +1709,7 @@ ioc_init(u64 hpa, void *handle)
**
**************************************************************************/
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
......@@ -1758,7 +1763,7 @@ ioc_show(struct seq_file *s, void *v)
if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
}
avg /= SBA_SEARCH_SAMPLE;
avg /= SBA_SEARCH_SAMPLE;
seq_printf(s, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", min, avg, max);
seq_printf(s, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
......@@ -1859,7 +1864,7 @@ ioc_proc_init(void)
}
#endif
void
static void
sba_connect_bus(struct pci_bus *bus)
{
acpi_handle handle, parent;
......@@ -1867,7 +1872,7 @@ sba_connect_bus(struct pci_bus *bus)
struct ioc *ioc;
if (!PCI_CONTROLLER(bus))
panic(PFX "no sysdata on bus %d!\n",bus->number);
panic(PFX "no sysdata on bus %d!\n", bus->number);
if (PCI_CONTROLLER(bus)->iommu)
return;
......@@ -1950,7 +1955,7 @@ sba_init(void)
}
#endif
#ifdef CONFIG_PROC_FS
#if SBA_PROC_FS
ioc_proc_init();
#endif
return 0;
......
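The sba_iommu.c hunks compile the statistics out (SBA_PROC_FS is defined to 0) for the reason given in the comment: every map/unmap bumps shared counters under the IOC's res_lock, so on a large machine the bookkeeping itself becomes a scalability bottleneck. The usual remedy, per-CPU counters that are only summed when /proc is read, looks roughly like this (a sketch of the technique, not code from this driver):

/* Lock-free statistics: each CPU bumps its own slot; only the reader sums.
 * NCPUS and the cpu argument are placeholders; real code would also pad each
 * slot to a cache line to avoid false sharing. */
#define NCPUS 64

struct ioc_stats {
	unsigned long msingle_calls[NCPUS];
	unsigned long msingle_pages[NCPUS];
};

static inline void count_map_single(struct ioc_stats *st, unsigned int cpu,
				    unsigned long pages)
{
	st->msingle_calls[cpu]++;		/* no res_lock needed */
	st->msingle_pages[cpu] += pages;
}

static unsigned long stat_total(const unsigned long *slots)
{
	unsigned long sum = 0;
	for (unsigned int i = 0; i < NCPUS; i++)
		sum += slots[i];
	return sum;				/* paid only in the /proc read path */
}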
......@@ -76,6 +76,7 @@
#define OFFSET4K(a) ((a) & 0xfff)
#define PAGE_START(addr) ((addr) & PAGE_MASK)
#define MINSIGSTKSZ_IA32 2048
#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
......@@ -2262,10 +2263,18 @@ sys32_sigaltstack (ia32_stack_t *uss32, ia32_stack_t *uoss32,
return -EFAULT;
uss.ss_sp = (void *) (long) buf32.ss_sp;
uss.ss_flags = buf32.ss_flags;
uss.ss_size = buf32.ss_size;
/* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
check and set it to the user requested value later */
if (buf32.ss_size < MINSIGSTKSZ_IA32) {
ret = -ENOMEM;
goto out;
}
uss.ss_size = MINSIGSTKSZ;
set_fs(KERNEL_DS);
ret = do_sigaltstack(uss32 ? &uss : NULL, &uoss, pt->r12);
current->sas_ss_size = buf32.ss_size;
set_fs(old_fs);
out:
if (ret < 0)
return(ret);
if (uoss32) {
......
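The sys32_sigaltstack change works around MINSIGSTKSZ differing between ia32 and ia64: the compat layer enforces the 2048-byte ia32 minimum itself, hands do_sigaltstack() the native minimum so the generic check passes, and then records the size the 32-bit task actually requested. A stand-alone model of that shape (user-space types; the native minimum here is a stand-in, and unlike the patch this sketch only writes the size back on success):

#include <errno.h>
#include <stddef.h>

#define MINSIGSTKSZ_IA32	2048		/* 32-bit ABI minimum, as in the patch */
#define MINSIGSTKSZ_NATIVE	(128 * 1024)	/* stand-in for the ia64 value */

struct stack32 { unsigned int ss_sp, ss_flags, ss_size; };

/* Stands in for do_sigaltstack()'s internal minimum-size check. */
static int native_check(size_t ss_size)
{
	return ss_size < MINSIGSTKSZ_NATIVE ? -ENOMEM : 0;
}

static int compat_sigaltstack(const struct stack32 *buf32, size_t *sas_ss_size)
{
	if (buf32->ss_size < MINSIGSTKSZ_IA32)
		return -ENOMEM;				/* the check the native code can't do */

	int ret = native_check(MINSIGSTKSZ_NATIVE);	/* "lie" to satisfy the native check */
	if (ret == 0)
		*sas_ss_size = buf32->ss_size;		/* record the size actually requested */
	return ret;
}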
......@@ -14,6 +14,7 @@ obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IA64_SALINFO) += salinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
......
......@@ -331,7 +331,7 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
#ifdef CONFIG_ACPI_NUMA
#define SLIT_DEBUG
#undef SLIT_DEBUG
#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
......
......@@ -446,10 +446,12 @@ efi_map_pal_code (void)
panic("Woah! PAL code size bigger than a granule!");
mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
#if EFI_DEBUG
printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
smp_processor_id(), md->phys_addr,
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
/*
* Cannot write to CRx with PSR.ic=1
......
......@@ -1193,7 +1193,7 @@ static void
ia64_mca_cmc_poll (unsigned long dummy)
{
/* Trigger a CMC interrupt cascade */
platform_send_ipi(__ffs(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
......@@ -1260,7 +1260,7 @@ static void
ia64_mca_cpe_poll (unsigned long dummy)
{
/* Trigger a CPE interrupt cascade */
platform_send_ipi(__ffs(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
......
......@@ -2905,7 +2905,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* - system-wide session: PMCx.pm=1 (privileged monitor)
* - per-task : PMCx.pm=0 (user monitor)
*/
if ((is_monitor || is_counting) && value != PMC_DFL_VAL(i) && PFM_CHECK_PMC_PM(ctx, cnum, value)) {
if ((is_monitor || is_counting) && value != PMC_DFL_VAL(cnum) && PFM_CHECK_PMC_PM(ctx, cnum, value)) {
DPRINT(("pmc%u pmc_pm=%ld fl_system=%d\n",
cnum,
PMC_PM(cnum, value),
......
......@@ -5,6 +5,7 @@
*
* Copyright (c) 2001 Silicon Graphics, Inc. All rights reserved.
*
* 09/11/2003 jbarnes@sgi.com updated for 2.6
* 10/30/2001 jbarnes@sgi.com copied much of Stephane's palinfo
* code to create this file
*/
......@@ -59,7 +60,7 @@ salinfo_init(void)
*sdir = create_proc_read_entry (salinfo_entries[i].name, 0, salinfo_dir,
salinfo_read, (void *)salinfo_entries[i].feature);
if (*sdir)
*sdir->owner = THIS_MODULE;
(*sdir)->owner = THIS_MODULE;
sdir++;
}
*sdir++ = salinfo_dir;
......
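The one-character salinfo.c fix is an operator-precedence bug: -> binds more tightly than unary *, so *sdir->owner parses as *(sdir->owner); with sdir being a pointer to a pointer, the intended member access needs the explicit (*sdir)->owner. A minimal illustration with toy types (not the proc_dir_entry API):

#include <assert.h>

struct entry { int owner; };

int main(void)
{
	struct entry e = { 0 };
	struct entry *p = &e;
	struct entry **sdir = &p;

	(*sdir)->owner = 42;	/* dereference first, then select the member */
	assert(e.owner == 42);

	/* "*sdir->owner = 42;" parses as *(sdir->owner) and does not even
	 * compile here, because sdir points to a pointer, not to the struct. */
	return 0;
}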
......@@ -187,7 +187,7 @@ static inline void
ia64_do_profile (struct pt_regs * regs)
{
unsigned long ip, slot;
extern unsigned long prof_cpu_mask;
extern cpumask_t prof_cpu_mask;
profile_hook(regs);
......
......@@ -568,10 +568,10 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
}
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;
siginfo.si_flags = 0;
siginfo.si_isr = 0;
siginfo.si_addr = (void *) ifa;
siginfo.si_imm = 0;
siginfo.si_addr = (void *) ifa;
siginfo.si_imm = 0;
siginfo.si_flags = __ISR_VALID;
siginfo.si_isr = isr;
force_sig_info(SIGTRAP, &siginfo, current);
return;
......
......@@ -1171,9 +1171,10 @@ desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word
static inline unw_hash_index_t
hash (unsigned long ip)
{
# define magic 0x9e3779b97f4a7c16 /* based on ((sqrt(5)-1)/2)*2^64 */
# define hashmagic 0x9e3779b97f4a7c16 /* based on ((sqrt(5)-1)/2)*2^64 */
return (ip >> 4)*magic >> (64 - UNW_LOG_HASH_SIZE);
return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
#undef hashmagic
}
static inline long
......
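The unwind.c hunk only renames the local macro (presumably to avoid a clash with another "magic"), but the constant deserves a note: 0x9e3779b97f4a7c16 is 2^64 * (sqrt(5)-1)/2 rounded to the nearest integer, the golden-ratio reciprocal scaled to 64 bits, so this is classic Fibonacci (multiplicative) hashing: multiply, then keep the top UNW_LOG_HASH_SIZE bits; the >> 4 discards the offset within a 16-byte ia64 bundle before hashing. A stand-alone version with an arbitrary table size:

#include <stdint.h>

#define LOG_HASH_SIZE	8			/* 256-entry table, arbitrary for the sketch */
#define GOLDEN_RATIO_64	0x9e3779b97f4a7c16ULL	/* ~2^64 * (sqrt(5)-1)/2 */

/* Fibonacci hashing: the multiply spreads the key across all 64 bits and the
 * top LOG_HASH_SIZE bits give a well-distributed index even when the inputs
 * differ only in a few low bits. */
static unsigned int hash_ip(uint64_t ip)
{
	return (unsigned int)(((ip >> 4) * GOLDEN_RATIO_64) >> (64 - LOG_HASH_SIZE));
}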
......@@ -6,13 +6,13 @@
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/sn/simulator.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn2/shub_mmr.h>
/**
* sn_io_addr - convert an in/out port to an i/o address
......
......@@ -39,6 +39,7 @@
#include <asm/sn/sn2/shubio.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn2/shub_mmr.h>
extern irqpda_t *irqpdaindr;
extern cnodeid_t master_node_get(vertex_hdl_t vhdl);
......
......@@ -34,6 +34,8 @@
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sndrv.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/sn/sn2/shub_mmr.h>
/*
* Shub WAR for Xbridge Little Endian problem:
......
......@@ -35,7 +35,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/current.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
......@@ -44,7 +43,6 @@
#include <asm/sn/sgi.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/sn/hcl.h>
#include <asm/sn/types.h>
#include <asm/sn/pci/bridge.h>
......@@ -62,6 +60,7 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/sn/sn2/shub_mmr.h>
int irq_to_bit_pos(int irq);
static void force_interrupt(int irq);
......
......@@ -73,7 +73,7 @@ static int amba_kmi_open(struct serio *io)
writeb(kmi->divisor, KMICLKDIV);
writeb(KMICR_EN, KMICR);
ret = request_irq(kmi->irq, amba_kmi_int, 0, kmi->io.phys, kmi);
ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi);
if (ret) {
printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
writeb(0, KMICR);
......@@ -108,11 +108,11 @@ static int amba_kmi_probe(struct amba_device *dev, void *id)
kmi->io.write = amba_kmi_write;
kmi->io.open = amba_kmi_open;
kmi->io.close = amba_kmi_close;
kmi->io.name = dev->dev.name;
kmi->io.name = dev->dev.bus_id;
kmi->io.phys = dev->dev.bus_id;
kmi->io.driver = kmi;
kmi->res = request_mem_region(dev->res.start, KMI_SIZE, kmi->io.phys);
kmi->res = request_mem_region(dev->res.start, KMI_SIZE, "kmi-pl050");
if (!kmi->res) {
kfree(kmi);
return -EBUSY;
......@@ -147,14 +147,12 @@ static int amba_kmi_remove(struct amba_device *dev)
return 0;
}
static int amba_kmi_resume(struct amba_device *dev, u32 level)
static int amba_kmi_resume(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
if (level == RESUME_ENABLE) {
/* kick the serio layer to rescan this port */
serio_rescan(&kmi->io);
}
/* kick the serio layer to rescan this port */
serio_rescan(&kmi->io);
return 0;
}
......
......@@ -90,7 +90,7 @@
# endif
#endif
#ifdef CONFIG_CPU_ABORT_EV5T
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
......
......@@ -30,6 +30,8 @@
#ifdef __KERNEL__
#include <asm/system.h>
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
......
......@@ -23,7 +23,7 @@
#include <linux/cache.h>
extern volatile char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern volatile unsigned long node_to_cpu_mask[NR_NODES] __cacheline_aligned;
extern volatile cpumask_t node_to_cpu_mask[NR_NODES] __cacheline_aligned;
/* Stuff below this line could be architecture independent */
......
(Nine further file diffs are collapsed in this view.)