Commit de950cef authored by Russell King

[ARM] Remove more 26-bit ARM support.

Remove more of the redundant 26-bit ARM CPU support from the
32-bit ARM tree.
parent 3581ab9b
@@ -64,28 +64,14 @@ AFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
 #Default value
 DATAADDR := .
-ifeq ($(CONFIG_CPU_26),y)
-PROCESSOR := armo
-head-y := arch/arm/mach-arc/head.o arch/arm/kernel/init_task.o
-LDFLAGS_BLOB += --oformat elf26-littlearm
-ifeq ($(CONFIG_ROM_KERNEL),y)
-DATAADDR := 0x02080000
-textaddr-y := 0x03800000
-else
-textaddr-y := 0x02080000
-endif
-endif
-ifeq ($(CONFIG_CPU_32),y)
-PROCESSOR := armv
 head-y := arch/arm/kernel/head.o arch/arm/kernel/init_task.o
-ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
-LDFLAGS_BLOB += --oformat elf32-bigarm
-else
-LDFLAGS_BLOB += --oformat elf32-littlearm
-endif
-textaddr-y := 0xC0008000
-endif
+ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
+LDFLAGS_BLOB += --oformat elf32-bigarm
+else
+LDFLAGS_BLOB += --oformat elf32-littlearm
+endif
+textaddr-y := 0xC0008000
 machine-$(CONFIG_ARCH_ARCA5K) := arc
 machine-$(CONFIG_ARCH_RPC) := rpc
@@ -160,10 +146,10 @@ include/asm-arm/.arch: $(wildcard include/config/arch/*.h)
 @ln -sf arch-$(INCDIR) include/asm-arm/arch
 @touch $@
-include/asm-arm/.proc: $(wildcard include/config/cpu/32.h) $(wildcard include/config/cpu/26.h)
-@echo ' Making asm-arm/proc -> asm-arm/proc-$(PROCESSOR) symlink'
+include/asm-arm/.proc: $(wildcard include/config/cpu/32.h)
+@echo ' Making asm-arm/proc -> asm-arm/proc-armv symlink'
 @rm -f include/asm-arm/proc
-@ln -sf proc-$(PROCESSOR) include/asm-arm/proc
+@ln -sf proc-armv include/asm-arm/proc
 @touch $@
 prepare: maketools
@@ -2,13 +2,11 @@
 # Makefile for the linux kernel.
 #
-ENTRY_OBJ = entry-$(PROCESSOR).o
 AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR)
 # Object file lists.
-obj-y := arch.o compat.o dma.o $(ENTRY_OBJ) entry-common.o irq.o \
+obj-y := arch.o compat.o dma.o entry-armv.o entry-common.o irq.o \
 process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \
 time.o traps.o
@@ -34,6 +32,5 @@ extra-y := $(head-y) init_task.o vmlinux.lds.s
 # Spell out some dependencies that `make dep' doesn't spot
 $(obj)/entry-armv.o: $(obj)/entry-header.S include/asm-arm/constants.h
-$(obj)/entry-armo.o: $(obj)/entry-header.S include/asm-arm/constants.h
 $(obj)/entry-common.o: $(obj)/entry-header.S include/asm-arm/constants.h \
 $(obj)/calls.S
/*
* linux/arch/arm/kernel/entry-armo.S
*
* Copyright (C) 1995,1996,1997,1998 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Low-level vector interface routines
*
* Design issues:
* - We have several modes that each vector can be called from,
* each with its own set of registers. On entry to any vector,
* we *must* save the registers used in *that* mode.
*
* - This code must be as fast as possible.
*
* There are a few restrictions on the vectors:
* - the SWI vector cannot be called from *any* non-user mode
*
* - the FP emulator is *never* called for an undefined instruction taken
* in *any* non-user mode.
*
* Ok, so this file may be a mess, but it's as efficient as possible while
* adhering to the above criteria.
*/
#include <linux/config.h>
#include <linux/init.h>
#include "entry-header.S"
.text
#ifdef IOC_BASE
/* IOC / IOMD based hardware */
.equ ioc_base_high, IOC_BASE & 0xff000000
.equ ioc_base_low, IOC_BASE & 0x00ff0000
.macro disable_fiq
mov r12, #ioc_base_high
.if ioc_base_low
orr r12, r12, #ioc_base_low
.endif
strb r12, [r12, #0x38] @ Disable FIQ register
.endm
.macro get_irqnr_and_base, irqnr, base
mov r4, #ioc_base_high @ point at IOC
.if ioc_base_low
orr r4, r4, #ioc_base_low
.endif
ldrb \irqnr, [r4, #0x24] @ get high priority first
adr \base, irq_prio_h
teq \irqnr, #0
ldreqb \irqnr, [r4, #0x14] @ get low priority
adreq \base, irq_prio_l
.endm
/*
* Interrupt table (incorporates priority)
*/
.macro irq_prio_table
irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
.byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
.byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
.byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
.byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
.byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
.endm
#else
#error Unknown architecture
#endif
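A note on the tables above: get_irqnr_and_base reads an 8-bit IOC status register and uses the whole byte to index irq_prio_h or irq_prio_l, so each 256-entry table is a precomputed priority encoder that turns a pending-IRQ bit mask into the number of the highest-priority source in a single ldrb, with no bit-scan loop. A minimal C sketch of how such a table can be generated (the real tables encode a hand-tuned, hardware-specific ordering; the lowest-bit-wins rule below is an assumption purely for illustration):

#include <stddef.h>

static unsigned char prio[256];

/* Precompute, for every possible status byte, which pending source wins.
 * Assumed rule: bit 0 outranks bit 1, and so on; the kernel's tables
 * use a different, hand-tuned order. */
static void build_prio_table(void)
{
        for (size_t mask = 0; mask < 256; mask++) {
                unsigned char winner = 0;
                for (unsigned int bit = 0; bit < 8; bit++) {
                        if (mask & (1u << bit)) {
                                winner = (unsigned char)bit;
                                break;
                        }
                }
                prio[mask] = winner; /* one lookup replaces a scan at IRQ time */
        }
}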
/*=============================================================================
* For entry-common.S
*/
#if 0
/*
* Uncomment these if you wish to get more debugging info about data aborts.
*/
#define FAULT_CODE_LDRSTRPOST 0x80
#define FAULT_CODE_LDRSTRPRE 0x40
#define FAULT_CODE_LDRSTRREG 0x20
#define FAULT_CODE_LDMSTM 0x10
#define FAULT_CODE_LDCSTC 0x08
#endif
#define FAULT_CODE_PREFETCH 0x04
#define FAULT_CODE_WRITE 0x02
#define FAULT_CODE_FORCECOW 0x01
#define SVC_SAVE_ALL \
str sp, [sp, #-16]! ;\
str lr, [sp, #8] ;\
str lr, [sp, #4] ;\
stmfd sp!, {r0 - r12} ;\
mov r0, #-1 ;\
str r0, [sp, #S_OLD_R0] ;\
zero_fp
#define SVC_IRQ_SAVE_ALL \
str sp, [sp, #-16]! ;\
str lr, [sp, #4] ;\
ldr lr, .LCirq ;\
ldr lr, [lr] ;\
str lr, [sp, #8] ;\
stmfd sp!, {r0 - r12} ;\
mov r0, #-1 ;\
str r0, [sp, #S_OLD_R0] ;\
zero_fp
#define SVC_RESTORE_ALL \
ldmfd sp, {r0 - pc}^
/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
*/
_unexp_fiq: ldr sp, .LCfiq
mov r12, #IOC_BASE
strb r12, [r12, #0x38] @ Disable FIQ register
teqp pc, #0x0c000003
mov r0, r0
stmfd sp!, {r0 - r3, ip, lr}
adr r0, Lfiqmsg
bl printk
ldmfd sp!, {r0 - r3, ip, lr}
teqp pc, #0x0c000001
mov r0, r0
movs pc, lr
Lfiqmsg: .ascii "*** Unexpected FIQ\n\0"
.align
.LCfiq: .word __temp_fiq
.LCirq: .word __temp_irq
/*=============================================================================
* Undefined instruction handler
*-----------------------------------------------------------------------------
* Handles floating point instructions
*/
vector_undefinstr:
tst lr,#3
bne __und_svc
save_user_regs
zero_fp
teqp pc, #PSR_I_BIT | MODE_SVC
.Lbug_undef:
ldr r4, .LC2
ldr pc, [r4] @ Call FP module USR entry point
.globl fpundefinstr
fpundefinstr: @ Called by FP module on undefined instr
mov r0, lr
mov r1, sp
teqp pc, #MODE_SVC
bl do_undefinstr
b ret_from_exception @ Normal FP exit
__und_svc: SVC_SAVE_ALL @ Non-user mode
mask_pc r0, lr
and r2, lr, #3
sub r0, r0, #4
mov r1, sp
bl do_undefinstr
SVC_RESTORE_ALL
#if defined CONFIG_FPE_NWFPE || defined CONFIG_FPE_FASTFPE
/* The FPE is always present */
.equ fpe_not_present, 0
#else
/* We get here if an undefined instruction happens and the floating
* point emulator is not present. If the offending instruction was
* a WFS, we just perform a normal return as if we had emulated the
* operation. This is a hack to allow some basic userland binaries
* to run so that the emulator module proper can be loaded. --philb
*/
fpe_not_present:
adr r10, wfs_mask_data
ldmia r10, {r4, r5, r6, r7, r8}
ldr r10, [sp, #S_PC] @ Load PC
sub r10, r10, #4
mask_pc r10, r10
ldrt r10, [r10] @ get instruction
and r5, r10, r5
teq r5, r4 @ Is it WFS?
beq ret_from_exception
and r5, r10, r8
teq r5, r6 @ Is it LDF/STF on sp or fp?
teqne r5, r7
bne fpundefinstr
tst r10, #0x00200000 @ Does it have WB
beq ret_from_exception
and r4, r10, #255 @ get offset
and r6, r10, #0x000f0000
tst r10, #0x00800000 @ +/-
ldr r5, [sp, r6, lsr #14] @ Load reg
rsbeq r4, r4, #0
add r5, r5, r4, lsl #2
str r5, [sp, r6, lsr #14] @ Save reg
b ret_from_exception
wfs_mask_data: .word 0x0e200110 @ WFS/RFS
.word 0x0fef0fff
.word 0x0d0d0100 @ LDF [sp]/STF [sp]
.word 0x0d0b0100 @ LDF [fp]/STF [fp]
.word 0x0f0f0f00
#endif
.LC2: .word fp_enter
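The fpe_not_present path above is dense, so here is a hedged C rendering of the same decode; the masks come straight from wfs_mask_data and the tst/teq instructions, while the function shape and names are illustrative. A WFS/RFS is simply reported as executed, and an LDF/STF on sp or fp gets only its base-register writeback applied:

#include <stdint.h>
#include <stdbool.h>

/* regs[] stands in for the saved pt_regs; returns true if the
 * instruction was "handled" without the FP emulator. */
static bool fake_fp_instruction(uint32_t instr, uint32_t regs[16])
{
        if ((instr & 0x0fef0fff) == 0x0e200110)   /* WFS/RFS: pretend done */
                return true;

        uint32_t op = instr & 0x0f0f0f00;
        if (op != 0x0d0d0100 && op != 0x0d0b0100) /* LDF/STF on sp or fp? */
                return false;                     /* no: genuine undef */

        if (instr & (1u << 21)) {                 /* writeback requested */
                uint32_t rn     = (instr >> 16) & 15;
                uint32_t offset = (instr & 255) << 2; /* word count -> bytes */
                if (instr & (1u << 23))           /* U bit: up or down */
                        regs[rn] += offset;
                else
                        regs[rn] -= offset;
        }
        return true;
}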
/*=============================================================================
* Prefetch abort handler
*-----------------------------------------------------------------------------
*/
vector_prefetch:
sub lr, lr, #4
tst lr, #3
bne __pabt_invalid
save_user_regs
teqp pc, #0x00000003 @ NOT a problem - doesn't change mode
mask_pc r0, lr @ Address of abort
mov r1, sp @ Tasks registers
bl do_PrefetchAbort
teq r0, #0 @ If non-zero, we believe this abort..
bne ret_from_exception
#ifdef DEBUG_UNDEF
adr r0, t
bl printk
#endif
ldr lr, [sp,#S_PC] @ program to test this on. I think it's
b .Lbug_undef @ broken at the moment though!)
__pabt_invalid: SVC_SAVE_ALL
mov r0, sp @ Prefetch aborts are definitely *not*
mov r1, #BAD_PREFETCH @ allowed in non-user modes. We can't
and r2, lr, #3 @ recover from this problem.
b bad_mode
#ifdef DEBUG_UNDEF
t: .ascii "*** undef ***\r\n\0"
.align
#endif
/*=============================================================================
* Address exception handler
*-----------------------------------------------------------------------------
* These aren't too critical.
* (they're not supposed to happen).
* In order to debug the reason for address exceptions in non-user modes,
* we have to obtain all the registers so that we can see what's going on.
*/
vector_addrexcptn:
sub lr, lr, #8
tst lr, #3
bne Laddrexcptn_not_user
save_user_regs
teq pc, #0x00000003
mask_pc r0, lr @ Point to instruction
mov r1, sp @ Point to registers
mov r2, #0x400
mov lr, pc
bl do_excpt
b ret_from_exception
Laddrexcptn_not_user:
SVC_SAVE_ALL
and r2, lr, #3
teq r2, #3
bne Laddrexcptn_illegal_mode
teqp pc, #0x00000003 @ NOT a problem - doesn't change mode
mask_pc r0, lr
mov r1, sp
orr r2, r2, #0x400
bl do_excpt
ldmia sp, {r0 - lr} @ I can't remember the reason I changed this...
add sp, sp, #15*4
movs pc, lr
Laddrexcptn_illegal_mode:
mov r0, sp
str lr, [sp, #-4]!
orr r1, r2, #0x0c000000
teqp r1, #0 @ change into mode (won't be user mode)
mov r0, r0
mov r1, r8 @ Any register from r8 - r14 can be banked
mov r2, r9
mov r3, r10
mov r4, r11
mov r5, r12
mov r6, r13
mov r7, r14
teqp pc, #0x04000003 @ back to svc
mov r0, r0
stmfd sp!, {r1-r7}
ldmia r0, {r0-r7}
stmfd sp!, {r0-r7}
mov r0, sp
mov r1, #BAD_ADDREXCPTN
b bad_mode
/*=============================================================================
* Interrupt (IRQ) handler
*-----------------------------------------------------------------------------
* Note: if in user mode, then *no* kernel routine is running, so we do not
* have to save svc lr
* (r13 points to irq temp save area)
*/
vector_IRQ: ldr r13, .LCirq @ I will leave this one in just in case...
sub lr, lr, #4
str lr, [r13]
tst lr, #3
bne __irq_svc
teqp pc, #0x08000003
mov r0, r0
ldr lr, .LCirq
ldr lr, [lr]
save_user_regs
1: get_irqnr_and_base r6, r5
teq r6, #0
ldrneb r0, [r5, r6] @ get IRQ number
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adr lr, 1b
orr lr, lr, #0x08000003 @ Force SVC
bne asm_do_IRQ
mov why, #0
get_current_task r5
b ret_to_user
irq_prio_table
__irq_svc: teqp pc, #0x08000003
mov r0, r0
SVC_IRQ_SAVE_ALL
and r2, lr, #3
teq r2, #3
bne __irq_invalid
1: get_irqnr_and_base r6, r5
teq r6, #0
ldrneb r0, [r5, r6] @ get IRQ number
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adr lr, 1b
orr lr, lr, #0x08000003 @ Force SVC
bne asm_do_IRQ @ Returns to 1b
SVC_RESTORE_ALL
__irq_invalid: mov r0, sp
mov r1, #BAD_IRQ
b bad_mode
/*=============================================================================
* Data abort handler code
*-----------------------------------------------------------------------------
*
* This handles both exceptions from user and SVC modes, computes the address
* range of the problem, and does any correction that is required. It then
* calls the kernel data abort routine.
*
* This is where I wish that the ARM would tell you which address aborted.
*/
vector_data: sub lr, lr, #8 @ Correct lr
tst lr, #3
bne Ldata_not_user
save_user_regs
teqp pc, #0x00000003 @ NOT a problem - doesn't change mode
mask_pc r0, lr
bl Ldata_do
b ret_from_exception
Ldata_not_user:
SVC_SAVE_ALL
and r2, lr, #3
teq r2, #3
bne Ldata_illegal_mode
tst lr, #0x08000000
teqeqp pc, #0x00000003 @ NOT a problem - doesn't change mode
mask_pc r0, lr
bl Ldata_do
SVC_RESTORE_ALL
Ldata_illegal_mode:
mov r0, sp
mov r1, #BAD_DATA
b bad_mode
Ldata_do: mov r3, sp
ldr r4, [r0] @ Get instruction
mov r2, #0
tst r4, #1 << 20 @ Check to see if it is a write instruction
orreq r2, r2, #FAULT_CODE_WRITE @ Indicate write instruction
mov r1, r4, lsr #22 @ Now branch to the relevant processing routine
and r1, r1, #15 << 2
add pc, pc, r1
movs pc, lr
b Ldata_unknown
b Ldata_unknown
b Ldata_unknown
b Ldata_unknown
b Ldata_ldrstr_post @ ldr rd, [rn], #m
b Ldata_ldrstr_numindex @ ldr rd, [rn, #m] @ RegVal
b Ldata_ldrstr_post @ ldr rd, [rn], rm
b Ldata_ldrstr_regindex @ ldr rd, [rn, rm]
b Ldata_ldmstm @ ldm*a rn, <rlist>
b Ldata_ldmstm @ ldm*b rn, <rlist>
b Ldata_unknown
b Ldata_unknown
b Ldata_ldrstr_post @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
b Ldata_ldcstc_pre @ ldc rd, [rn, #m]
b Ldata_unknown
Ldata_unknown: @ Part of jumptable
mov r0, r1
mov r1, r4
mov r2, r3
b baddataabort
Ldata_ldrstr_post:
mov r0, r4, lsr #14 @ Get Rn
and r0, r0, #15 << 2 @ Mask out reg.
teq r0, #15 << 2
ldr r0, [r3, r0] @ Get register
biceq r0, r0, #PCMASK
mov r1, r0
#ifdef FAULT_CODE_LDRSTRPOST
orr r2, r2, #FAULT_CODE_LDRSTRPOST
#endif
b do_DataAbort
Ldata_ldrstr_numindex:
mov r0, r4, lsr #14 @ Get Rn
and r0, r0, #15 << 2 @ Mask out reg.
teq r0, #15 << 2
ldr r0, [r3, r0] @ Get register
mov r1, r4, lsl #20
biceq r0, r0, #PCMASK
tst r4, #1 << 23
addne r0, r0, r1, lsr #20
subeq r0, r0, r1, lsr #20
mov r1, r0
#ifdef FAULT_CODE_LDRSTRPRE
orr r2, r2, #FAULT_CODE_LDRSTRPRE
#endif
b do_DataAbort
Ldata_ldrstr_regindex:
mov r0, r4, lsr #14 @ Get Rn
and r0, r0, #15 << 2 @ Mask out reg.
teq r0, #15 << 2
ldr r0, [r3, r0] @ Get register
and r7, r4, #15
biceq r0, r0, #PCMASK
teq r7, #15 @ Check for PC
ldr r7, [r3, r7, lsl #2] @ Get Rm
and r8, r4, #0x60 @ Get shift types
biceq r7, r7, #PCMASK
mov r9, r4, lsr #7 @ Get shift amount
and r9, r9, #31
teq r8, #0
moveq r7, r7, lsl r9
teq r8, #0x20 @ LSR shift
moveq r7, r7, lsr r9
teq r8, #0x40 @ ASR shift
moveq r7, r7, asr r9
teq r8, #0x60 @ ROR shift
moveq r7, r7, ror r9
tst r4, #1 << 23
addne r0, r0, r7
subeq r0, r0, r7 @ Apply correction
mov r1, r0
#ifdef FAULT_CODE_LDRSTRREG
orr r2, r2, #FAULT_CODE_LDRSTRREG
#endif
b do_DataAbort
Ldata_ldmstm:
mov r7, #0x11
orr r7, r7, r7, lsl #8
and r0, r4, r7
and r1, r4, r7, lsl #1
add r0, r0, r1, lsr #1
and r1, r4, r7, lsl #2
add r0, r0, r1, lsr #2
and r1, r4, r7, lsl #3
add r0, r0, r1, lsr #3
add r0, r0, r0, lsr #8
add r0, r0, r0, lsr #4
and r7, r0, #15 @ r7 = no. of registers to transfer.
mov r5, r4, lsr #14 @ Get Rn
and r5, r5, #15 << 2
ldr r0, [r3, r5] @ Get reg
eor r6, r4, r4, lsl #2
tst r6, #1 << 23 @ Check inc/dec ^ writeback
rsbeq r7, r7, #0
add r7, r0, r7, lsl #2 @ Do correction (signed)
subne r1, r7, #1
subeq r1, r0, #1
moveq r0, r7
tst r4, #1 << 21 @ Check writeback
strne r7, [r3, r5]
eor r6, r4, r4, lsl #1
tst r6, #1 << 24 @ Check Pre/Post ^ inc/dec
addeq r0, r0, #4
addeq r1, r1, #4
teq r5, #15*4 @ CHECK FOR PC
biceq r1, r1, #PCMASK
biceq r0, r0, #PCMASK
#ifdef FAULT_CODE_LDMSTM
orr r2, r2, #FAULT_CODE_LDMSTM
#endif
b do_DataAbort
Ldata_ldcstc_pre:
mov r0, r4, lsr #14 @ Get Rn
and r0, r0, #15 << 2 @ Mask out reg.
teq r0, #15 << 2
ldr r0, [r3, r0] @ Get register
mov r1, r4, lsl #24 @ Get offset
biceq r0, r0, #PCMASK
tst r4, #1 << 23
addne r0, r0, r1, lsr #24
subeq r0, r0, r1, lsr #24
mov r1, r0
#ifdef FAULT_CODE_LDCSTC
orr r2, r2, #FAULT_CODE_LDCSTC
#endif
b do_DataAbort
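Of the correction routines above, Ldata_ldmstm is the least obvious: the 0x11/0x1111 mask ladder is a loop-free population count of the LDM/STM register list, needed to work out how far the base register moved during the aborted transfer. A hedged C equivalent of just that counting step (same arithmetic, illustrative name):

#include <stdint.h>

static unsigned int ldm_register_count(uint32_t instr)
{
        uint32_t n;

        n  =  instr       & 0x1111;   /* list bits 0,4,8,12  */
        n += (instr >> 1) & 0x1111;   /* list bits 1,5,9,13  */
        n += (instr >> 2) & 0x1111;   /* list bits 2,6,10,14 */
        n += (instr >> 3) & 0x1111;   /* list bits 3,7,11,15 */
        /* each nibble of n now counts one nibble of the register list */
        n += n >> 8;
        n += n >> 4;
        return n & 15; /* as in the assembler, an all-16 list wraps to 0 */
}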
/*
* This is the return code to user mode for abort handlers
*/
ENTRY(ret_from_exception)
get_current_task tsk
mov why, #0
b ret_to_user
.data
ENTRY(fp_enter)
.word fpe_not_present
.text
/*
* Register switch for older 26-bit only ARMs
*/
ENTRY(__switch_to)
stmfd sp!, {r4 - sl, fp, lr} @ Store most regs on stack
str sp, [r0, #TSS_SAVE] @ Save sp_SVC
ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
ldmfd sp!, {r4 - sl, fp, pc}^ @ Load all regs saved previously
/*
*=============================================================================
* Low-level interface code
*-----------------------------------------------------------------------------
* Trap initialisation
*-----------------------------------------------------------------------------
*
* Note - FIQ code has changed. The default is a couple of words in 0x1c, 0x20
* that call _unexp_fiq. However, we now copy the FIQ routine to 0x1c (removing
* some excess cycles).
*
* What we need to put into 0-0x1c are branch instructions that jump into the kernel.
*/
__INIT
.Ljump_addresses:
swi SYS_ERROR0
.word vector_undefinstr - 12
.word vector_swi - 16
.word vector_prefetch - 20
.word vector_data - 24
.word vector_addrexcptn - 28
.word vector_IRQ - 32
.word _unexp_fiq - 36
b . + 8
/*
* initialise the trap system
*/
ENTRY(__trap_init)
stmfd sp!, {r4 - r7, lr}
adr r1, .Ljump_addresses
ldmia r1, {r1 - r7, ip, lr}
orr r2, lr, r2, lsr #2
orr r3, lr, r3, lsr #2
orr r4, lr, r4, lsr #2
orr r5, lr, r5, lsr #2
orr r6, lr, r6, lsr #2
orr r7, lr, r7, lsr #2
orr ip, lr, ip, lsr #2
mov r0, #0
stmia r0, {r1 - r7, ip}
ldmfd sp!, {r4 - r7, pc}^
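__trap_init is this compact because the table above pre-biases its entries: each .word holds handler minus (vector + 8), and the trailing "b . + 8" supplies the opcode template 0xea000000, a B whose 24-bit offset field is zero. OR-ing a table word, shifted right two bits, into that template produces the branch instruction stored at the vector. A hedged C sketch of the encoding step (the mask is explicit here; the assembler can omit it because 26-bit addresses keep the high bits clear):

#include <stdint.h>

static uint32_t make_vector_branch(uint32_t vector, uint32_t handler)
{
        uint32_t opcode = 0xea000000;             /* "b . + 8": offset 0 */
        uint32_t offset = handler - (vector + 8); /* what the table stores */
        return opcode | ((offset >> 2) & 0x00ffffff); /* word offset, 24 bits */
}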
.bss
__temp_irq: .space 4 @ saved lr_irq
__temp_fiq: .space 128
@@ -4,21 +4,13 @@
 # Object file lists.
-obj-y := init.o extable.o fault-common.o
 obj-m :=
 obj-n :=
 obj- :=
-ifeq ($(CONFIG_CPU_32),y)
-obj-y += consistent.o fault-armv.o ioremap.o mm-armv.o
+obj-y := consistent.o extable.o fault-armv.o fault-common.o \
+   init.o ioremap.o mm-armv.o
 obj-$(CONFIG_MODULES) += proc-syms.o
-endif
 obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
 obj-$(CONFIG_DISCONTIGMEM) += discontig.o
 # Select the processor-specific files
-p-$(CONFIG_CPU_26) += proc-arm2_3.o
 # ARMv3
 p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
 p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
/*
* linux/arch/arm/mm/proc-arm2,3.S
*
* Copyright (C) 1997-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for ARM2,3
*
* This is the low-level assembler for performing cache and memory
* functions on the ARM2, ARM250 and ARM3 processors.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
/*
* MEMC workhorse code. It's both a horse which thinks it's a pig.
*/
/*
* Function: cpu_memc_update_entry(pgd_t *pgd, unsigned long phys_pte, unsigned long addr)
* Params : pgd Page tables/MEMC mapping
* : phys_pte physical address, or PTE
* : addr virtual address
*/
ENTRY(cpu_memc_update_entry)
tst r1, #PAGE_PRESENT @ is the page present
orreq r1, r1, #PAGE_OLD | PAGE_CLEAN
moveq r2, #0x01f00000
mov r3, r1, lsr #13 @ convert to physical page nr
and r3, r3, #0x3fc
adr ip, memc_phys_table_32
ldr r3, [ip, r3]
tst r1, #PAGE_OLD | PAGE_NOT_USER
biceq r3, r3, #0x200
tsteq r1, #PAGE_READONLY | PAGE_CLEAN
biceq r3, r3, #0x300
mov r2, r2, lsr #15 @ virtual -> nr
orr r3, r3, r2, lsl #15
and r2, r2, #0x300
orr r3, r3, r2, lsl #2
and r2, r3, #255
sub r0, r0, #256 * 4
str r3, [r0, r2, lsl #2]
strb r3, [r3]
movs pc, lr
/*
* Params : r0 = preserved
* : r1 = memc table base (preserved)
* : r2 = page table entry
* : r3 = preserved
* : r4 = unused
* : r5 = memc physical address translation table
* : ip = virtual address (preserved)
*/
update_pte:
mov r4, r2, lsr #13
and r4, r4, #0x3fc
ldr r4, [r5, r4] @ convert to MEMC page
tst r2, #PAGE_OLD | PAGE_NOT_USER @ check for MEMC read
biceq r4, r4, #0x200
tsteq r2, #PAGE_READONLY | PAGE_CLEAN @ check for MEMC write
biceq r4, r4, #0x300
orr r4, r4, ip
and r2, ip, #0x01800000
orr r4, r4, r2, lsr #13
and r2, r4, #255
str r4, [r1, r2, lsl #2]
movs pc, lr
/*
* Params : r0 = preserved
* : r1 = memc table base (preserved)
* : r2 = page table base
* : r3 = preserved
* : r4 = unused
* : r5 = memc physical address translation table
* : ip = virtual address (updated)
*/
update_pte_table:
stmfd sp!, {r0, lr}
bic r0, r2, #3
1: ldr r2, [r0], #4 @ get entry
tst r2, #PAGE_PRESENT @ page present
blne update_pte @ process pte
add ip, ip, #32768 @ increment virt addr
ldr r2, [r0], #4 @ get entry
tst r2, #PAGE_PRESENT @ page present
blne update_pte @ process pte
add ip, ip, #32768 @ increment virt addr
ldr r2, [r0], #4 @ get entry
tst r2, #PAGE_PRESENT @ page present
blne update_pte @ process pte
add ip, ip, #32768 @ increment virt addr
ldr r2, [r0], #4 @ get entry
tst r2, #PAGE_PRESENT @ page present
blne update_pte @ process pte
add ip, ip, #32768 @ increment virt addr
tst ip, #32768 * 31 @ finished?
bne 1b
ldmfd sp!, {r0, pc}^
/*
* Function: cpu_memc_update_all(pgd_t *pgd)
* Params : pgd Page tables/MEMC mapping
* Notes : this is optimised for 32k pages
*/
ENTRY(cpu_memc_update_all)
stmfd sp!, {r4, r5, lr}
bl clear_tables
sub r1, r0, #256 * 4 @ start of MEMC tables
adr r5, memc_phys_table_32 @ Convert to logical page number
mov ip, #0 @ virtual address
1: ldmia r0!, {r2, r3}
tst r2, #PAGE_PRESENT
addeq ip, ip, #1048576
blne update_pte_table
mov r2, r3
tst r2, #PAGE_PRESENT
addeq ip, ip, #1048576
blne update_pte_table
teq ip, #32 * 1048576
bne 1b
ldmfd sp!, {r4, r5, pc}^
/*
* Build the table to map from physical page number to memc page number
*/
.type memc_phys_table_32, #object
memc_phys_table_32:
.irp b7, 0x00, 0x80
.irp b6, 0x00, 0x02
.irp b5, 0x00, 0x04
.irp b4, 0x00, 0x01
.irp b3, 0x00, 0x40
.irp b2, 0x00, 0x20
.irp b1, 0x00, 0x10
.irp b0, 0x00, 0x08
.long 0x03800300 + \b7 + \b6 + \b5 + \b4 + \b3 + \b2 + \b1 + \b0
.endr
.endr
.endr
.endr
.endr
.endr
.endr
.endr
.size memc_phys_table_32, . - memc_phys_table_32
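The .irp nest above is a compile-time table generator: entry i takes the eight bits of physical page number i and scatters them into the bit positions the MEMC hardware expects, on top of the constant 0x03800300. A hedged C equivalent, with the per-bit weights read directly off the .irp arguments (b0 is the innermost loop, b7 the outermost):

#include <stdint.h>

static const uint32_t bit_weight[8] = {
        0x08, 0x10, 0x20, 0x40,  /* page-number bits 0..3 */
        0x01, 0x04, 0x02, 0x80   /* page-number bits 4..7 */
};

static uint32_t memc_phys_entry(unsigned int page)  /* page number 0..255 */
{
        uint32_t entry = 0x03800300;
        for (unsigned int bit = 0; bit < 8; bit++)
                if (page & (1u << bit))
                        entry |= bit_weight[bit];
        return entry;
}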
/*
* helper for cpu_memc_update_all, this clears out all
* mappings, setting them close to the top of memory,
* and inaccessible (0x01f00000).
* Params : r0 = page table pointer
*/
clear_tables: ldr r1, _arm3_switch_mm - 4
ldr r2, [r1]
sub r1, r0, #256 * 4 @ start of MEMC tables
add r2, r1, r2, lsl #2 @ end of tables
mov r3, #0x03f00000 @ Default mapping (null mapping)
orr r3, r3, #0x00000f00
orr r4, r3, #1
orr r5, r3, #2
orr ip, r3, #3
1: stmia r1!, {r3, r4, r5, ip}
add r3, r3, #4
add r4, r4, #4
add r5, r5, #4
add ip, ip, #4
stmia r1!, {r3, r4, r5, ip}
add r3, r3, #4
add r4, r4, #4
add r5, r5, #4
add ip, ip, #4
teq r1, r2
bne 1b
mov pc, lr
/*
* Function: *_switch_mm(pgd_t *pgd)
* Params : pgd New page tables/MEMC mapping
* Purpose : update MEMC hardware with new mapping
*/
.word page_nr
_arm3_switch_mm:
mcr p15, 0, r1, c1, c0, 0 @ flush cache
_arm2_switch_mm:
stmfd sp!, {lr}
ldr r1, _arm3_switch_mm - 4
ldr r2, [r1]
sub r0, r0, #256 * 4 @ start of MEMC tables
add r1, r0, r2, lsl #2 @ end of tables
1: ldmia r0!, {r2, r3, ip, lr}
strb r2, [r2]
strb r3, [r3]
strb ip, [ip]
strb lr, [lr]
ldmia r0!, {r2, r3, ip, lr}
strb r2, [r2]
strb r3, [r3]
strb ip, [ip]
strb lr, [lr]
teq r0, r1
bne 1b
ldmfd sp!, {pc}^
/*
* Function: *_proc_init (void)
* Purpose : Initialise the cache control registers
*/
_arm3_proc_init:
mov r0, #0x001f0000
orr r0, r0, #0x0000ff00
orr r0, r0, #0x000000ff
mcr p15, 0, r0, c3, c0 @ ARM3 Cacheable
mcr p15, 0, r0, c4, c0 @ ARM3 Updateable
mov r0, #0
mcr p15, 0, r0, c5, c0 @ ARM3 Disruptive
mcr p15, 0, r0, c1, c0 @ ARM3 Flush
mov r0, #3
mcr p15, 0, r0, c2, c0 @ ARM3 Control
_arm2_proc_init:
movs pc, lr
/*
* Function: *_proc_fin (void)
* Purpose : Finalise processor (disable caches)
*/
_arm3_proc_fin: mov r0, #2
mcr p15, 0, r0, c2, c0
_arm2_proc_fin: orrs pc, lr, #PSR_I_BIT|PSR_F_BIT
/*
* Function: *_xchg_1 (int new, volatile void *ptr)
* Params : new New value to store at...
* : ptr pointer to byte-wide location
* Purpose : Performs an exchange operation
* Returns : Original byte data at 'ptr'
*/
_arm2_xchg_1: mov r2, pc
orr r2, r2, #PSR_I_BIT
teqp r2, #0
ldrb r2, [r1]
strb r0, [r1]
mov r0, r2
movs pc, lr
_arm3_xchg_1: swpb r0, r0, [r1]
movs pc, lr
/*
* Function: *_xchg_4 (int new, volatile void *ptr)
* Params : new New value to store at...
* : ptr pointer to word-wide location
* Purpose : Performs an exchange operation
* Returns : Original word data at 'ptr'
*/
_arm2_xchg_4: mov r2, pc
orr r2, r2, #PSR_I_BIT
teqp r2, #0
ldr r2, [r1]
str r0, [r1]
mov r0, r2
movs pc, lr
_arm3_xchg_4: swp r0, r0, [r1]
movs pc, lr
cpu_arm2_name:
.asciz "ARM 2"
cpu_arm250_name:
.asciz "ARM 250"
cpu_arm3_name:
.asciz "ARM 3"
__INIT
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
.globl arm2_processor_functions
arm2_processor_functions:
.word _arm2_proc_init
.word _arm2_proc_fin
.word _arm2_switch_mm
.word _arm2_xchg_1
.word _arm2_xchg_4
.globl arm250_processor_functions
arm250_processor_functions:
.word _arm2_proc_init
.word _arm2_proc_fin
.word _arm2_switch_mm
.word _arm3_xchg_1
.word _arm3_xchg_4
.globl arm3_processor_functions
arm3_processor_functions:
.word _arm3_proc_init
.word _arm3_proc_fin
.word _arm3_switch_mm
.word _arm3_xchg_1
.word _arm3_xchg_4
arm2_arch_name: .asciz "armv1"
arm3_arch_name: .asciz "armv2"
arm2_elf_name: .asciz "v1"
arm3_elf_name: .asciz "v2"
.align
.section ".proc.info", #alloc, #execinstr
.long 0x41560200
.long 0xfffffff0
.long 0
mov pc, lr
.long arm2_arch_name
.long arm2_elf_name
.long 0
.long cpu_arm2_name
.long arm2_processor_functions
.long 0
.long 0
.long 0
.long 0x41560250
.long 0xfffffff0
.long 0
mov pc, lr
.long arm3_arch_name
.long arm3_elf_name
.long 0
.long cpu_arm250_name
.long arm250_processor_functions
.long 0
.long 0
.long 0
.long 0x41560300
.long 0xfffffff0
.long 0
mov pc, lr
.long arm3_arch_name
.long arm3_elf_name
.long 0
.long cpu_arm3_name
.long arm3_processor_functions
.long 0
.long 0
.long 0
/*
* linux/include/asm-arm/cpu-multi26.h
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASSEMBLY__
#include <asm/page.h>
/* forward-declare task_struct */
struct task_struct;
/*
* Don't change this structure - ASM code
* relies on it.
*/
extern struct processor {
/* Set up any processor specifics */
void (*_proc_init)(void);
/* Disable any processor specifics */
void (*_proc_fin)(void);
/* set the MEMC hardware mappings */
void (*_switch_mm)(pgd_t *pgd);
/* XCHG */
unsigned long (*_xchg_1)(unsigned long x, volatile void *ptr);
unsigned long (*_xchg_4)(unsigned long x, volatile void *ptr);
} processor;
extern const struct processor arm2_processor_functions;
extern const struct processor arm250_processor_functions;
extern const struct processor arm3_processor_functions;
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_do_idle() do { } while (0)
#define cpu_switch_mm(pgd,mm) processor._switch_mm(pgd)
#define cpu_xchg_1(x,ptr) processor._xchg_1(x,ptr)
#define cpu_xchg_4(x,ptr) processor._xchg_4(x,ptr)
extern void cpu_memc_update_all(pgd_t *pgd);
extern void cpu_memc_update_entry(pgd_t *pgd, unsigned long phys_pte, unsigned long log_addr);
#endif
/*
* linux/asm-arm/proc-armo/assembler.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains arm architecture specific defines
* for the different processors
*/
#define MODE_USR USR26_MODE
#define MODE_FIQ FIQ26_MODE
#define MODE_IRQ IRQ26_MODE
#define MODE_SVC SVC26_MODE
#define DEFAULT_FIQ MODE_FIQ
#ifdef __STDC__
#define LOADREGS(cond, base, reglist...)\
ldm##cond base,reglist^
#define RETINSTR(instr, regs...)\
instr##s regs
#else
#define LOADREGS(cond, base, reglist...)\
ldm/**/cond base,reglist^
#define RETINSTR(instr, regs...)\
instr/**/s regs
#endif
#define MODENOP\
mov r0, r0
#define MODE(savereg,tmpreg,mode) \
mov savereg, pc; \
bic tmpreg, savereg, $0x0c000003; \
orr tmpreg, tmpreg, $mode; \
teqp tmpreg, $0
#define RESTOREMODE(savereg) \
teqp savereg, $0
#define SAVEIRQS(tmpreg)
#define RESTOREIRQS(tmpreg)
#define DISABLEIRQS(tmpreg)\
teqp pc, $0x08000003
#define ENABLEIRQS(tmpreg)\
teqp pc, $0x00000003
#define USERMODE(tmpreg)\
teqp pc, $0x00000000;\
mov r0, r0
#define SVCMODE(tmpreg)\
teqp pc, $0x00000003;\
mov r0, r0
/*
* Save the current IRQ state and disable IRQs
* Note that this macro assumes FIQs are enabled, and
* that the processor is in SVC mode.
*/
.macro save_and_disable_irqs, oldcpsr, temp
mov \oldcpsr, pc
orr \temp, \oldcpsr, #0x08000000
teqp \temp, #0
.endm
/*
* Restore interrupt state previously stored in
* a register
* ** Actually do nothing on Arc - hope that the caller uses a MOVS PC soon
* after!
*/
.macro restore_irqs, oldcpsr
@ This be restore_irqs
.endm
/*
* These two are used to save LR/restore PC over a user-based access.
* The old 26-bit architecture requires that we do. On 32-bit
* architecture, we can safely ignore this requirement.
*/
.macro save_lr
str lr, [sp, #-4]!
.endm
.macro restore_pc
ldmfd sp!, {pc}^
.endm
#define USER(x...) \
9999: x; \
.section __ex_table,"a"; \
.align 3; \
.long 9999b,9001f; \
.previous
/*
* linux/include/asm-arm/proc-armo/cache.h
*
* Copyright (C) 1999-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Cache handling for 26-bit ARM processors.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma,start,end) do { } while (0)
#define flush_cache_page(vma,vmaddr) do { } while (0)
#define invalidate_dcache_range(start,end) do { } while (0)
#define clean_dcache_range(start,end) do { } while (0)
#define flush_dcache_range(start,end) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define clean_dcache_entry(_s) do { } while (0)
#define clean_cache_entry(_start) do { } while (0)
#define flush_icache_range(start,end) do { } while (0)
#define flush_icache_page(vma,page) do { } while (0)
/* DAG: ARM3 will flush cache on MEMC updates anyway, so don't bother */
#define clean_cache_area(_start,_size) do { } while (0)
/*
* ELF definitions for 26-bit CPUs
*/
#define ELF_EXEC_PAGESIZE 32768
#ifdef __KERNEL__
/* We can only execute 26-bit code. */
#define ELF_PROC_OK(x) \
((x)->e_flags & EF_ARM_APCS26)
#define SET_PERSONALITY(ex,ibcs2) set_personality(PER_LINUX)
#endif
/*
* linux/include/asm-arm/proc-armo/locks.h
*
* Copyright (C) 2000 Russell King
* Fixes for 26 bit machines, (C) 2000 Dave Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Interrupt safe locking assembler.
*/
#ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H
/* Decrements by 1, fails if value < 0 */
#define __down_op(ptr,fail) \
({ \
__asm__ __volatile__ ( \
"@ atomic down operation\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" subs lr, lr, #1\n" \
" str lr, [%0]\n" \
" orrmi ip, ip, #0x80000000 @ set N\n" \
" teqp ip, #0\n" \
" movmi ip, %0\n" \
" blmi " #fail \
: \
: "r" (ptr) \
: "ip", "lr", "cc"); \
})
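All of these primitives lean on the same 26-bit idiom, so a hedged C rendering of __down_op may help: "mov ip, pc" captures the PSR flags and mode (they live in the top and bottom bits of r15), teqp with the I bit OR-ed in disables IRQs, and on the way out the saved flags are written back with N forced when the decrement went negative, so the following blmi takes the contention path. The IRQ helpers below are stubs standing in for those teqp sequences:

static void irq_disable_stub(void) { /* stands in for: teqp pc | I_BIT, #0 */ }
static void irq_restore_stub(void) { /* stands in for: teqp saved, #0 */ }

static void down_op(volatile long *count, void (*fail)(volatile long *))
{
        irq_disable_stub();
        long v = --*count;      /* the ldr/subs/str under masked IRQs */
        irq_restore_stub();
        if (v < 0)              /* mirrors orrmi ... teqp ... blmi */
                fail(count);
}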
#define __down_op_ret(ptr,fail) \
({ \
unsigned int result; \
__asm__ __volatile__ ( \
" @ down_op_ret\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%1]\n" \
" and ip, ip, #0x0c000003\n" \
" subs lr, lr, #1\n" \
" str lr, [%1]\n" \
" orrmi ip, ip, #0x80000000 @ set N\n" \
" teqp ip, #0\n" \
" movmi ip, %1\n" \
" movpl ip, #0\n" \
" blmi " #fail "\n" \
" mov %0, ip" \
: "=&r" (result) \
: "r" (ptr) \
: "ip", "lr", "cc"); \
result; \
})
#define __up_op(ptr,wake) \
({ \
__asm__ __volatile__ ( \
"@ up_op\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, #1\n" \
" str lr, [%0]\n" \
" orrle ip, ip, #0x80000000 @ set N - should this be mi ??? DAG ! \n" \
" teqp ip, #0\n" \
" movmi ip, %0\n" \
" blmi " #wake \
: \
: "r" (ptr) \
: "ip", "lr", "cc"); \
})
/*
* The value 0x01000000 supports up to 128 processors and
* lots of processes. BIAS must be chosen such that sub'ing
* BIAS once per CPU will result in the long remaining
* negative.
*/
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
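A worked example of the bias arithmetic, assuming the count starts at RW_LOCK_BIAS: three readers each take __down_op_read below and the count drops to 0x00fffffd, still comfortably positive. A writer's __down_op_write subtracts the whole bias and proceeds only when the result is exactly zero, which requires the count to have been an untouched 0x01000000, i.e. no readers and no writer; any reader in flight leaves a non-zero remainder and the blne drops into the failure routine.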
/* Decrements by RW_LOCK_BIAS rather than 1, fails if value != 0 */
#define __down_op_write(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op_write\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" and ip, ip, #0x0c000003\n" \
\
" ldr lr, [%0]\n" \
" subs lr, lr, %1\n" \
" str lr, [%0]\n" \
\
" orreq ip, ip, #0x40000000 @ set Z \n"\
" teqp ip, #0\n" \
" movne ip, %0\n" \
" blne " #fail \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc"); \
})
/* Increments by RW_LOCK_BIAS, wakes if value >= 0 */
#define __up_op_write(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
\
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
\
" orrcs ip, ip, #0x20000000 @ set C\n" \
" teqp ip, #0\n" \
" movcs ip, %0\n" \
" blcs " #wake \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc"); \
})
#define __down_op_read(ptr,fail) \
__down_op(ptr, fail)
#define __up_op_read(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
\
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
\
" orreq ip, ip, #0x40000000 @ Set Z \n" \
" teqp ip, #0\n" \
" moveq ip, %0\n" \
" bleq " #wake \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc"); \
})
#endif
/*
* linux/include/asm-arm/proc-armo/page.h
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_PAGE_H
#define __ASM_PROC_PAGE_H
#include <linux/config.h>
/* PAGE_SHIFT determines the page size. This is configurable. */
#if defined(CONFIG_PAGESIZE_16)
#define PAGE_SHIFT 14 /* 16K */
#else /* default */
#define PAGE_SHIFT 15 /* 32K */
#endif
#define EXEC_PAGESIZE 32768
#ifndef __ASSEMBLY__
#ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#else
typedef unsigned long pgd_t;
#define pgd_val(x) (x)
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROC_PAGE_H */
/*
* linux/include/asm-arm/proc-armo/pgalloc.h
*
* Copyright (C) 2001-2002 Russell King
*
* Page table allocation/freeing primitives for 26-bit ARM processors.
*/
#include <linux/slab.h>
extern kmem_cache_t *pte_cache;
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
return kmem_cache_alloc(pte_cache, GFP_KERNEL);
}
static inline void pte_free_kernel(pte_t *pte)
{
if (pte)
kmem_cache_free(pte_cache, pte);
}
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*
* If 'mm' is the init task's mm, then we are doing a vmalloc, and we
* need to set stuff up correctly for it.
*/
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
set_pmd(pmdp, __mk_pmd(ptep, _PAGE_TABLE));
}
/*
* We use the old 2.5.5-rmk1 hack for this.
* This is not truly correct, but should be functional.
*/
#define pte_alloc_one(mm,addr) ((struct page *)pte_alloc_one_kernel(mm,addr))
#define pte_free(pte) pte_free_kernel((pte_t *)pte)
#define pmd_populate(mm,pmdp,ptep) pmd_populate_kernel(mm,pmdp,(pte_t *)ptep)
/*
* linux/include/asm-arm/proc-armo/pgtable.h
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 18-Oct-1997 RMK Now two-level (32x32)
*/
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H
/*
* entries per page directory level: they are two-level, so
* we don't really have any PMD directory.
*/
#define PTRS_PER_PTE 32
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 32
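Spelled out, the sizing these constants imply: 32 PGD entries times 32 PTEs times 32 KB pages (PAGE_SHIFT 15) is 2^5 * 2^5 * 2^15 = 2^25 bytes, i.e. the 32 MB logical address space the MEMC provides, and also the bound that the "teq ip, #32 * 1048576" test in cpu_memc_update_all loops up to.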
/*
* The vmalloc() routines leave a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
#define VMALLOC_START 0x01a00000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END 0x01c00000
#define _PAGE_TABLE (0x01)
#define pmd_bad(pmd) ((pmd_val(pmd) & 0xfc000002))
#define set_pmd(pmdp,pmd) ((*(pmdp)) = (pmd))
#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))
static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
unsigned long pte_ptr = (unsigned long)ptep;
pmd_t pmd;
pmd_val(pmd) = __virt_to_phys(pte_ptr) | prot;
return pmd;
}
static inline unsigned long pmd_page(pmd_t pmd)
{
return __phys_to_virt(pmd_val(pmd) & ~_PAGE_TABLE);
}
#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define _PAGE_PRESENT 0x01
#define _PAGE_READONLY 0x02
#define _PAGE_NOT_USER 0x04
#define _PAGE_OLD 0x08
#define _PAGE_CLEAN 0x10
/* -- present -- -- !dirty -- --- !write --- ---- !user --- */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY | _PAGE_NOT_USER)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CLEAN )
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY )
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY )
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_NOT_USER)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_OLD | _PAGE_CLEAN)
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_read(pte) (!(pte_val(pte) & _PAGE_NOT_USER))
#define pte_write(pte) (!(pte_val(pte) & _PAGE_READONLY))
#define pte_exec(pte) (!(pte_val(pte) & _PAGE_NOT_USER))
#define pte_dirty(pte) (!(pte_val(pte) & _PAGE_CLEAN))
#define pte_young(pte) (!(pte_val(pte) & _PAGE_OLD))
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_READONLY; return pte; }
static inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_NOT_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_NOT_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) |= _PAGE_CLEAN; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) |= _PAGE_OLD; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_READONLY; return pte; }
static inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= ~_PAGE_NOT_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_NOT_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) &= ~_PAGE_CLEAN; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) &= ~_PAGE_OLD; return pte; }
/*
* We don't store cache state bits in the page table here.
*/
#define pgprot_noncached(prot) (prot)
extern void pgtable_cache_init(void);
#endif /* __ASM_PROC_PGTABLE_H */
/*
* linux/include/asm-arm/proc-armo/processor.h
*
* Copyright (C) 1996 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Changelog:
* 27-06-1996 RMK Created
* 10-10-1996 RMK Brought up to date with SA110
* 26-09-1996 RMK Added 'EXTRA_THREAD_STRUCT*'
* 28-09-1996 RMK Moved start_thread into the processor dependencies
* 11-01-1998 RMK Added new uaccess_t
* 09-09-1998 PJB Delete redundant `wp_works_ok'
* 30-05-1999 PJB Save sl across context switches
*/
#ifndef __ASM_PROC_PROCESSOR_H
#define __ASM_PROC_PROCESSOR_H
#include <linux/string.h>
#define KERNEL_STACK_SIZE 4096
typedef struct {
void (*put_byte)(void); /* Special calling convention */
void (*get_byte)(void); /* Special calling convention */
void (*put_half)(void); /* Special calling convention */
void (*get_half)(void); /* Special calling convention */
void (*put_word)(void); /* Special calling convention */
void (*get_word)(void); /* Special calling convention */
unsigned long (*copy_from_user)(void *to, const void *from, unsigned long sz);
unsigned long (*copy_to_user)(void *to, const void *from, unsigned long sz);
unsigned long (*clear_user)(void *addr, unsigned long sz);
unsigned long (*strncpy_from_user)(char *to, const char *from, unsigned long sz);
unsigned long (*strnlen_user)(const char *s, long n);
} uaccess_t;
extern uaccess_t uaccess_user, uaccess_kernel;
#define EXTRA_THREAD_STRUCT \
uaccess_t *uaccess; /* User access functions*/
#define EXTRA_THREAD_STRUCT_INIT \
.uaccess = &uaccess_kernel,
#define start_thread(regs,pc,sp) \
({ \
unsigned long *stack = (unsigned long *)sp; \
set_fs(USER_DS); \
memzero(regs->uregs, sizeof (regs->uregs)); \
regs->ARM_pc = pc; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
})
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1020])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1018])
#endif
/*
* linux/include/asm-arm/proc-armo/ptrace.h
*
* Copyright (C) 1996-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_PTRACE_H
#define __ASM_PROC_PTRACE_H
#define USR26_MODE 0x00000000
#define FIQ26_MODE 0x00000001
#define IRQ26_MODE 0x00000002
#define SVC26_MODE 0x00000003
#define USR_MODE USR26_MODE
#define FIQ_MODE FIQ26_MODE
#define IRQ_MODE IRQ26_MODE
#define SVC_MODE SVC26_MODE
#define MODE_MASK 0x00000003
#define PSR_F_BIT 0x04000000
#define PSR_I_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_N_BIT 0x80000000
#define PCMASK 0xfc000003
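Taken together, these masks describe how a single 32-bit r15 is shared three ways on the 26-bit architecture, which is what lets the handlers earlier in this commit save and restore processor state with plain mov/teqp on pc. A hedged sketch of the split (helper names are illustrative, not kernel API):

#include <stdint.h>

static inline uint32_t r15_pc(uint32_t r15)    { return r15 & ~PCMASK; }     /* bits 2..25 */
static inline uint32_t r15_mode(uint32_t r15)  { return r15 & MODE_MASK; }   /* bits 0..1 */
static inline uint32_t r15_flags(uint32_t r15) { return r15 & 0xfc000000u; } /* N,Z,C,V,I,F */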
#ifndef __ASSEMBLY__
/* this struct defines the way the registers are stored on the
stack during a system call. */
struct pt_regs {
long uregs[17];
};
#define ARM_pc uregs[15]
#define ARM_lr uregs[14]
#define ARM_sp uregs[13]
#define ARM_ip uregs[12]
#define ARM_fp uregs[11]
#define ARM_r10 uregs[10]
#define ARM_r9 uregs[9]
#define ARM_r8 uregs[8]
#define ARM_r7 uregs[7]
#define ARM_r6 uregs[6]
#define ARM_r5 uregs[5]
#define ARM_r4 uregs[4]
#define ARM_r3 uregs[3]
#define ARM_r2 uregs[2]
#define ARM_r1 uregs[1]
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[16]
#ifdef __KERNEL__
#define processor_mode(regs) \
((regs)->ARM_pc & MODE_MASK)
#define user_mode(regs) \
(processor_mode(regs) == USR26_MODE)
#define thumb_mode(regs) (0)
#define interrupts_enabled(regs) \
(!((regs)->ARM_pc & PSR_I_BIT))
#define fast_interrupts_enabled(regs) \
(!((regs)->ARM_pc & PSR_F_BIT))
#define condition_codes(regs) \
((regs)->ARM_pc & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT))
/* Are the current registers suitable for user mode?
* (used to maintain security in signal handlers)
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
if (user_mode(regs) &&
(regs->ARM_pc & (PSR_F_BIT | PSR_I_BIT)) == 0)
return 1;
/*
* force it to be something sensible
*/
regs->ARM_pc &= ~(MODE_MASK | PSR_F_BIT | PSR_I_BIT);
return 0;
}
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif
/*
* linux/include/asm-arm/proc-armo/shmparam.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* definitions for the shared process memory on the ARM3
*/
#ifndef __ASM_PROC_SHMPARAM_H
#define __ASM_PROC_SHMPARAM_H
#ifndef SHMMAX
#define SHMMAX 0x003fa000
#endif
#endif
/*
* linux/include/asm-arm/proc-armo/system.h
*
* Copyright (C) 1995, 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H
#define vectors_base() (0)
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
extern void __bad_xchg(volatile void *, int);
switch (size) {
case 1: return cpu_xchg_1(x, ptr);
case 4: return cpu_xchg_4(x, ptr);
default: __bad_xchg(ptr, size);
}
return 0;
}
/*
* We need to turn the caches off before calling the reset vector - RiscOS
* messes up if we don't
*/
#define proc_hard_reset() cpu_proc_fin()
/*
* A couple of speedups for the ARM
*/
/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_save_flags_cli(x) \
do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ save_flags_cli\n" \
" orr %1, %0, #0x08000000\n" \
" and %0, %0, #0x0c000000\n" \
" teqp %1, #0\n" \
: "=r" (x), "=r" (temp) \
: \
: "memory"); \
} while (0)
/*
* Enable IRQs
*/
#define local_irq_enable() \
do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ sti\n" \
" bic %0, %0, #0x08000000\n" \
" teqp %0, #0\n" \
: "=r" (temp) \
: \
: "memory"); \
} while(0)
/*
* Disable IRQs
*/
#define local_irq_disable() \
do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ cli\n" \
" orr %0, %0, #0x08000000\n" \
" teqp %0, #0\n" \
: "=r" (temp) \
: \
: "memory"); \
} while(0)
#define __clf() do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ clf\n" \
" orr %0, %0, #0x04000000\n" \
" teqp %0, #0\n" \
: "=r" (temp)); \
} while(0)
#define __stf() do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ stf\n" \
" bic %0, %0, #0x04000000\n" \
" teqp %0, #0\n" \
: "=r" (temp)); \
} while(0)
/*
* save current IRQ & FIQ state
*/
#define local_save_flags(x) \
do { \
__asm__ __volatile__( \
" mov %0, pc @ save_flags\n" \
" and %0, %0, #0x0c000000\n" \
: "=r" (x)); \
} while (0)
/*
* restore saved IRQ & FIQ state
*/
#define local_irq_restore(x) \
do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ restore_flags\n" \
" bic %0, %0, #0x0c000000\n" \
" orr %0, %0, %1\n" \
" teqp %0, #0\n" \
: "=&r" (temp) \
: "r" (x) \
: "memory"); \
} while (0)
#endif
/*
* TLB flushing:
*
* - flush_tlb_all() flushes all processes' TLBs
* - flush_tlb_mm(mm) flushes the specified mm context's TLBs
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
*/
#define flush_tlb_all() memc_update_all()
#define flush_tlb_mm(mm) memc_update_mm(mm)
#define flush_tlb_range(vma,start,end) \
do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0)
#define flush_tlb_page(vma, vmaddr) do { } while (0)
/*
* The following handle the weird MEMC chip
*/
static inline void memc_update_all(void)
{
struct task_struct *p;
cpu_memc_update_all(init_mm.pgd);
for_each_task(p) {
if (!p->mm)
continue;
cpu_memc_update_all(p->mm->pgd);
}
processor._set_pgd(current->active_mm->pgd);
}
static inline void memc_update_mm(struct mm_struct *mm)
{
cpu_memc_update_all(mm->pgd);
if (mm == current->active_mm)
processor._set_pgd(mm->pgd);
}
static inline void
memc_clear(struct mm_struct *mm, struct page *page)
{
cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0);
if (mm == current->active_mm)
processor._set_pgd(mm->pgd);
}
static inline void
memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr)
{
cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr);
if (mm == current->active_mm)
processor._set_pgd(mm->pgd);
}
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
struct mm_struct *mm = vma->vm_mm;
memc_update_addr(mm, pte, addr);
}
/*
* linux/include/asm-arm/proc-armo/segment.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* The fs functions are implemented manually on the ARM2 and ARM3
* architectures.
* Use the *_user functions to access user memory; faults behave as
* though the user were accessing the memory.
* Use set_fs(get_ds()) and then the *_user functions to allow them to
* access kernel memory.
*/
/*
* These are the values used to represent the user `fs' and the kernel `ds'
*/
#define KERNEL_DS 0x03000000
#define USER_DS 0x02000000
extern uaccess_t uaccess_user, uaccess_kernel;
static inline void set_fs (mm_segment_t fs)
{
current->addr_limit = fs;
current->thread.uaccess = fs == USER_DS ? &uaccess_user : &uaccess_kernel;
}
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
__asm__ __volatile__("subs %1, %0, %3; cmpcs %1, %2; movcs %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current->addr_limit) \
: "cc"); \
flag; })
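As a hedged C equivalent of the conditional arithmetic in __range_ok above (illustrative function, not the kernel's): the flag starts out as addr_limit and ends up zero exactly when the whole [addr, addr+size) range fits below the limit without wrapping:

#include <stdint.h>

static uint32_t range_ok(uint32_t addr, uint32_t size, uint32_t limit)
{
        /* subs: carry set iff size <= limit; cmpcs/movcs: pass iff addr fits */
        if (size <= limit && addr <= limit - size)
                return 0;       /* zero means the access is allowed */
        return limit;           /* any non-zero value means it is rejected */
}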
#define __addr_ok(addr) ({ \
unsigned long flag; \
__asm__ __volatile__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current->addr_limit), "r" (addr) \
: "cc"); \
(flag == 0); })
#define __put_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %1\n" \
" mov r1, %2\n" \
" mov r2, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r2\n" \
: "=r" (err) \
: "r" (x), "r" (addr), "r" (current->thread.uaccess->put_byte), \
"0" (err) \
: "r0", "r1", "r2", "lr")
#define __put_user_asm_half(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %1\n" \
" mov r1, %2\n" \
" mov r2, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r2\n" \
: "=r" (err) \
: "r" (x), "r" (addr), "r" (current->thread.uaccess->put_half), \
"0" (err) \
: "r0", "r1", "r2", "lr")
#define __put_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %1\n" \
" mov r1, %2\n" \
" mov r2, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r2\n" \
: "=r" (err) \
: "r" (x), "r" (addr), "r" (current->thread.uaccess->put_word), \
"0" (err) \
: "r0", "r1", "r2", "lr")
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %2\n" \
" mov r1, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r1\n" \
" mov %1, r0\n" \
: "=r" (err), "=r" (x) \
: "r" (addr), "r" (current->thread.uaccess->get_byte), "0" (err) \
: "r0", "r1", "r2", "lr")
#define __get_user_asm_half(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %2\n" \
" mov r1, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r1\n" \
" mov %1, r0\n" \
: "=r" (err), "=r" (x) \
: "r" (addr), "r" (current->thread.uaccess->get_half), "0" (err) \
: "r0", "r1", "r2", "lr")
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %2\n" \
" mov r1, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r1\n" \
" mov %1, r0\n" \
: "=r" (err), "=r" (x) \
: "r" (addr), "r" (current->thread.uaccess->get_word), "0" (err) \
: "r0", "r1", "r2", "lr")
#define __do_copy_from_user(to,from,n) \
(n) = current->thread.uaccess->copy_from_user((to),(from),(n))
#define __do_copy_to_user(to,from,n) \
(n) = current->thread.uaccess->copy_to_user((to),(from),(n))
#define __do_clear_user(addr,sz) \
(sz) = current->thread.uaccess->clear_user((addr),(sz))
#define __do_strncpy_from_user(dst,src,count,res) \
(res) = current->thread.uaccess->strncpy_from_user(dst,src,count)
#define __do_strnlen_user(s,n,res) \
(res) = current->thread.uaccess->strnlen_user(s,n)
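The register shuffling in the macros above encodes a small private calling convention, inferred here from the asm rather than any documented ABI: the put_* helpers are entered with the value in r0, the user address in r1 and the running error code in r2, which they update in place, while the get_* helpers take the address in r0 and the error code in r1 and hand the loaded value back in r0. The "mov lr, pc; mov pc, %3" pair is the pre-BLX idiom for an indirect call through the uaccess_t function pointers; on 26-bit CPUs it also preserves the caller's flags in lr.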
@@ -21,11 +21,6 @@
 #undef MULTI_CPU
 #undef CPU_NAME
-#ifdef CONFIG_CPU_26
-# define CPU_INCLUDE_NAME "asm/cpu-multi26.h"
-# define MULTI_CPU
-#endif
 /*
  * CPU_NAME - the prefix for CPU related functions
  */