Commit a113994e authored by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.5-rmk

into home.transmeta.com:/home/torvalds/v2.5/linux
parents e847679b 27510721
@@ -31,13 +31,15 @@ apcs-$(CONFIG_CPU_32)	:=-mapcs-32
 apcs-$(CONFIG_CPU_26)	:=-mapcs-26 -mcpu=arm3
 
 # This selects which instruction set is used.
-# Note that GCC is lame - it doesn't numerically define an
-# architecture version macro, but instead defines a whole
-# series of macros.
-arch-$(CONFIG_CPU_32v3)	:=-D__LINUX_ARM_ARCH__=3 -march=armv3
+# Note that GCC does not numerically define an architecture version
+# macro, but instead defines a whole series of macros which makes
+# testing for a specific architecture or later rather impossible.
+#
+# Note - GCC does accept -march=armv5te, but someone messed up the assembler or the
+# gcc specs file - this needs fixing properly - ie in gcc and/or binutils.
+arch-$(CONFIG_CPU_32v5)	:=-D__LINUX_ARM_ARCH__=5 -march=armv5t
 arch-$(CONFIG_CPU_32v4)	:=-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v5)	:=-D__LINUX_ARM_ARCH__=5 -march=armv5te
-arch-$(CONFIG_CPU_XSCALE)	:=-D__LINUX_ARM_ARCH__=5 -march=armv4 -Wa,-mxscale #-march=armv5te
+arch-$(CONFIG_CPU_32v3)	:=-D__LINUX_ARM_ARCH__=3 -march=armv3
 
 # This selects how we optimise for the processor.
 tune-$(CONFIG_CPU_ARM610)	:=-mtune=arm610
@@ -48,13 +50,13 @@ tune-$(CONFIG_CPU_ARM922T)	:=-mtune=arm9tdmi
 tune-$(CONFIG_CPU_ARM926T)	:=-mtune=arm9tdmi
 tune-$(CONFIG_CPU_SA110)	:=-mtune=strongarm110
 tune-$(CONFIG_CPU_SA1100)	:=-mtune=strongarm1100
-tune-$(CONFIG_CPU_XSCALE)	:=-mtune=strongarm #-mtune=xscale
+tune-$(CONFIG_CPU_XSCALE)	:=-mtune=strongarm -Wa,-mxscale #-mtune=xscale
 
 # Force -mno-fpu to be passed to the assembler.  Some versions of gcc don't
 # do this with -msoft-float
 CFLAGS_BOOT	:=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
 CFLAGS		+=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
-AFLAGS		+=$(apcs-y) $(arch-y) -mno-fpu -msoft-float -Wa,-mno-fpu
+AFLAGS		+=$(apcs-y) $(arch-y) $(tune-y) -mno-fpu -msoft-float -Wa,-mno-fpu
 
 #Default value
 DATAADDR	:= .
@@ -208,6 +210,7 @@ zi:;	$(Q)$(MAKE) $(build)=$(boot) zinstall
 	)
 
 arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
+				   include/asm-arm/.arch include/asm-arm/.proc \
 				   include/config/MARKER
 
 include/asm-$(ARCH)/constants.h: arch/$(ARCH)/kernel/asm-offsets.s
......
@@ -50,6 +50,10 @@ __SA1100_start:
 10:
 #endif
+		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
+		ands	r0, r0, #0x0d
+		beq	99f
+
 		@ Data cache might be active.
 		@ Be sure to flush kernel binary out of the cache,
 		@ whatever state it is, before it is turned off.
@@ -68,11 +72,4 @@ __SA1100_start:
 		bic	r0, r0, #0x0d		@ clear WB, DC, MMU
 		bic	r0, r0, #0x1000		@ clear Icache
 		mcr	p15, 0, r0, c1, c0, 0
-
-/*
- * Pause for a short time so that we give enough time
- * for the host to start a terminal up.
- */
-		mov	r0, #0x00200000
-1:		subs	r0, r0, #1
-		bne	1b
+99:
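
Note on the hunk above: the three added instructions read the CP15 control register and skip the whole flush when the MMU, data cache and write buffer are already off — mask 0x0d covers exactly those three enable bits, and the flush code also clears bit 12, the Icache enable ("clear WB, DC, MMU" / "clear Icache" in the comments). A hedged C sketch of the same test, with bit names assumed from the standard ARM control-register layout:

	/* Sketch only: the kernel performs this test in assembly. */
	#define CR_M	(1 << 0)	/* MMU enable */
	#define CR_C	(1 << 2)	/* data cache enable */
	#define CR_W	(1 << 3)	/* write buffer enable */
	#define CR_I	(1 << 12)	/* instruction cache enable */

	static inline int need_cache_flush(unsigned long cp15_ctrl)
	{
		/* mirrors "ands r0, r0, #0x0d; beq 99f" */
		return (cp15_ctrl & (CR_M | CR_C | CR_W)) != 0;
	}
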
@@ -2,14 +2,9 @@
 # Automatically generated make config: don't edit
 #
 CONFIG_ARM=y
-# CONFIG_EISA is not set
-# CONFIG_SBUS is not set
-# CONFIG_MCA is not set
+CONFIG_MMU=y
 CONFIG_UID16=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
-# CONFIG_GENERIC_BUST_SPINLOCK is not set
-# CONFIG_GENERIC_ISA_DMA is not set
 
 #
 # Code maturity level options
@@ -19,15 +14,19 @@ CONFIG_EXPERIMENTAL=y
 #
 # General setup
 #
-CONFIG_NET=y
+CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 CONFIG_SYSCTL=y
+CONFIG_LOG_BUF_SHIFT=14
 
 #
 # Loadable module support
 #
 CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_OBSOLETE_MODPARM=y
 # CONFIG_MODVERSIONS is not set
 CONFIG_KMOD=y
@@ -58,112 +57,41 @@ CONFIG_ARCH_SHARK=y
 #
 # Archimedes/A5000 Implementations (select only ONE)
 #
-# CONFIG_ARCH_ARC is not set
-# CONFIG_ARCH_A5K is not set
 
 #
-# Footbridge Implementations
+# CLPS711X/EP721X Implementations
 #
-# CONFIG_ARCH_CATS is not set
-# CONFIG_ARCH_PERSONAL_SERVER is not set
-# CONFIG_ARCH_EBSA285_ADDIN is not set
-# CONFIG_ARCH_EBSA285_HOST is not set
-# CONFIG_ARCH_NETWINDER is not set
 
 #
-# SA11x0 Implementations
+# Epxa10db
 #
-# CONFIG_SA1100_ASSABET is not set
-# CONFIG_ASSABET_NEPONSET is not set
-# CONFIG_SA1100_ADSBITSY is not set
-# CONFIG_SA1100_BRUTUS is not set
-# CONFIG_SA1100_CERF is not set
-# CONFIG_SA1100_H3100 is not set
-# CONFIG_SA1100_H3600 is not set
-# CONFIG_SA1100_H3800 is not set
-# CONFIG_SA1100_H3XXX is not set
-# CONFIG_SA1100_EXTENEX1 is not set
-# CONFIG_SA1100_FLEXANET is not set
-# CONFIG_SA1100_FREEBIRD is not set
-# CONFIG_SA1100_GRAPHICSCLIENT is not set
-# CONFIG_SA1100_GRAPHICSMASTER is not set
-# CONFIG_SA1100_BADGE4 is not set
-# CONFIG_SA1100_JORNADA720 is not set
-# CONFIG_SA1100_HUW_WEBPANEL is not set
-# CONFIG_SA1100_ITSY is not set
-# CONFIG_SA1100_LART is not set
-# CONFIG_SA1100_NANOENGINE is not set
-# CONFIG_SA1100_OMNIMETER is not set
-# CONFIG_SA1100_PANGOLIN is not set
-# CONFIG_SA1100_PLEB is not set
-# CONFIG_SA1100_PT_SYSTEM3 is not set
-# CONFIG_SA1100_SHANNON is not set
-# CONFIG_SA1100_SHERMAN is not set
-# CONFIG_SA1100_SIMPAD is not set
-# CONFIG_SA1100_PFS168 is not set
-# CONFIG_SA1100_VICTOR is not set
-# CONFIG_SA1100_XP860 is not set
-# CONFIG_SA1100_YOPY is not set
-# CONFIG_SA1100_STORK is not set
-# CONFIG_SA1100_USB is not set
-# CONFIG_SA1100_USB_NETLINK is not set
-# CONFIG_SA1100_USB_CHAR is not set
-# CONFIG_H3600_SLEEVE is not set
 
 #
-# Intel PXA250/210 Implementations
+# Footbridge Implementations
 #
-# CONFIG_ARCH_LUBBOCK is not set
-# CONFIG_ARCH_PXA_IDP is not set
 
 #
-# CLPS711X/EP721X Implementations
+# IOP310 Implementation Options
 #
-# CONFIG_ARCH_AUTCPU12 is not set
-# CONFIG_ARCH_CDB89712 is not set
-# CONFIG_ARCH_CLEP7312 is not set
-# CONFIG_ARCH_EDB7211 is not set
-# CONFIG_ARCH_P720T is not set
-# CONFIG_ARCH_FORTUNET is not set
-# CONFIG_ARCH_EP7211 is not set
-# CONFIG_ARCH_EP7212 is not set
 
 #
-# IOP310 Implementation Options
+# IOP310 Chipset Features
 #
-# CONFIG_ARCH_IQ80310 is not set
 
 #
-# IOP310 Chipset Features
+# Intel PXA250/210 Implementations
+#
+
+#
+# SA11x0 Implementations
 #
-# CONFIG_IOP310_AAU is not set
-# CONFIG_IOP310_DMA is not set
-# CONFIG_IOP310_MU is not set
-# CONFIG_IOP310_PMON is not set
-# CONFIG_ARCH_ACORN is not set
-# CONFIG_FOOTBRIDGE is not set
-# CONFIG_FOOTBRIDGE_HOST is not set
-# CONFIG_FOOTBRIDGE_ADDIN is not set
-CONFIG_CPU_32=y
-# CONFIG_CPU_26 is not set
 
 #
 # Processor Type
 #
-# CONFIG_CPU_32v3 is not set
-CONFIG_CPU_32v4=y
-# CONFIG_CPU_32v5 is not set
-# CONFIG_CPU_ARM610 is not set
-# CONFIG_CPU_ARM710 is not set
-# CONFIG_CPU_ARM720T is not set
-# CONFIG_CPU_ARM920T is not set
-# CONFIG_CPU_ARM922T is not set
-# CONFIG_CPU_ARM926T is not set
-# CONFIG_CPU_ARM1020 is not set
+CONFIG_CPU_32=y
 CONFIG_CPU_SA110=y
-# CONFIG_CPU_SA1100 is not set
-# CONFIG_CPU_XSCALE is not set
-# CONFIG_XSCALE_PMU is not set
+CONFIG_CPU_32v4=y
 
 #
 # Processor Features
@@ -172,19 +100,16 @@ CONFIG_CPU_SA110=y
 #
 # General setup
 #
-# CONFIG_DISCONTIGMEM is not set
 CONFIG_PCI=y
-# CONFIG_PCI_HOST_PLX90X0 is not set
 CONFIG_PCI_HOST_VIA82C505=y
 CONFIG_ISA=y
 CONFIG_ISA_DMA=y
-# CONFIG_FIQ is not set
 # CONFIG_ZBOOT_ROM is not set
-CONFIG_ZBOOT_ROM_TEXT=0
-CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_PCI_LEGACY_PROC=y
 # CONFIG_PCI_NAMES is not set
 # CONFIG_HOTPLUG is not set
-# CONFIG_PCMCIA is not set
 
 #
 # At least one math emulation must be selected
@@ -198,7 +123,6 @@ CONFIG_BINFMT_ELF=y
 # CONFIG_BINFMT_MISC is not set
 # CONFIG_PM is not set
 # CONFIG_PREEMPT is not set
-# CONFIG_APM is not set
 # CONFIG_ARTHUR is not set
 CONFIG_CMDLINE=""
 CONFIG_LEDS=y
@@ -216,11 +140,6 @@ CONFIG_PARPORT_PC_CML1=y
 # CONFIG_PARPORT_PC_FIFO is not set
 # CONFIG_PARPORT_PC_SUPERIO is not set
 # CONFIG_PARPORT_ARC is not set
-# CONFIG_PARPORT_AMIGA is not set
-# CONFIG_PARPORT_MFC3 is not set
-# CONFIG_PARPORT_ATARI is not set
-# CONFIG_PARPORT_GSC is not set
-# CONFIG_PARPORT_SUNBPP is not set
 # CONFIG_PARPORT_OTHER is not set
 # CONFIG_PARPORT_1284 is not set
@@ -230,11 +149,9 @@ CONFIG_PARPORT_PC_CML1=y
 # CONFIG_MTD is not set
 
 #
-# Plug and Play configuration
+# Plug and Play support
 #
 # CONFIG_PNP is not set
-# CONFIG_ISAPNP is not set
-# CONFIG_PNPBIOS is not set
 
 #
 # Block devices
@@ -244,7 +161,6 @@ CONFIG_PARPORT_PC_CML1=y
 # CONFIG_PARIDE is not set
 # CONFIG_BLK_CPQ_DA is not set
 # CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_CISS_SCSI_TAPE is not set
 # CONFIG_BLK_DEV_DAC960 is not set
 # CONFIG_BLK_DEV_UMEM is not set
 CONFIG_BLK_DEV_LOOP=y
@@ -257,13 +173,11 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
 # Multi-device support (RAID and LVM)
 #
 # CONFIG_MD is not set
-# CONFIG_BLK_DEV_MD is not set
-# CONFIG_MD_LINEAR is not set
-# CONFIG_MD_RAID0 is not set
-# CONFIG_MD_RAID1 is not set
-# CONFIG_MD_RAID5 is not set
-# CONFIG_MD_MULTIPATH is not set
-# CONFIG_BLK_DEV_LVM is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
 
 #
 # Networking options
@@ -272,8 +186,8 @@ CONFIG_PACKET=y
 # CONFIG_PACKET_MMAP is not set
 # CONFIG_NETLINK_DEV is not set
 # CONFIG_NETFILTER is not set
-CONFIG_FILTER=y
 CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
 # CONFIG_IP_ADVANCED_ROUTER is not set
@@ -283,25 +197,23 @@ CONFIG_INET=y
 # CONFIG_ARPD is not set
 # CONFIG_INET_ECN is not set
 # CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
 # CONFIG_IPV6 is not set
-# CONFIG_ATM is not set
-# CONFIG_VLAN_8021Q is not set
-
-#
-#
-#
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
+# CONFIG_XFRM_USER is not set
 
 #
-# Appletalk devices
+# SCTP Configuration (EXPERIMENTAL)
 #
-# CONFIG_DEV_APPLETALK is not set
+CONFIG_IPV6_SCTP__=y
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_LLC is not set
 # CONFIG_DECNET is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-# CONFIG_LLC is not set
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
@@ -314,8 +226,9 @@ CONFIG_INET=y
 # CONFIG_NET_SCHED is not set
 
 #
-# Network device support
+# Network testing
 #
+# CONFIG_NET_PKTGEN is not set
 CONFIG_NETDEVICES=y
 
 #
@@ -332,47 +245,43 @@ CONFIG_NETDEVICES=y
 # Ethernet (10 or 100Mbit)
 #
 CONFIG_NET_ETHERNET=y
-# CONFIG_ARM_AM79C961A is not set
-# CONFIG_SUNLANCE is not set
+# CONFIG_MII is not set
 # CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNBMAC is not set
-# CONFIG_SUNQE is not set
 # CONFIG_SUNGEM is not set
 # CONFIG_NET_VENDOR_3COM is not set
 # CONFIG_LANCE is not set
 # CONFIG_NET_VENDOR_SMC is not set
 # CONFIG_NET_VENDOR_RACAL is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
 # CONFIG_AT1700 is not set
 # CONFIG_DEPCA is not set
 # CONFIG_HP100 is not set
 # CONFIG_NET_ISA is not set
 CONFIG_NET_PCI=y
 # CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
 # CONFIG_ADAPTEC_STARFIRE is not set
 # CONFIG_AC3200 is not set
 # CONFIG_APRICOT is not set
+# CONFIG_B44 is not set
 CONFIG_CS89x0=y
 # CONFIG_DGRS is not set
 # CONFIG_EEPRO100 is not set
 # CONFIG_E100 is not set
-# CONFIG_LNE390 is not set
 # CONFIG_FEALNX is not set
 # CONFIG_NATSEMI is not set
 # CONFIG_NE2K_PCI is not set
-# CONFIG_NE3210 is not set
-# CONFIG_ES3210 is not set
 # CONFIG_8139CP is not set
 # CONFIG_8139TOO is not set
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_NEW_RX_RESET is not set
 # CONFIG_SIS900 is not set
 # CONFIG_EPIC100 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
 # CONFIG_VIA_RHINE is not set
-# CONFIG_VIA_RHINE_MMIO is not set
 # CONFIG_NET_POCKET is not set
 
 #
@@ -381,10 +290,10 @@ CONFIG_CS89x0=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 # CONFIG_E1000 is not set
-# CONFIG_MYRI_SBUS is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
 # CONFIG_SK98LIN is not set
 # CONFIG_TIGON3 is not set
 # CONFIG_FDDI is not set
@@ -399,9 +308,8 @@ CONFIG_CS89x0=y
 # CONFIG_NET_RADIO is not set
 
 #
-# Token Ring devices
+# Token Ring devices (depends on LLC=y)
 #
-# CONFIG_TR is not set
 # CONFIG_NET_FC is not set
 # CONFIG_RCPCI is not set
 # CONFIG_SHAPER is not set
@@ -412,9 +320,9 @@ CONFIG_CS89x0=y
 # CONFIG_WAN is not set
 
 #
-# Tulip family network device support
+# IrDA (infrared) support
 #
-# CONFIG_NET_TULIP is not set
+# CONFIG_IRDA is not set
 
 #
 # Amateur Radio support
@@ -422,75 +330,32 @@ CONFIG_CS89x0=y
 # CONFIG_HAMRADIO is not set
 
 #
-# IrDA (infrared) support
+# ATA/ATAPI/MFM/RLL support
 #
-# CONFIG_IRDA is not set
+CONFIG_IDE=y
 
 #
-# ATA/ATAPI/MFM/RLL support
+# IDE, ATA and ATAPI Block devices
 #
-CONFIG_IDE=y
 CONFIG_BLK_DEV_IDE=y
-# CONFIG_BLK_DEV_HD_IDE is not set
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
 # CONFIG_BLK_DEV_HD is not set
 CONFIG_BLK_DEV_IDEDISK=y
 # CONFIG_IDEDISK_MULTI_MODE is not set
 # CONFIG_IDEDISK_STROKE is not set
-CONFIG_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
-# CONFIG_BLK_DEV_IDETAPE is not set
 CONFIG_BLK_DEV_IDEFLOPPY=y
 # CONFIG_BLK_DEV_IDESCSI is not set
-# CONFIG_BLK_DEV_IDECS is not set
+# CONFIG_IDE_TASK_IOCTL is not set
 
 #
-# ATA host controller support
+# IDE chipset support/bugfixes
 #
-# CONFIG_BLK_DEV_RZ1000 is not set
-# CONFIG_BLK_DEV_CMD640 is not set
-# CONFIG_BLK_DEV_CMD640_ENHANCED is not set
-# CONFIG_BLK_DEV_ISAPNP is not set
-
-#
-# PCI host controller support
-#
-# CONFIG_BLK_DEV_OFFBOARD is not set
-# CONFIG_IDEPCI_SHARE_IRQ is not set
-# CONFIG_BLK_DEV_IDEDMA_PCI is not set
-# CONFIG_IDEDMA_PCI_AUTO is not set
-# CONFIG_IDEDMA_ONLYDISK is not set
-# CONFIG_BLK_DEV_IDEDMA is not set
-# CONFIG_BLK_DEV_IDE_TCQ is not set
-# CONFIG_BLK_DEV_IDE_TCQ_DEFAULT is not set
-# CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
-# CONFIG_BLK_DEV_AEC62XX is not set
-# CONFIG_AEC6280_BURST is not set
-# CONFIG_BLK_DEV_ALI15X3 is not set
-# CONFIG_WDC_ALI15X3 is not set
-# CONFIG_BLK_DEV_AMD74XX is not set
-# CONFIG_BLK_DEV_CMD64X is not set
-# CONFIG_BLK_DEV_CY82C693 is not set
-# CONFIG_BLK_DEV_CS5530 is not set
-# CONFIG_BLK_DEV_HPT34X is not set
-# CONFIG_HPT34X_AUTODMA is not set
-# CONFIG_BLK_DEV_HPT366 is not set
-# CONFIG_BLK_DEV_PIIX is not set
-# CONFIG_BLK_DEV_NS87415 is not set
-# CONFIG_BLK_DEV_OPTI621 is not set
-# CONFIG_BLK_DEV_PDC202XX is not set
-# CONFIG_PDC202XX_BURST is not set
-# CONFIG_PDC202XX_FORCE is not set
-# CONFIG_BLK_DEV_SVWKS is not set
-# CONFIG_BLK_DEV_SIS5513 is not set
-# CONFIG_BLK_DEV_TRM290 is not set
-# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_BLK_DEV_SL82C105 is not set
+# CONFIG_BLK_DEV_IDEPCI is not set
 # CONFIG_IDE_CHIPSETS is not set
-# CONFIG_IDEDMA_IVB is not set
-# CONFIG_IDEDMA_AUTO is not set
-# CONFIG_BLK_DEV_ATARAID is not set
-# CONFIG_BLK_DEV_ATARAID_PDC is not set
-# CONFIG_BLK_DEV_ATARAID_HPT is not set
 
 #
 # SCSI support
@@ -501,12 +366,10 @@ CONFIG_SCSI=m
 # SCSI support type (disk, tape, CD-ROM)
 #
 CONFIG_BLK_DEV_SD=m
-CONFIG_SD_EXTRA_DEVS=40
 CONFIG_CHR_DEV_ST=m
 # CONFIG_CHR_DEV_OSST is not set
 CONFIG_BLK_DEV_SR=m
 # CONFIG_BLK_DEV_SR_VENDOR is not set
-CONFIG_SR_EXTRA_DEVS=2
 CONFIG_CHR_DEV_SG=m
 
 #
@@ -525,8 +388,10 @@ CONFIG_CHR_DEV_SG=m
 # CONFIG_SCSI_ACARD is not set
 # CONFIG_SCSI_AHA152X is not set
 # CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AACRAID is not set
 # CONFIG_SCSI_AIC7XXX is not set
 # CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_IN2000 is not set
@@ -537,11 +402,11 @@ CONFIG_CHR_DEV_SG=m
 # CONFIG_SCSI_DMX3191D is not set
 # CONFIG_SCSI_DTC3280 is not set
 # CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_DMA is not set
 # CONFIG_SCSI_EATA_PIO is not set
 # CONFIG_SCSI_FUTURE_DOMAIN is not set
 # CONFIG_SCSI_GDTH is not set
 # CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
 # CONFIG_SCSI_INITIO is not set
 # CONFIG_SCSI_INIA100 is not set
 # CONFIG_SCSI_PPA is not set
@@ -559,11 +424,11 @@ CONFIG_CHR_DEV_SG=m
 # CONFIG_SCSI_QLOGIC_ISP is not set
 # CONFIG_SCSI_QLOGIC_FC is not set
 # CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_SIM710 is not set
 # CONFIG_SCSI_SYM53C416 is not set
 # CONFIG_SCSI_DC390T is not set
 # CONFIG_SCSI_T128 is not set
 # CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_NSP32 is not set
 # CONFIG_SCSI_DEBUG is not set
 
 #
@@ -575,11 +440,6 @@ CONFIG_CHR_DEV_SG=m
 # I2O device support
 #
 # CONFIG_I2O is not set
-# CONFIG_I2O_PCI is not set
-# CONFIG_I2O_BLOCK is not set
-# CONFIG_I2O_LAN is not set
-# CONFIG_I2O_SCSI is not set
-# CONFIG_I2O_PROC is not set
 
 #
 # ISDN subsystem
@@ -589,47 +449,55 @@ CONFIG_CHR_DEV_SG=m
 #
 # Input device support
 #
-# CONFIG_INPUT is not set
+CONFIG_INPUT=y
 
 #
 # Userland interfaces
 #
-# CONFIG_INPUT_KEYBDEV is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
 # CONFIG_INPUT_JOYDEV is not set
 # CONFIG_INPUT_TSDEV is not set
 # CONFIG_INPUT_TSLIBDEV is not set
 # CONFIG_INPUT_EVDEV is not set
 # CONFIG_INPUT_EVBUG is not set
+# CONFIG_INPUT_UINPUT is not set
 
 #
 # Input I/O drivers
 #
 # CONFIG_GAMEPORT is not set
 CONFIG_SOUND_GAMEPORT=y
-# CONFIG_GAMEPORT_NS558 is not set
-# CONFIG_GAMEPORT_L4 is not set
-# CONFIG_GAMEPORT_EMU10K1 is not set
-# CONFIG_GAMEPORT_VORTEX is not set
-# CONFIG_GAMEPORT_FM801 is not set
-# CONFIG_GAMEPORT_CS461x is not set
-# CONFIG_SERIO is not set
-# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_SERIO_CT82C710 is not set
 # CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
 
 #
 # Input Device Drivers
 #
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
 
 #
 # Character devices
 #
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
+# CONFIG_VT is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -637,33 +505,12 @@ CONFIG_VT_CONSOLE=y
 #
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_SERIAL_8250_CS is not set
 # CONFIG_SERIAL_8250_EXTENDED is not set
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
-# CONFIG_SERIAL_8250_SHARE_IRQ is not set
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-# CONFIG_SERIAL_8250_MULTIPORT is not set
-# CONFIG_SERIAL_8250_RSA is not set
 
 #
 # Non-8250 serial port support
 #
-# CONFIG_ATOMWIDE_SERIAL is not set
-# CONFIG_DUALSP_SERIAL is not set
-# CONFIG_SERIAL_ANAKIN is not set
-# CONFIG_SERIAL_ANAKIN_CONSOLE is not set
-# CONFIG_SERIAL_AMBA is not set
-# CONFIG_SERIAL_AMBA_CONSOLE is not set
-# CONFIG_SERIAL_CLPS711X is not set
-# CONFIG_SERIAL_CLPS711X_CONSOLE is not set
-# CONFIG_SERIAL_CLPS711X_OLD_NAME is not set
-# CONFIG_SERIAL_21285 is not set
-# CONFIG_SERIAL_21285_OLD is not set
-# CONFIG_SERIAL_21285_CONSOLE is not set
-# CONFIG_SERIAL_UART00 is not set
-# CONFIG_SERIAL_UART00_CONSOLE is not set
-# CONFIG_SERIAL_SA1100 is not set
-# CONFIG_SERIAL_SA1100_CONSOLE is not set
+# CONFIG_SERIAL_DZ is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_UNIX98_PTYS=y
@@ -671,6 +518,7 @@ CONFIG_UNIX98_PTY_COUNT=256
 CONFIG_PRINTER=m
 # CONFIG_LP_CONSOLE is not set
 # CONFIG_PPDEV is not set
+# CONFIG_TIPAR is not set
 
 #
 # I2C support
@@ -678,25 +526,29 @@ CONFIG_PRINTER=m
 # CONFIG_I2C is not set
 
 #
-# L3 serial bus support
+# I2C Hardware Sensors Mainboard support
 #
-# CONFIG_L3 is not set
-# CONFIG_L3_ALGOBIT is not set
-# CONFIG_L3_BIT_SA1100_GPIO is not set
 
 #
-# Other L3 adapters
+# I2C Hardware Sensors Chip support
+#
+
+#
+# L3 serial bus support
 #
-# CONFIG_L3_SA1111 is not set
-# CONFIG_BIT_SA1100_GPIO is not set
+# CONFIG_L3 is not set
 
 #
 # Mice
 #
 # CONFIG_BUSMOUSE is not set
-CONFIG_PSMOUSE=y
 # CONFIG_QIC02_TAPE is not set
 
 #
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
 # Watchdog Cards
 #
@@ -714,6 +566,7 @@ CONFIG_RTC=y
 # CONFIG_AGP is not set
 # CONFIG_DRM is not set
 # CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
 
 #
 # Multimedia devices
@@ -723,84 +576,83 @@ CONFIG_RTC=y
 #
 # File systems
 #
-# CONFIG_QUOTA is not set
-# CONFIG_QFMT_V1 is not set
-# CONFIG_QFMT_V2 is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-# CONFIG_ADFS_FS is not set
-# CONFIG_ADFS_FS_RW is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_BFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
 CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
 CONFIG_JBD=y
 # CONFIG_JBD_DEBUG is not set
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-# CONFIG_UMSDOS_FS is not set
-CONFIG_VFAT_FS=y
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_TMPFS is not set
-CONFIG_RAMFS=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 # CONFIG_ZISOFS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_VXFS_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
 # CONFIG_NTFS_FS is not set
-# CONFIG_NTFS_DEBUG is not set
-# CONFIG_NTFS_RW is not set
-# CONFIG_HPFS_FS is not set
+
+#
+# Pseudo filesystems
+#
 CONFIG_PROC_FS=y
 CONFIG_DEVFS_FS=y
 CONFIG_DEVFS_MOUNT=y
 # CONFIG_DEVFS_DEBUG is not set
 # CONFIG_DEVPTS_FS is not set
+# CONFIG_TMPFS is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
-# CONFIG_QNX4FS_RW is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_EXT2_FS=y
 # CONFIG_SYSV_FS is not set
-# CONFIG_UDF_FS is not set
-# CONFIG_UDF_RW is not set
 # CONFIG_UFS_FS is not set
-# CONFIG_UFS_FS_WRITE is not set
 
 #
 # Network File Systems
 #
-# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
 CONFIG_NFS_FS=y
 # CONFIG_NFS_V3 is not set
-# CONFIG_ROOT_NFS is not set
+# CONFIG_NFS_V4 is not set
 # CONFIG_NFSD is not set
-# CONFIG_NFSD_V3 is not set
-# CONFIG_NFSD_TCP is not set
-CONFIG_SUNRPC=y
 CONFIG_LOCKD=y
 # CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_GSS is not set
 # CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
-# CONFIG_NCPFS_PACKET_SIGNING is not set
-# CONFIG_NCPFS_IOCTL_LOCKING is not set
-# CONFIG_NCPFS_STRONG is not set
-# CONFIG_NCPFS_NFS_NS is not set
-# CONFIG_NCPFS_OS2_NS is not set
-# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_NLS is not set
-# CONFIG_NCPFS_EXTRAS is not set
-# CONFIG_ZISOFS_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+# CONFIG_AFS_FS is not set
 
 #
 # Partition Types
@@ -817,11 +669,11 @@ CONFIG_MSDOS_PARTITION=y
 # CONFIG_SOLARIS_X86_PARTITION is not set
 # CONFIG_UNIXWARE_DISKLABEL is not set
 # CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_ULTRIX_PARTITION is not set
 # CONFIG_SUN_PARTITION is not set
 # CONFIG_EFI_PARTITION is not set
-# CONFIG_SMB_NLS is not set
 CONFIG_NLS=y
 
 #
@@ -867,28 +719,18 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_NLS_UTF8 is not set
 
 #
-# Console drivers
-#
-# CONFIG_VGA_CONSOLE is not set
-
-#
-# Frame-buffer support
+# Graphics support
 #
 CONFIG_FB=y
-CONFIG_DUMMY_CONSOLE=y
-# CONFIG_FB_CLGEN is not set
+# CONFIG_FB_CIRRUS is not set
 # CONFIG_FB_PM2 is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_ACORN is not set
-# CONFIG_FB_ANAKIN is not set
-# CONFIG_FB_CLPS711X is not set
-# CONFIG_FB_SA1100 is not set
 CONFIG_FB_CYBER2000=y
-# CONFIG_FB_IMSTT is not set
 # CONFIG_FB_RIVA is not set
 # CONFIG_FB_MATROX is not set
-# CONFIG_FB_ATY is not set
 # CONFIG_FB_RADEON is not set
 # CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
 # CONFIG_FB_SIS is not set
 # CONFIG_FB_NEOMAGIC is not set
 # CONFIG_FB_3DFX is not set
@@ -896,20 +738,22 @@ CONFIG_FB_CYBER2000=y
 # CONFIG_FB_TRIDENT is not set
 # CONFIG_FB_PM3 is not set
 # CONFIG_FB_VIRTUAL is not set
-# CONFIG_FBCON_ADVANCED is not set
-CONFIG_FBCON_CFB8=y
-CONFIG_FBCON_CFB16=y
-CONFIG_FBCON_CFB24=y
-# CONFIG_FBCON_FONTWIDTH8_ONLY is not set
-# CONFIG_FBCON_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+# CONFIG_LOGO is not set
 
 #
 # Sound
 #
 CONFIG_SOUND=m
+
+#
+# Advanced Linux Sound Architecture
+#
+# CONFIG_SND is not set
 
 #
 # Open Sound System
 #
@@ -917,7 +761,6 @@ CONFIG_SOUND_PRIME=m
 # CONFIG_SOUND_BT878 is not set
 # CONFIG_SOUND_CMPCI is not set
 # CONFIG_SOUND_EMU10K1 is not set
-# CONFIG_MIDI_EMU10K1 is not set
 # CONFIG_SOUND_FUSION is not set
 # CONFIG_SOUND_CS4281 is not set
 # CONFIG_SOUND_ES1370 is not set
@@ -932,7 +775,6 @@ CONFIG_SOUND_PRIME=m
 # CONFIG_SOUND_MSNDCLAS is not set
 # CONFIG_SOUND_MSNDPIN is not set
 # CONFIG_SOUND_VIA82CXXX is not set
-# CONFIG_MIDI_VIA82CXXX is not set
 CONFIG_SOUND_OSS=m
 # CONFIG_SOUND_TRACEINIT is not set
 # CONFIG_SOUND_DMAP is not set
@@ -950,7 +792,6 @@ CONFIG_SOUND_ADLIB=m
 # CONFIG_SOUND_NM256 is not set
 # CONFIG_SOUND_MAD16 is not set
 # CONFIG_SOUND_PAS is not set
-# CONFIG_PAS_JOYSTICK is not set
 # CONFIG_SOUND_PSS is not set
 CONFIG_SOUND_SB=m
 # CONFIG_SOUND_AWE32_SYNTH is not set
@@ -960,32 +801,22 @@ CONFIG_SOUND_SB=m
 # CONFIG_SOUND_OPL3SA1 is not set
 # CONFIG_SOUND_OPL3SA2 is not set
 # CONFIG_SOUND_YMFPCI is not set
-# CONFIG_SOUND_YMFPCI_LEGACY is not set
 # CONFIG_SOUND_UART6850 is not set
 # CONFIG_SOUND_AEDSP16 is not set
-# CONFIG_SOUND_WAVEARTIST is not set
-# CONFIG_SOUND_TVMIXER is not set
 
 #
-# Advanced Linux Sound Architecture
+# Misc devices
 #
-# CONFIG_SND is not set
 
 #
 # Multimedia Capabilities Port drivers
 #
 # CONFIG_MCP is not set
-# CONFIG_MCP_SA1100 is not set
-# CONFIG_MCP_UCB1200 is not set
-# CONFIG_MCP_UCB1200_AUDIO is not set
-# CONFIG_MCP_UCB1200_TS is not set
 
 #
 # Console Switches
 #
 # CONFIG_SWITCHES is not set
-# CONFIG_SWITCHES_SA1100 is not set
-# CONFIG_SWITCHES_UCB1X00 is not set
 
 #
 # USB support
@@ -1004,24 +835,18 @@ CONFIG_FRAME_POINTER=y
 CONFIG_DEBUG_USER=y
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_WAITQ is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_DEBUG_ERRORS is not set
-# CONFIG_DEBUG_LL is not set
-# CONFIG_DEBUG_DC21285_PORT is not set
-# CONFIG_DEBUG_CLPS711X_UART2 is not set
 
 #
 # Security options
 #
-CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
 
 #
 # Library routines
 #
 CONFIG_CRC32=y
-# CONFIG_ZLIB_INFLATE is not set
-# CONFIG_ZLIB_DEFLATE is not set
@@ -418,7 +418,7 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
-	stack_page = 4096 + (unsigned long)p;
+	stack_page = 4096 + (unsigned long)p->thread_info;
 	fp = thread_saved_fp(p);
 	do {
 		if (fp < stack_page || fp > 4092+stack_page)
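
The fix here: in 2.5 the task_struct no longer sits at the base of the kernel stack, so bounding the frame-pointer walk against `p` itself was wrong; the stack hangs off `p->thread_info`. A minimal sketch of the validity window the loop enforces, assuming the stack layout this code implies:

	/* fp must lie in the 4 KB region starting 4096 bytes above the
	 * thread_info base, leaving room for one frame record at the top. */
	static int fp_on_stack(struct task_struct *p, unsigned long fp)
	{
		unsigned long stack_page = 4096 + (unsigned long)p->thread_info;

		return fp >= stack_page && fp <= stack_page + 4092;
	}
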
......
@@ -74,6 +74,9 @@ struct cpu_tlb_fns cpu_tlb;
 #ifdef MULTI_USER
 struct cpu_user_fns cpu_user;
 #endif
+#ifdef MULTI_CACHE
+struct cpu_cache_fns cpu_cache;
+#endif
 
 unsigned char aux_device_present;
 char elf_platform[ELF_PLATFORM_SIZE];
@@ -282,6 +285,9 @@ static void __init setup_processor(void)
 #ifdef MULTI_USER
 	cpu_user = *list->user;
 #endif
+#ifdef MULTI_CACHE
+	cpu_cache = *list->cache;
+#endif
 
 	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
 	       cpu_name, processor_id, (int)processor_id & 15,
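
With MULTI_CACHE set, cache maintenance now goes through a per-CPU function table copied out of the processor list at boot, exactly as MULTI_TLB and MULTI_USER already do. A hedged sketch of what `struct cpu_cache_fns` must contain — the field order follows the `v3_cache_fns`/`v4_cache_fns` vector tables later in this commit, and the exact prototypes are assumptions:

	/* Sketch only: order matches the .long tables in cache-v3.S et al. */
	struct cpu_cache_fns {
		void (*flush_kern_all)(void);
		void (*flush_user_all)(void);
		void (*flush_user_range)(unsigned long start, unsigned long end,
					 unsigned int vm_flags);
		void (*coherent_kern_range)(unsigned long start, unsigned long end);
		void (*flush_kern_dcache_page)(void *page);
		void (*dma_inv_range)(unsigned long start, unsigned long end);
		void (*dma_clean_range)(unsigned long start, unsigned long end);
		void (*dma_flush_range)(unsigned long start, unsigned long end);
	};
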
@@ -323,24 +329,29 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
 	return list;
 }
 
+static void __init early_initrd(char **p)
+{
+	unsigned long start, size;
+
+	start = memparse(*p, p);
+	if (**p == ',') {
+		size = memparse((*p) + 1, p);
+
+		phys_initrd_start = start;
+		phys_initrd_size = size;
+	}
+}
+__early_param("initrd=", early_initrd);
+
 /*
- * Initial parsing of the command line.  We need to pick out the
- * memory size.  We look for mem=size@start, where start and size
- * are "size[KkMm]"
+ * Pick out the memory size.  We look for mem=size@start,
+ * where start and size are "size[KkMm]"
  */
-static void __init
-parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from)
+static void __init early_mem(char **p)
 {
-	char c = ' ', *to = command_line;
-	int usermem = 0, len = 0;
-
-	for (;;) {
-		if (c == ' ' && !memcmp(from, "mem=", 4)) {
-			unsigned long size, start;
+	static int usermem __initdata = 0;
+	unsigned long size, start;
 
-			if (to != command_line)
-				to -= 1;
 	/*
 	 * If the user specifies memory size, we
 	 * blow away any automatically generated
@@ -348,33 +359,47 @@ parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from)
 	 */
 	if (usermem == 0) {
 		usermem = 1;
-		mi->nr_banks = 0;
+		meminfo.nr_banks = 0;
 	}
 
 	start = PHYS_OFFSET;
-	size  = memparse(from + 4, &from);
-	if (*from == '@')
-		start = memparse(from + 1, &from);
-
-	mi->bank[mi->nr_banks].start = start;
-	mi->bank[mi->nr_banks].size  = size;
-	mi->bank[mi->nr_banks].node  = PHYS_TO_NID(start);
-	mi->nr_banks += 1;
-	} else if (c == ' ' && !memcmp(from, "initrd=", 7)) {
-		unsigned long start, size;
-
-		/*
-		 * Remove space character
-		 */
+	size  = memparse(*p, p);
+	if (**p == '@')
+		start = memparse(*p + 1, p);
+
+	meminfo.bank[meminfo.nr_banks].start = start;
+	meminfo.bank[meminfo.nr_banks].size  = size;
+	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
+	meminfo.nr_banks += 1;
+}
+__early_param("mem=", early_mem);
+
+/*
+ * Initial parsing of the command line.
+ */
+static void __init parse_cmdline(char **cmdline_p, char *from)
+{
+	char c = ' ', *to = command_line;
+	int len = 0;
+
+	for (;;) {
+		if (c == ' ') {
+			extern struct early_params __early_begin, __early_end;
+			struct early_params *p;
+
+			for (p = &__early_begin; p < &__early_end; p++) {
+				int len = strlen(p->arg);
+
+				if (memcmp(from, p->arg, len) == 0) {
 					if (to != command_line)
 						to -= 1;
+					from += len;
+					p->fn(&from);
 
-		start = memparse(from + 7, &from);
-		if (*from == ',') {
-			size = memparse(from + 1, &from);
-
-			phys_initrd_start = start;
-			phys_initrd_size = size;
-		}
+					while (*from != ' ' && *from != '\0')
+						from++;
+					break;
+				}
+			}
 		}
 		c = *from++;
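
parse_cmdline() no longer knows about individual options: it scans a table of `struct early_params` entries that the `__early_param()` uses above register between the `__early_begin` and `__early_end` linker symbols. A hedged reconstruction of that machinery — the struct layout follows the fields the loop dereferences, while the section name and attribute spelling are assumptions:

	/* Sketch only: entries must land in one linker section bounded by
	 * __early_begin/__early_end for the parse_cmdline() scan to find them. */
	struct early_params {
		const char *arg;
		void (*fn)(char **p);
	};

	#define __early_param(name, fn)					\
	static struct early_params __early_##fn			\
		__attribute__((__section__("__early_param"), used)) =	\
		{ name, fn }
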
@@ -536,6 +561,8 @@ __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
 static int __init parse_tag_initrd(const struct tag *tag)
 {
+	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+		"please update your bootloader.\n");
 	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
 	phys_initrd_size = tag->u.initrd.size;
 	return 0;
@@ -668,7 +695,7 @@ void __init setup_arch(char **cmdline_p)
 	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
 	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
-	parse_cmdline(&meminfo, cmdline_p, from);
+	parse_cmdline(cmdline_p, from);
 	bootmem_init(&meminfo);
 	paging_init(&meminfo, mdesc);
 	request_standard_resources(&meminfo, mdesc);
......
@@ -12,6 +12,7 @@
  * CPU support functions
  */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/cpufreq.h>
@@ -173,9 +174,9 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 	return 0;
 }
 
-static int integrator_cpufreq_init(struct cpufreq *policy)
+static int integrator_cpufreq_init(struct cpufreq_policy *policy)
 {
-	unsigned long cus_allowed;
+	unsigned long cpus_allowed;
 	unsigned int cpu = policy->cpu;
 	u_int cm_osc, cm_stat, mem_freq_khz;
 	struct vco vco;
......
@@ -18,6 +18,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
+#include <linux/list.h>
 
 #include <asm/hardware.h>
 #include <asm/irq.h>
......
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/tty.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/serial_core.h>
 #include <linux/delay.h>
@@ -25,6 +26,7 @@
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
......
@@ -20,16 +20,16 @@ obj-$(CONFIG_DISCONTIGMEM) += discontig.o
 p-$(CONFIG_CPU_26)	+= proc-arm2_3.o
 
 # ARMv3
-p-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o  tlb-v3.o    copypage-v3.o
-p-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o  tlb-v3.o    copypage-v3.o
+p-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o  tlb-v3.o    cache-v3.o   copypage-v3.o
+p-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o  tlb-v3.o    cache-v3.o   copypage-v3.o
 
 # ARMv4
-p-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o  tlb-v4.o    copypage-v4wt.o abort-lv4t.o
-p-$(CONFIG_CPU_ARM920T)	+= proc-arm920.o  tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM922T)	+= proc-arm922.o  tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM1020)	+= proc-arm1020.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_SA110)	+= proc-sa110.o   tlb-v4wb.o  copypage-v4wb.o abort-ev4.o minicache.o
-p-$(CONFIG_CPU_SA1100)	+= proc-sa110.o   tlb-v4wb.o  copypage-v4mc.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o  tlb-v4.o    cache-v4.o   copypage-v4wt.o abort-lv4t.o
+p-$(CONFIG_CPU_ARM920T)	+= proc-arm920.o  tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM922T)	+= proc-arm922.o  tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM1020)	+= proc-arm1020.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_SA110)	+= proc-sa110.o   tlb-v4wb.o  cache-v4wb.o copypage-v4wb.o abort-ev4.o
+p-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o  tlb-v4wb.o  cache-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
 
 # ARMv5
 p-$(CONFIG_CPU_ARM926T)	+= proc-arm926.o  tlb-v4wbi.o copypage-v4wb.o abort-ev5tej.o
......
/*
* linux/arch/arm/mm/cache-v3.S
*
 * Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v3_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v3_flush_kern_cache_all)
/* FALLTHROUGH */
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
 * - vm_flags - vma->vm_flags describing the address space
*/
ENTRY(v3_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v3_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_clean_range)
mov pc, lr
ENTRY(v3_cache_fns)
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_flush_kern_dcache_page
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4.S
*
 * Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v4_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4_flush_kern_cache_all)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vma)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v4_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_clean_range)
mov pc, lr
ENTRY(v4_cache_fns)
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_flush_kern_dcache_page
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4wb.S
*
 * Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The total size of the data cache.
*/
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE 16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE 8192
#else
# error Unknown cache size
#endif
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
*
 * Size		Clean (ticks, three runs)	Dirty (ticks, three runs)
* 4096 21 20 21 53 55 54
* 8192 40 41 40 106 100 102
* 16384 77 77 76 140 140 138
* 32768 150 149 150 214 216 212 <---
* 65536 296 297 296 351 358 361
* 131072 591 591 591 656 657 651
* Whole 132 136 132 221 217 207 <---
*/
#define CACHE_DLIMIT (CACHE_DSIZE * 4)
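/*
 * Reading of the table above: each row gives three timing runs for
 * clean data and three for dirty data, and the "Whole" row is the
 * whole-cache pass.  In C terms, the flush routines below make roughly
 * the following decision (helper names are illustrative only, not part
 * of the source):
 *
 *	if (end - start >= CACHE_DLIMIT)
 *		flush_whole_dcache();	 read FLUSH_BASE through the cache
 *	else
 *		clean_and_invalidate_lines(start, end);	 per-line mcr loop
 */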
/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wb_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
mov r0, #FLUSH_BASE
add r1, r0, #CACHE_DSIZE
1: ldr r2, [r0], #32
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
 * - vm_flags - vma->vm_flags describing the address space
*/
ENTRY(v4wb_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
cmp r3, #CACHE_DLIMIT @ total size >= limit?
bhs __flush_whole_cache @ flush whole D cache
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wb_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
/* fall through */
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
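/*
 * Note the edge handling above: a partial line at an unaligned start
 * or end is first cleaned (written back), so data that shares those
 * lines with the DMA buffer is not thrown away by the invalidate.
 * A hedged C equivalent, with illustrative helper names:
 *
 *	if (start & (CACHE_DLINESIZE - 1))
 *		clean_dcache_line(start);
 *	if (end & (CACHE_DLINESIZE - 1))
 *		clean_dcache_line(end);
 *	for (start &= ~(CACHE_DLINESIZE - 1); start < end;
 *	     start += CACHE_DLINESIZE)
 *		invalidate_dcache_line(start);
 */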
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* This is actually the same as v4wb_coherent_kern_range()
*/
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
ENTRY(v4wb_cache_fns)
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_flush_kern_dcache_page
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
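
The ENTRY(v4wb_cache_fns) table is the assembly image of a structure of
function pointers: generic code selects a CPU's table at boot and indirects
through it, so the .long order must match the C structure's field order
exactly.  A plausible C shape, hedged -- the real definition lives in the
asm-arm headers of this tree; the field names here just follow the labels
above:

	struct cpu_cache_fns_sketch {
		void (*flush_kern_all)(void);
		void (*flush_user_all)(void);
		void (*flush_user_range)(unsigned long start, unsigned long end,
					 unsigned int vm_flags);
		void (*coherent_kern_range)(unsigned long start, unsigned long end);
		void (*flush_kern_dcache_page)(void *page);
		void (*dma_inv_range)(unsigned long start, unsigned long end);
		void (*dma_clean_range)(unsigned long start, unsigned long end);
		void (*dma_flush_range)(unsigned long start, unsigned long end);
	};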
/*
* linux/arch/arm/mm/cache-v4wt.S
*
 *  Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARMv4 write through cache operations support.
*
* We assume that the write buffer is not enabled.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 8
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
*
* *** This needs benchmarking
*/
#define CACHE_DLIMIT 16384
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wt_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wt_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
 *	Invalidate a range of cache entries in the specified
 *	address space.  (There is nothing to clean: the cache is
 *	write through, so memory is always up to date.)
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- vm_flags - vm_area_struct flags describing address space
*/
ENTRY(v4wt_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
 *	- page	- page aligned address
*/
ENTRY(v4wt_flush_kern_dcache_page)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, #PAGE_SZ
/* fallthrough */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_inv_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
.globl v4wt_dma_flush_range
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
ENTRY(v4wt_cache_fns)
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_flush_kern_dcache_page
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
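
A write-through cache never holds dirty data, which is why v4wt_dma_clean_range
above is a bare mov pc, lr and why dma_flush (clean + invalidate) can simply be
aliased to dma_inv with .equ: once "clean" is a no-op the two operations are
identical.  In C terms, a sketch with a hypothetical MCR wrapper:

	static void v4wt_inv_dline(unsigned long a) { (void)a; }	/* MCR c7,c6,1 stand-in */

	static void v4wt_dma_inv_sketch(unsigned long start, unsigned long end)
	{
		for (start &= ~31UL; start < end; start += 32)
			v4wt_inv_dline(start);
	}

	static void v4wt_dma_clean_sketch(unsigned long start, unsigned long end)
	{
		(void)start; (void)end;	/* write through: memory already up to date */
	}

	/* clean + invalidate degenerates to invalidate, hence the .equ alias: */
	static void (*v4wt_dma_flush_sketch)(unsigned long, unsigned long) =
		v4wt_dma_inv_sketch;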
@@ -161,11 +161,11 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle,
 	/*
 	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region.
+	 * kernel direct-mapped region for device DMA.
 	 */
 	{
 		unsigned long kaddr = (unsigned long)page_address(page);
-		invalidate_dcache_range(kaddr, kaddr + size);
+		dmac_inv_range(kaddr, kaddr + size);
 	}
 
 	/*
@@ -330,7 +330,7 @@ static int __init consistent_init(void)
 core_initcall(consistent_init);
 
 /*
- * make an area consistent.
+ * make an area consistent for devices.
  */
 void consistent_sync(void *vaddr, size_t size, int direction)
 {
@@ -339,13 +339,13 @@ void consistent_sync(void *vaddr, size_t size, int direction)
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		invalidate_dcache_range(start, end);
+		dmac_inv_range(start, end);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		clean_dcache_range(start, end);
+		dmac_clean_range(start, end);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		flush_dcache_range(start, end);
+		dmac_flush_range(start, end);
 		break;
 	default:
 		BUG();
...
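
The switch above maps each DMA direction onto one of the dmac_* operations
from the per-CPU cache_fns table.  A hedged driver-side sketch of the calling
convention -- the buffer names and the surrounding driver are illustrative,
not from this commit:

	/* CPU wrote txbuf; device will read it: write back before DMA */
	consistent_sync(txbuf, txlen, DMA_TO_DEVICE);

	/* device will write rxbuf; CPU reads it afterwards: invalidate first */
	consistent_sync(rxbuf, rxlen, DMA_FROM_DEVICE);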
@@ -184,9 +184,8 @@ void __flush_dcache_page(struct page *page)
 {
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
-	unsigned long kaddr = (unsigned long)page_address(page);
 
-	cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+	__cpuc_flush_dcache_page(page_address(page));
 
 	if (!page->mapping)
 		return;
@@ -291,10 +290,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 	page = pfn_to_page(pfn);
 	if (page->mapping) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		unsigned long kaddr = (unsigned long)page_address(page);
 
 		if (dirty)
-			cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+			__cpuc_flush_dcache_page(page_address(page));
 		make_coherent(vma, addr, page, dirty);
 	}
...
@@ -12,19 +12,15 @@
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
 #include <linux/string.h>
-#include <linux/types.h>
 #include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
-#include <linux/proc_fs.h>
 #include <linux/init.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 #include <asm/uaccess.h>
 
 #include "fault.h"
...
@@ -24,30 +24,82 @@
 #include <asm/mach/map.h>
 
+static unsigned int cachepolicy __initdata = PMD_SECT_WB;
+static unsigned int ecc_mask __initdata = 0;
+
+struct cachepolicy {
+	char		*policy;
+	unsigned int	cr_mask;
+	unsigned int	pmd;
+};
+
+static struct cachepolicy cache_policies[] __initdata = {
+	{ "uncached",		CR1_W|CR1_C,	PMD_SECT_UNCACHED },
+	{ "buffered",		CR1_C,		PMD_SECT_BUFFERED },
+	{ "writethrough",	0,		PMD_SECT_WT },
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	{ "writeback",		0,		PMD_SECT_WB },
+	{ "writealloc",		0,		PMD_SECT_WBWA }
+#endif
+};
+
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
  * writebuffer to be turned off.  (Note: the write
  * buffer should not be on and the cache off).
  */
-static int __init nocache_setup(char *__unused)
+static void __init early_cachepolicy(char **p)
 {
-	cr_alignment &= ~CR1_C;
-	cr_no_alignment &= ~CR1_C;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
+		int len = strlen(cache_policies[i].policy);
+
+		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+			cachepolicy = cache_policies[i].pmd;
+			cr_alignment &= ~cache_policies[i].cr_mask;
+			cr_no_alignment &= ~cache_policies[i].cr_mask;
+			*p += len;
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(cache_policies))
+		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
 	flush_cache_all();
 	set_cr(cr_alignment);
-	return 1;
 }
 
-static int __init nowrite_setup(char *__unused)
+static void __init early_nocache(char **__unused)
 {
-	cr_alignment &= ~(CR1_W|CR1_C);
-	cr_no_alignment &= ~(CR1_W|CR1_C);
-	flush_cache_all();
-	set_cr(cr_alignment);
-	return 1;
+	char *p = "buffered";
+	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+	early_cachepolicy(&p);
+}
+
+static void __init early_nowrite(char **__unused)
+{
+	char *p = "uncached";
+	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+	early_cachepolicy(&p);
+}
+
+static void __init early_ecc(char **p)
+{
+	if (memcmp(*p, "on", 2) == 0) {
+		ecc_mask = PMD_PROTECTION;
+		*p += 2;
+	} else if (memcmp(*p, "off", 3) == 0) {
+		ecc_mask = 0;
+		*p += 3;
+	}
 }
 
+__early_param("nocache", early_nocache);
+__early_param("nowb", early_nowrite);
+__early_param("cachepolicy=", early_cachepolicy);
+__early_param("ecc=", early_ecc);
+
 static int __init noalign_setup(char *__unused)
 {
 	cr_alignment &= ~CR1_A;
@@ -57,8 +109,6 @@ static int __init noalign_setup(char *__unused)
 }
 
 __setup("noalign", noalign_setup);
-__setup("nocache", nocache_setup);
-__setup("nowb", nowrite_setup);
 
 #define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
@@ -197,7 +247,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
 		pmdval = __pa(ptep) | prot_l1;
 		pmdp[0] = __pmd(pmdval);
 		pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
-		cpu_flush_pmd(pmdp);
+		flush_pmd_entry(pmdp);
 	}
 	ptep = pte_offset_kernel(pmdp, virt);
@@ -231,32 +281,20 @@ static struct mem_types mem_types[] __initdata = {
 		.domain    = DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
-		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
 		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MINICLEAN] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_CACHEABLE,
-		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
 		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
 				L_PTE_EXEC,
 		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
-				L_PTE_EXEC | L_PTE_WRITE,
-		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
 		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	}
@@ -268,37 +306,50 @@ static struct mem_types mem_types[] __initdata = {
 static void __init build_mem_type_table(void)
 {
 	int cpu_arch = cpu_architecture();
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	int writethrough = 1;
-#else
-	int writethrough = 0;
-#endif
-	int writealloc = 0, ecc = 0;
+	const char *policy;
 
-	if (cpu_arch < CPU_ARCH_ARMv5) {
-		writealloc = 0;
-		ecc = 0;
+	/*
+	 * ARMv5 can use ECC memory.
+	 */
+	if (cpu_arch == CPU_ARCH_ARMv5) {
+		mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
+		mem_types[MT_MEMORY].prot_sect |= ecc_mask;
+	} else {
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
+		if (cachepolicy == PMD_SECT_WBWA)
+			cachepolicy = PMD_SECT_WB;
+		ecc_mask = 0;
 	}
 
-	if (writethrough) {
+	mem_types[MT_MEMORY].prot_sect |= cachepolicy;
+	switch (cachepolicy) {
+	default:
+	case PMD_SECT_UNCACHED:
+		policy = "uncached";
+		break;
+	case PMD_SECT_BUFFERED:
+		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE;
+		policy = "buffered";
+		break;
+	case PMD_SECT_WT:
+		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
-		mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WT;
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WT;
-	} else {
+		policy = "write through";
+		break;
+	case PMD_SECT_WB:
+		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
-		mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WB;
-
-		if (writealloc)
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WBWA;
-		else
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WB;
-	}
-	if (ecc) {
-		mem_types[MT_VECTORS].prot_sect |= PMD_PROTECTION;
-		mem_types[MT_MEMORY].prot_sect |= PMD_PROTECTION;
+		policy = "write back";
+		break;
+	case PMD_SECT_WBWA:
+		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
+		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
+		policy = "write back, write allocate";
+		break;
 	}
+	printk("Memory policy: ECC %sabled, Data cache %s\n",
+		ecc_mask ? "en" : "dis", policy);
 }
 
 /*
@@ -330,6 +381,14 @@ static void __init create_mapping(struct map_desc *md)
 	off = md->physical - virt;
 	length = md->length;
 
+	if (mem_types[md->type].prot_l1 == 0 &&
+	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
+		printk(KERN_WARNING "MM: map for 0x%08lx at 0x%08lx can not "
+		       "be mapped using pages, ignoring.\n",
+		       md->physical, md->virtual);
+		return;
+	}
+
 	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
 		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
...
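
With the __early_param hooks in place, the policy is parsed before the page
tables are built, so it feeds straight into build_mem_type_table().  On the
kernel command line this would look like (values illustrative):

	console=ttySA0 root=/dev/nfs cachepolicy=writethrough ecc=on

where cachepolicy takes one of the strings in cache_policies[] above, and
ecc=on only takes effect on ARMv5, per the cpu_arch test in the code.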
@@ -43,26 +43,28 @@
 #define MAX_AREA_SIZE	32768
 
 /*
- * the cache line size of the I and D cache
+ * The size of one data cache line.
  */
-#define DCACHELINESIZE	32
-#define ICACHELINESIZE	32
+#define CACHE_DLINESIZE	32
 
 /*
- * and the page size
+ * The number of data cache segments.
  */
-#define PAGESIZE	4096
-
-	.text
+#define CACHE_DSEGMENTS	16
 
 /*
- * cpu_arm1020_check_bugs()
+ * The number of lines in a cache segment.
+ */
+#define CACHE_DENTRIES	64
+
+/*
+ * This is the size at which it becomes more efficient to
+ * clean the whole cache, rather than using the individual
+ * cache line maintenance instructions.
  */
-ENTRY(cpu_arm1020_check_bugs)
-	mrs	ip, cpsr
-	bic	ip, ip, #PSR_F_BIT
-	msr	cpsr, ip
-	mov	pc, lr
+#define CACHE_DLIMIT	32768
+
+	.text
 
 /*
  * cpu_arm1020_proc_init()
  */
@@ -114,230 +116,233 @@ ENTRY(cpu_arm1020_do_idle)
 /* ================================= CACHE ================================ */
 
-	.align	5
 /*
- * cpu_arm1020_cache_clean_invalidate_all()
- *
- * clean and invalidate all cache lines
+ *	flush_user_cache_all()
  *
- * Note:
- *  1. we should preserve r0 at all times
+ *	Invalidate all cache entries in a particular address
+ *	space.
+ */
+ENTRY(arm1020_flush_user_cache_all)
+	/* FALLTHROUGH */
+/*
+ *	flush_kern_cache_all()
+ *
+ *	Clean and invalidate the entire cache.
  */
-	.align	5
-ENTRY(cpu_arm1020_cache_clean_invalidate_all)
-	mov	r2, #1
-cpu_arm1020_cache_clean_invalidate_all_r2:
+ENTRY(arm1020_flush_kern_cache_all)
+	mov	r2, #VM_EXEC
+	mov	ip, #0
+__flush_whole_cache:
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	mcr	p15, 0, ip, c7, c10, 4
-	mov	r1, #0xf			@ 16 segments
-1:	mov	r3, #0x3F			@ 64 entries
-2:	mov	ip, r3, LSL #26			@ shift up entry
-	orr	ip, ip, r1, LSL #5		@ shift in/up index
-	mcr	p15, 0, ip, c7, c14, 2		@ Clean & Inval DCache entry
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
-	subs	r3, r3, #1
-	cmp	r3, #0
-	bge	2b				@ entries 3F to 0
-	subs	r1, r1, #1
-	cmp	r1, #0
-	bge	1b				@ segments 7 to 0
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 16 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	subs	r3, r3, #1 << 26
+	bcs	2b				@ entries 63 to 0
+	subs	r1, r1, #1 << 5
+	bcs	1b				@ segments 15 to 0
 #endif
+	tst	r2, #VM_EXEC
 #ifndef CONFIG_CPU_ICACHE_DISABLE
-	teq	r2, #0
 	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
 #endif
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
 /*
- * cpu_arm1020_cache_clean_invalidate_range(start, end, flags)
+ *	flush_user_cache_range(start, end, flags)
  *
- * clean and invalidate all cache lines associated with this area of memory
+ *	Invalidate a range of cache entries in the specified
+ *	address space.
  *
- * start: Area start address
- * end:   Area end address
- * flags: nonzero for I cache as well
+ *	- start	- start address (inclusive)
+ *	- end	- end address (exclusive)
+ *	- flags	- vm_flags for this space
  */
-	.align	5
-ENTRY(cpu_arm1020_cache_clean_invalidate_range)
-	bic	r0, r0, #DCACHELINESIZE - 1
-	sub	r3, r1, r0
-	cmp	r3, #MAX_AREA_SIZE
-	bgt	cpu_arm1020_cache_clean_invalidate_all_r2
-	mcr	p15, 0, r3, c7, c10, 4
+ENTRY(arm1020_flush_user_cache_range)
+	mov	ip, #0
+	sub	r3, r1, r0			@ calculate total size
+	cmp	r3, #CACHE_DLIMIT
+	bhs	__flush_whole_cache
+
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-1:	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	add	r0, r0, #DCACHELINESIZE
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	add	r0, r0, #DCACHELINESIZE
+	mcr	p15, 0, ip, c7, c10, 4
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
-	blt	1b
+	blo	1b
 #endif
+	tst	r2, #VM_EXEC
 #ifndef CONFIG_CPU_ICACHE_DISABLE
-	teq	r2, #0
-	movne	r0, #0
-	mcrne	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
 #endif
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
-/* ================================ D-CACHE =============================== */
-
 /*
- * cpu_arm1020_dcache_invalidate_range(start, end)
+ *	coherent_kern_range(start, end)
  *
- * throw away all D-cached data in specified region without an obligation
- * to write them back.  Note however that we must clean the D-cached entries
- * around the boundaries if the start and/or end address are not cache
- * aligned.
+ *	Ensure coherency between the Icache and the Dcache in the
+ *	region described by start and end.  If you have non-snooping
+ *	Harvard caches, you need to implement this function.
  *
- * start: virtual start address
- * end:   virtual end address
+ *	- start	- virtual start address
+ *	- end	- virtual end address
  */
-	.align	5
-ENTRY(cpu_arm1020_dcache_invalidate_range)
+ENTRY(arm1020_coherent_kern_range)
+	mov	ip, #0
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+	mcr	p15, 0, ip, c7, c10, 4
+1:
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	/* D cache are on */
-	tst	r0, #DCACHELINESIZE - 1
-	bic	r0, r0, #DCACHELINESIZE - 1
-	mcrne	p15, 0, r0, c7, c10, 4
-	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry at start
-	mcrne	p15, 0, r0, c7, c10, 4		@ drain WB
-	tst	r1, #DCACHELINESIZE - 1
-	mcrne	p15, 0, r1, c7, c10, 4
-	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry at end
-	mcrne	p15, 0, r1, c7, c10, 4		@ drain WB
-1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
-	add	r0, r0, #DCACHELINESIZE
-	cmp	r0, r1
-	blt	1b
-#else
-	/* D cache off, but still drain the write buffer */
-	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
+	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+#endif
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
 #endif
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
 /*
- * cpu_arm1020_dcache_clean_range(start, end)
- *
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
+ *	flush_kern_dcache_page(void *page)
  *
- * start: virtual start address
- * end:   virtual end address
+ *	Ensure no D cache aliasing occurs, either with itself or
+ *	the I cache
+ *
+ *	- page	- page aligned address
  */
-	.align	5
-ENTRY(cpu_arm1020_dcache_clean_range)
-	bic	r0, r0, #DCACHELINESIZE - 1
-	sub	r3, r1, r0
-	cmp	r3, #MAX_AREA_SIZE
-	bgt	cpu_arm1020_cache_clean_invalidate_all_r2
-	mcr	p15, 0, r3, c7, c10, 4
+ENTRY(arm1020_flush_kern_dcache_page)
+	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-1:	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	add	r0, r0, #DCACHELINESIZE
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
-	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
-	add	r0, r0, #DCACHELINESIZE
+	add	r1, r0, #PAGE_SZ
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
-	blt	1b
+	blo	1b
 #endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
 /*
- * cpu_arm1020_dcache_clean_page(page)
+ *	dma_inv_range(start, end)
  *
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
+ *	Invalidate (discard) the specified virtual address range.
+ *	May not write back any entries.  If 'start' or 'end'
+ *	are not cache line aligned, those lines must be written
+ *	back.
  *
- * page: virtual address of page to clean from dcache
+ *	- start	- virtual start address
+ *	- end	- virtual end address
  *
- * Note:
- *  1. we don't need to flush the write buffer in this case.
- *  2. we don't invalidate the entries since when we write the page
- *     out to disk, the entries may get reloaded into the cache.
+ *	(same as v4wb)
  */
-	.align	5
-ENTRY(cpu_arm1020_dcache_clean_page)
-	mov	r1, #PAGESIZE
-	mcr	p15, 0, r0, c7, c10, 4
+ENTRY(arm1020_dma_inv_range)
+	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry (drain is done by TLB fns)
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	add	r0, r0, #DCACHELINESIZE
-	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	add	r0, r0, #DCACHELINESIZE
-	subs	r1, r1, #2 * DCACHELINESIZE
-	bhi	1b
+	tst	r0, #CACHE_DLINESIZE - 1
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, ip, c7, c10, 4
+	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
+	tst	r1, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, ip, c7, c10, 4
+	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
+1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
 #endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
 /*
- * cpu_arm1020_dcache_clean_entry(addr)
+ *	dma_clean_range(start, end)
+ *
+ *	Clean the specified virtual address range.
  *
- * Clean the specified entry of any caches such that the MMU
- * translation fetches will obtain correct data.
+ *	- start	- virtual start address
+ *	- end	- virtual end address
  *
- * addr: cache-unaligned virtual address
+ *	(same as v4wb)
  */
-	.align	5
-ENTRY(cpu_arm1020_dcache_clean_entry)
-	mov	r1, #0
-	mcr	p15, 0, r1, c7, c10, 4
+ENTRY(arm1020_dma_clean_range)
+	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	mcr	p15, 0, r0, c7, c10, 1		@ clean single D entry
-	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
-#endif
-#ifndef CONFIG_CPU_ICACHE_DISABLE
-	mcr	p15, 0, r1, c7, c5, 1		@ invalidate I entry
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
 #endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
-/* ================================ I-CACHE =============================== */
-
 /*
- * cpu_arm1020_icache_invalidate_range(start, end)
+ *	dma_flush_range(start, end)
  *
- * invalidate a range of virtual addresses from the Icache
+ *	Clean and invalidate the specified virtual address range.
  *
- * start: virtual start address
- * end:   virtual end address
+ *	- start	- virtual start address
+ *	- end	- virtual end address
  */
-	.align	5
-ENTRY(cpu_arm1020_icache_invalidate_range)
-1:	mcr	p15, 0, r0, c7, c10, 4
+ENTRY(arm1020_dma_flush_range)
+	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	mcr	p15, 0, r0, c7, c10, 1		@ Clean D entry
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	add	r0, r0, #DCACHELINESIZE
-	mcr	p15, 0, r0, c7, c10, 1		@ Clean D entry
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-#endif
-	add	r0, r0, #DCACHELINESIZE
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+	mcr	p15, 0, ip, c7, c10, 4
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
 	blo	1b
-ENTRY(cpu_arm1020_icache_invalidate_page)
-	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+#endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+ENTRY(arm1020_cache_fns)
+	.long	arm1020_flush_kern_cache_all
+	.long	arm1020_flush_user_cache_all
+	.long	arm1020_flush_user_cache_range
+	.long	arm1020_coherent_kern_range
+	.long	arm1020_flush_kern_dcache_page
+	.long	arm1020_dma_inv_range
+	.long	arm1020_dma_clean_range
+	.long	arm1020_dma_flush_range
+
+	.align	5
+ENTRY(cpu_arm1020_dcache_clean_area)
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+	mov	ip, #0
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	add	r0, r0, #CACHE_DLINESIZE
+	subs	r1, r1, #CACHE_DLINESIZE
+	bhi	1b
+#endif
 	mov	pc, lr
 
 /* =============================== PageTable ============================== */
 
 /*
- * cpu_arm1020_set_pgd(pgd)
+ * cpu_arm1020_switch_mm(pgd)
  *
  * Set the translation base pointer to be as described by pgd.
  *
  * pgd: new page tables
  */
 	.align	5
-ENTRY(cpu_arm1020_set_pgd)
+ENTRY(cpu_arm1020_switch_mm)
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r3, c7, c10, 4
 	mov	r1, #0xF			@ 16 segments
@@ -364,23 +369,6 @@ ENTRY(cpu_arm1020_switch_mm)
 	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
 	mov	pc, lr
 
-/*
- * cpu_arm1020_flush_pmd(pmdp)
- *
- * Set a level 1 translation table entry, and clean it out of
- * any caches such that the MMUs can load it correctly.
- *
- * pmdp: pointer to PMD entry
- */
-	.align	5
-ENTRY(cpu_arm1020_flush_pmd)
-#ifndef CONFIG_CPU_DCACHE_DISABLE
-	mcr	p15, 0, r0, c7, c10, 4
-	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry (drain is done by TLB fns)
-#endif
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	mov	pc, lr
-
 /*
  * cpu_arm1020_set_pte(ptep, pte)
  *
@@ -396,7 +384,7 @@ ENTRY(cpu_arm1020_set_pte)
 	bic	r2, r2, #3
 	orr	r2, r2, #HPTE_TYPE_SMALL
 
-	tst	r1, #LPTE_USER | LPTE_EXEC	@ User or Exec?
+	tst	r1, #LPTE_USER			@ User?
 	orrne	r2, r2, #HPTE_AP_READ
 
 	tst	r1, #LPTE_WRITE | LPTE_DIRTY	@ Write and Dirty?
@@ -489,29 +477,12 @@ __arm1020_setup:
 	.type	arm1020_processor_functions, #object
 arm1020_processor_functions:
 	.word	v4t_early_abort
-	.word	cpu_arm1020_check_bugs
 	.word	cpu_arm1020_proc_init
 	.word	cpu_arm1020_proc_fin
 	.word	cpu_arm1020_reset
 	.word	cpu_arm1020_do_idle
-
-	/* cache */
-	.word	cpu_arm1020_cache_clean_invalidate_all
-	.word	cpu_arm1020_cache_clean_invalidate_range
-
-	/* dcache */
-	.word	cpu_arm1020_dcache_invalidate_range
-	.word	cpu_arm1020_dcache_clean_range
-	.word	cpu_arm1020_dcache_clean_page
-	.word	cpu_arm1020_dcache_clean_entry
-
-	/* icache */
-	.word	cpu_arm1020_icache_invalidate_range
-	.word	cpu_arm1020_icache_invalidate_page
-
-	/* pgtable */
-	.word	cpu_arm1020_set_pgd
-	.word	cpu_arm1020_flush_pmd
+	.word	cpu_arm1020_dcache_clean_area
+	.word	cpu_arm1020_switch_mm
 	.word	cpu_arm1020_set_pte
 	.size	arm1020_processor_functions, . - arm1020_processor_functions
@@ -542,4 +513,5 @@ __arm1020_proc_info:
 	.long	arm1020_processor_functions
 	.long	v4wbi_tlb_fns
 	.long	v4wb_user_fns
+	.long	arm1020_cache_fns
 	.size	__arm1020_proc_info, . - __arm1020_proc_info
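
The extra .long arm1020_cache_fns at the end of __arm1020_proc_info is how the
new per-CPU cache API gets wired up: boot code matches the CPU ID against each
proc_info record and copies these pointers into the global operations.  A
hedged sketch of the record's tail -- the real layout is in the asm-arm
procinfo header; types are simplified to void * and only the fields visible in
this diff are shown:

	struct proc_info_tail_sketch {
		/* ...CPU match/mask and name fields elided... */
		void *proc_fns;		/* arm1020_processor_functions */
		void *tlb_fns;		/* v4wbi_tlb_fns */
		void *user_fns;		/* v4wb_user_fns */
		void *cache_fns;	/* arm1020_cache_fns, new in this diff */
	};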
@@ -162,7 +162,7 @@ memc_phys_table_32:
  *	       and inaccessible (0x01f00000).
  * Params  : r0 = page table pointer
  */
-clear_tables:	ldr	r1, _arm3_set_pgd - 4
+clear_tables:	ldr	r1, _arm3_switch_mm - 4
 		ldr	r2, [r1]
 		sub	r1, r0, #256 * 4		@ start of MEMC tables
 		add	r2, r1, r2, lsl #2		@ end of tables
@@ -186,14 +186,16 @@ clear_tables:	ldr	r1, _arm3_switch_mm - 4
 		mov	pc, lr
 
 /*
- * Function: *_set_pgd(pgd_t *pgd)
+ * Function: *_switch_mm(pgd_t *pgd)
  * Params  : pgd	New page tables/MEMC mapping
  * Purpose : update MEMC hardware with new mapping
  */
 		.word	page_nr
-_arm3_set_pgd:	mcr	p15, 0, r1, c1, c0, 0		@ flush cache
-_arm2_set_pgd:	stmfd	sp!, {lr}
-		ldr	r1, _arm3_set_pgd - 4
+_arm3_switch_mm:
+		mcr	p15, 0, r1, c1, c0, 0		@ flush cache
+_arm2_switch_mm:
+		stmfd	sp!, {lr}
+		ldr	r1, _arm3_switch_mm - 4
 		ldr	r2, [r1]
 		sub	r0, r0, #256 * 4		@ start of MEMC tables
 		add	r1, r0, r2, lsl #2		@ end of tables
@@ -273,9 +275,6 @@ _arm2_xchg_4:	mov	r2, pc
 _arm3_xchg_4:	swp	r0, r0, [r1]
 		movs	pc, lr
 
-_arm2_3_check_bugs:
-		bics	pc, lr, #0x04000000		@ Clear FIQ disable bit
-
 cpu_arm2_name:
 		.asciz	"ARM 2"
 cpu_arm250_name:
@@ -290,28 +289,25 @@ cpu_arm3_name:
  */
 		.globl	arm2_processor_functions
 arm2_processor_functions:
-		.word	_arm2_3_check_bugs
 		.word	_arm2_proc_init
 		.word	_arm2_proc_fin
-		.word	_arm2_set_pgd
+		.word	_arm2_switch_mm
 		.word	_arm2_xchg_1
 		.word	_arm2_xchg_4
 
 		.globl	arm250_processor_functions
 arm250_processor_functions:
-		.word	_arm2_3_check_bugs
 		.word	_arm2_proc_init
 		.word	_arm2_proc_fin
-		.word	_arm2_set_pgd
+		.word	_arm2_switch_mm
 		.word	_arm3_xchg_1
 		.word	_arm3_xchg_4
 
 		.globl	arm3_processor_functions
 arm3_processor_functions:
-		.word	_arm2_3_check_bugs
 		.word	_arm3_proc_init
 		.word	_arm3_proc_fin
-		.word	_arm3_set_pgd
+		.word	_arm3_switch_mm
 		.word	_arm3_xchg_1
 		.word	_arm3_xchg_4
...
@@ -188,20 +188,6 @@ Ldata_lateldrpostreg:
 		addeq	r7, r0, r2
 		b	Ldata_saver7
 
-/*
- * Function: arm6_7_check_bugs (void)
- *	   : arm6_7_proc_init (void)
- *	   : arm6_7_proc_fin (void)
- *
- * Notes   : This processor does not require these
- */
-ENTRY(cpu_arm6_check_bugs)
-ENTRY(cpu_arm7_check_bugs)
-		mrs	ip, cpsr
-		bic	ip, ip, #PSR_F_BIT
-		msr	cpsr, ip
-		mov	pc, lr
-
 ENTRY(cpu_arm6_proc_init)
 ENTRY(cpu_arm7_proc_init)
 		mov	pc, lr
@@ -220,30 +206,19 @@ ENTRY(cpu_arm7_do_idle)
 		mov	pc, lr
 
 /*
- * Function: arm6_7_set_pgd(unsigned long pgd_phys)
+ * Function: arm6_7_switch_mm(unsigned long pgd_phys)
  * Params  : pgd_phys	Physical address of page table
  * Purpose : Perform a task switch, saving the old process' state, and restoring
  *	     the new.
  */
-ENTRY(cpu_arm6_set_pgd)
-ENTRY(cpu_arm7_set_pgd)
+ENTRY(cpu_arm6_switch_mm)
+ENTRY(cpu_arm7_switch_mm)
 		mov	r1, #0
 		mcr	p15, 0, r1, c7, c0, 0		@ flush cache
 		mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
 		mcr	p15, 0, r1, c5, c0, 0		@ flush TLBs
 		mov	pc, lr
 
-/*
- * Function: arm6_flush_pmd(pmdp)
- *
- * Params  : r0 = Address to set
- *
- * Purpose : Set a PMD and flush it out of any WB cache
- */
-ENTRY(cpu_arm6_flush_pmd)
-ENTRY(cpu_arm7_flush_pmd)
-		mov	pc, lr
-
 /*
  * Function: arm6_7_set_pte(pte_t *ptep, pte_t pte)
  * Params  : r0 = Address to set
@@ -324,7 +299,6 @@ __arm7_setup:	mov	r0, #0
 		.type	arm6_processor_functions, #object
 ENTRY(arm6_processor_functions)
 		.word	cpu_arm6_data_abort
-		.word	cpu_arm6_check_bugs
 		.word	cpu_arm6_proc_init
 		.word	cpu_arm6_proc_fin
 		.word	cpu_arm6_reset
@@ -345,8 +319,7 @@ ENTRY(arm6_processor_functions)
 		.word	cpu_arm6_icache_invalidate_page
 
 		/* pgtable */
-		.word	cpu_arm6_set_pgd
-		.word	cpu_arm6_flush_pmd
+		.word	cpu_arm6_switch_mm
 		.word	cpu_arm6_set_pte
 		.size	arm6_processor_functions, . - arm6_processor_functions
@@ -358,7 +331,6 @@ ENTRY(arm6_processor_functions)
 		.type	arm7_processor_functions, #object
 ENTRY(arm7_processor_functions)
 		.word	cpu_arm7_data_abort
-		.word	cpu_arm7_check_bugs
 		.word	cpu_arm7_proc_init
 		.word	cpu_arm7_proc_fin
 		.word	cpu_arm7_reset
@@ -379,8 +351,7 @@ ENTRY(arm7_processor_functions)
 		.word	cpu_arm7_icache_invalidate_page
 
 		/* pgtable */
-		.word	cpu_arm7_set_pgd
-		.word	cpu_arm7_flush_pmd
+		.word	cpu_arm7_switch_mm
 		.word	cpu_arm7_set_pte
 		.size	arm7_processor_functions, . - arm7_processor_functions
...
@@ -38,47 +38,12 @@
 #include <asm/hardware.h>
 
 /*
- * Function: arm720_cache_clean_invalidate_all (void)
- *	   : arm720_cache_clean_invalidate_page (unsigned long address, int size,
- *						 int flags)
- *
- * Params  : address	Area start address
- *	   : size	size of area
- *	   : flags	b0 = I cache as well
- *
- * Purpose : Flush all cache lines
- */
-ENTRY(cpu_arm720_cache_clean_invalidate_all)
-ENTRY(cpu_arm720_cache_clean_invalidate_range)
-ENTRY(cpu_arm720_icache_invalidate_range)
-ENTRY(cpu_arm720_icache_invalidate_page)
-ENTRY(cpu_arm720_dcache_invalidate_range)
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c7, 0		@ flush cache
-	mov	pc, lr
-
-/*
- * These just expect cache lines to be cleaned.  Since we have a writethrough
- * cache, we never have any dirty cachelines to worry about.
- */
-ENTRY(cpu_arm720_dcache_clean_range)
-ENTRY(cpu_arm720_dcache_clean_page)
-ENTRY(cpu_arm720_dcache_clean_entry)
-	mov	pc, lr
-
-/*
- * Function: arm720_check_bugs (void)
- *	   : arm720_proc_init (void)
+ * Function: arm720_proc_init (void)
  *	   : arm720_proc_fin (void)
  *
  * Notes   : This processor does not require these
  */
-ENTRY(cpu_arm720_check_bugs)
-	mrs	ip, cpsr
-	bic	ip, ip, #PSR_F_BIT
-	msr	cpsr, ip
-	mov	pc, lr
-
+ENTRY(cpu_arm720_dcache_clean_area)
 ENTRY(cpu_arm720_proc_init)
 	mov	pc, lr
@@ -102,28 +67,18 @@ ENTRY(cpu_arm720_do_idle)
 	mov	pc, lr
 
 /*
- * Function: arm720_set_pgd(unsigned long pgd_phys)
+ * Function: arm720_switch_mm(unsigned long pgd_phys)
  * Params  : pgd_phys	Physical address of page table
  * Purpose : Perform a task switch, saving the old process' state and restoring
  *	     the new.
  */
-ENTRY(cpu_arm720_set_pgd)
+ENTRY(cpu_arm720_switch_mm)
 	mov	r1, #0
 	mcr	p15, 0, r1, c7, c7, 0		@ invalidate cache
 	mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
 	mcr	p15, 0, r1, c8, c7, 0		@ flush TLB (v4)
 	mov	pc, lr
 
-/*
- * Function: arm720_flush_pmd(pmdp)
- *
- * Params  : r0 = Address to set
- *
- * Purpose : Set a PMD and flush it out of any WB cache
- */
-ENTRY(cpu_arm720_flush_pmd)
-	mov	pc, lr
-
 /*
  * Function: arm720_set_pte(pte_t *ptep, pte_t pte)
  * Params  : r0 = Address to set
@@ -140,7 +95,7 @@ ENTRY(cpu_arm720_set_pte)
 	bic	r2, r2, #3
 	orr	r2, r2, #HPTE_TYPE_SMALL
 
-	tst	r1, #LPTE_USER | LPTE_EXEC	@ User or Exec?
+	tst	r1, #LPTE_USER			@ User?
 	orrne	r2, r2, #HPTE_AP_READ
 
 	tst	r1, #LPTE_WRITE | LPTE_DIRTY	@ Write and Dirty?
@@ -194,31 +149,13 @@ __arm720_setup:	mov	r0, #0
 	.type	arm720_processor_functions, #object
 ENTRY(arm720_processor_functions)
 	.word	v4t_late_abort
-	.word	cpu_arm720_check_bugs
 	.word	cpu_arm720_proc_init
 	.word	cpu_arm720_proc_fin
 	.word	cpu_arm720_reset
 	.word	cpu_arm720_do_idle
-
-	/* cache */
-	.word	cpu_arm720_cache_clean_invalidate_all
-	.word	cpu_arm720_cache_clean_invalidate_range
-
-	/* dcache */
-	.word	cpu_arm720_dcache_invalidate_range
-	.word	cpu_arm720_dcache_clean_range
-	.word	cpu_arm720_dcache_clean_page
-	.word	cpu_arm720_dcache_clean_entry
-
-	/* icache */
-	.word	cpu_arm720_icache_invalidate_range
-	.word	cpu_arm720_icache_invalidate_page
-
-	/* pgtable */
-	.word	cpu_arm720_set_pgd
-	.word	cpu_arm720_flush_pmd
+	.word	cpu_arm720_dcache_clean_area
+	.word	cpu_arm720_switch_mm
 	.word	cpu_arm720_set_pte
 	.size	arm720_processor_functions, . - arm720_processor_functions
 
 	.type	cpu_arch_name, #object
@@ -249,4 +186,5 @@ __arm720_proc_info:
 	.long	arm720_processor_functions
 	.long	v4_tlb_fns
 	.long	v4wt_user_fns
+	.long	v4_cache_fns
 	.size	__arm720_proc_info, . - __arm720_proc_info
...@@ -28,41 +28,35 @@ ...@@ -28,41 +28,35 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h> #include <asm/procinfo.h>
#include <asm/hardware.h> #include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/* /*
* This is the maximum size of an area which will be invalidated * The size of one data cache line.
* using the single invalidate entry instructions. Anything larger
* than this, and we go for the whole cache.
*
* This value should be chosen such that we choose the cheapest
* alternative.
*/ */
#define MAX_AREA_SIZE 16384 #define CACHE_DLINESIZE 32
/* /*
* the cache line size of the I and D cache * The number of data cache segments.
*/ */
#define DCACHELINESIZE 32 #define CACHE_DSEGMENTS 8
#define ICACHELINESIZE 32
/* /*
* and the page size * The number of lines in a cache segment.
*/ */
#define PAGESIZE 4096 #define CACHE_DENTRIES 64
.text
/* /*
* cpu_arm920_check_bugs() * This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintainence instructions.
*/ */
ENTRY(cpu_arm920_check_bugs) #define CACHE_DLIMIT 65536
mrs ip, cpsr
bic ip, ip, #PSR_F_BIT
msr cpsr, ip
mov pc, lr
.text
/* /*
* cpu_arm920_proc_init() * cpu_arm920_proc_init()
*/ */
...@@ -76,7 +70,11 @@ ENTRY(cpu_arm920_proc_fin) ...@@ -76,7 +70,11 @@ ENTRY(cpu_arm920_proc_fin)
stmfd sp!, {lr} stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip msr cpsr_c, ip
bl cpu_arm920_cache_clean_invalidate_all #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bl arm920_flush_kern_cache_all
#else
bl v4wt_flush_kern_cache_all
#endif
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
...@@ -112,249 +110,207 @@ ENTRY(cpu_arm920_do_idle) ...@@ -112,249 +110,207 @@ ENTRY(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr mov pc, lr
/* ================================= CACHE ================================ */
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/* /*
* cpu_arm920_cache_clean_invalidate_all() * flush_user_cache_all()
*
* clean and invalidate all cache lines
* *
* Note: * Invalidate all cache entries in a particular address
* 1. we should preserve r0 at all times * space.
*/ */
.align 5 ENTRY(arm920_flush_user_cache_all)
ENTRY(cpu_arm920_cache_clean_invalidate_all) /* FALLTHROUGH */
mov r2, #1
cpu_arm920_cache_clean_invalidate_all_r2:
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
/* /*
* 'Clean & Invalidate whole DCache' * flush_kern_cache_all()
* Re-written to use Index Ops. *
* Uses registers r1, r3 and ip * Clean and invalidate the entire cache.
*/ */
mov r1, #7 << 5 @ 8 segments ENTRY(arm920_flush_kern_cache_all)
1: orr r3, r1, #63 << 26 @ 64 entries mov r2, #VM_EXEC
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index mov ip, #0
__flush_whole_cache:
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26 subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0 bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5 subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0 bcs 1b @ segments 7 to 0
#endif tst r2, #VM_EXEC
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm920_cache_clean_invalidate_range(start, end, flags) * flush_user_cache_range(start, end, flags)
* *
* clean and invalidate all cache lines associated with this area of memory * Invalidate a range of cache entries in the specified
* address space.
* *
* start: Area start address * - start - start address (inclusive)
* end: Area end address * - end - end address (exclusive)
* flags: nonzero for I cache as well * - flags - vm_flags for address space
*/ */
.align 5 ENTRY(arm920_flush_user_cache_range)
ENTRY(cpu_arm920_cache_clean_invalidate_range) mov ip, #0
bic r0, r0, #DCACHELINESIZE - 1 @ && added by PGM sub r3, r1, r0 @ calculate total size
bic r1, r1, #DCACHELINESIZE - 1 @ && added by DHM cmp r3, #CACHE_DLIMIT
sub r3, r1, r0 bhs __flush_whole_cache
cmp r3, #MAX_AREA_SIZE
bgt cpu_arm920_cache_clean_invalidate_all_r2 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
1: teq r2, #0 tst r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
#else
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
#endif
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
tst r2, #VM_EXEC
mcr p15, 0, r1, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ D-CACHE =============================== */
/* /*
* cpu_arm920_dcache_invalidate_range(start, end) * coherent_kern_range(start, end)
* *
* throw away all D-cached data in specified region without an obligation * Ensure coherency between the Icache and the Dcache in the
* to write them back. Note however that we must clean the D-cached entries * region described by start, end. If you have non-snooping
* around the boundaries if the start and/or end address are not cache * Harvard caches, you need to implement this function.
* aligned.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(arm920_coherent_kern_range)
ENTRY(cpu_arm920_dcache_invalidate_range) bic r0, r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
tst r0, #DCACHELINESIZE - 1 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE
tst r1, #DCACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
#endif @ clean D entry
bic r0, r0, #DCACHELINESIZE - 1
bic r1, r1, #DCACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm920_dcache_clean_range(start, end) * flush_kern_dcache_page(void *page)
* *
* For the specified virtual address range, ensure that all caches contain * Ensure no D cache aliasing occurs, either with itself or
* clean data, such that peripheral accesses to the physical RAM fetch * the I cache
* correct data.
* *
* start: virtual start address * - addr - page aligned address
* end: virtual end address
*/ */
.align 5 ENTRY(arm920_flush_kern_dcache_page)
ENTRY(cpu_arm920_dcache_clean_range) add r1, r0, #PAGE_SZ
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
bic r0, r0, #DCACHELINESIZE - 1 add r0, r0, #CACHE_DLINESIZE
sub r1, r1, r0 cmp r0, r1
cmp r1, #MAX_AREA_SIZE blo 1b
mov r2, #0 mov r0, #0
bgt cpu_arm920_cache_clean_invalidate_all_r2 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
bic r1, r1, #DCACHELINESIZE -1
add r1, r1, #DCACHELINESIZE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bpl 1b
#endif
mcr p15, 0, r2, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm920_dcache_clean_page(page) * dma_inv_range(start, end)
* *
* Cleans a single page of dcache so that if we have any future aliased * Invalidate (discard) the specified virtual address range.
* mappings, they will be consistent at the time that they are created. * May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* *
* page: virtual address of page to clean from dcache * - start - virtual start address
* - end - virtual end address
* *
* Note: * (same as v4wb)
* 1. we don't need to flush the write buffer in this case.
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/ */
.align 5 ENTRY(arm920_dma_inv_range)
ENTRY(cpu_arm920_dcache_clean_page) tst r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1
mov r1, #PAGESIZE mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1
add r0, r0, #DCACHELINESIZE mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #2 * DCACHELINESIZE cmp r0, r1
bne 1b blo 1b
#endif mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm920_dcache_clean_entry(addr) * dma_clean_range(start, end)
*
* Clean the specified virtual address range.
* *
* Clean the specified entry of any caches such that the MMU * - start - virtual start address
* translation fetches will obtain correct data. * - end - virtual end address
* *
* addr: cache-unaligned virtual address * (same as v4wb)
*/ */
.align 5 ENTRY(arm920_dma_clean_range)
ENTRY(cpu_arm920_dcache_clean_entry) bic r0, r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE
#endif cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ I-CACHE =============================== */
/* /*
* cpu_arm920_icache_invalidate_range(start, end) * dma_flush_range(start, end)
* *
* invalidate a range of virtual addresses from the Icache * Clean and invalidate the specified virtual address range.
* *
* This is a little misleading, it is not intended to clean out * - start - virtual start address
* the i-cache but to make sure that any data written to the * - end - virtual end address
* range is made consistent. This means that when we execute code
* in that region, everything works as we expect.
*
* This generally means writing back data in the Dcache and
* write buffer and flushing the Icache over that region
*
* start: virtual start address
* end: virtual end address
*
* NOTE: ICACHELINESIZE == DCACHELINESIZE (so we don't need to
* loop twice, once for i-cache, once for d-cache)
*/ */
.align 5 ENTRY(arm920_dma_flush_range)
ENTRY(cpu_arm920_icache_invalidate_range) bic r0, r0, #CACHE_DLINESIZE - 1
bic r0, r0, #ICACHELINESIZE - 1 @ Safety check 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
sub r1, r1, r0 add r0, r0, #CACHE_DLINESIZE
cmp r1, #MAX_AREA_SIZE cmp r0, r1
bgt cpu_arm920_cache_clean_invalidate_all_r2 blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
bic r1, r1, #ICACHELINESIZE - 1 ENTRY(arm920_cache_fns)
add r1, r1, #ICACHELINESIZE .long arm920_flush_kern_cache_all
.long arm920_flush_user_cache_all
.long arm920_flush_user_cache_range
.long arm920_coherent_kern_range
.long arm920_flush_kern_dcache_page
.long arm920_dma_inv_range
.long arm920_dma_clean_range
.long arm920_dma_flush_range
1: mcr p15, 0, r0, c7, c5, 1 @ Clean I entry #endif
mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
add r0, r0, #ICACHELINESIZE
subs r1, r1, #ICACHELINESIZE
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(cpu_arm920_icache_invalidate_page) ENTRY(cpu_arm920_dcache_clean_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
mov pc, lr mov pc, lr
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
/* /*
* cpu_arm920_set_pgd(pgd) * cpu_arm920_switch_mm(pgd)
* *
* Set the translation base pointer to be as described by pgd. * Set the translation base pointer to be as described by pgd.
* *
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_arm920_set_pgd) ENTRY(cpu_arm920_switch_mm)
mov ip, #0 mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
/* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else #else
@ && 'Clean & Invalidate whole DCache' @ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops. @ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip @ && Uses registers r1, r3 and ip
mov r1, #7 << 5 @ 8 segments mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #63 << 26 @ 64 entries 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 26 subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0 bcs 2b @ entries 63 to 0
...@@ -367,20 +323,6 @@ ENTRY(cpu_arm920_set_pgd) ...@@ -367,20 +323,6 @@ ENTRY(cpu_arm920_set_pgd)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr mov pc, lr
/*
* cpu_arm920_flush_pmd(pmdp)
*
* Set a level 1 translation table entry, and clean it out of
* any caches such that the MMUs can load it correctly.
*
* pmdp: pointer to PMD entry
*/
.align 5
ENTRY(cpu_arm920_flush_pmd)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
 * cpu_arm920_set_pte(ptep, pte)
 *
...@@ -388,8 +330,6 @@ ENTRY(cpu_arm920_flush_pmd)
 */
	.align	5
ENTRY(cpu_arm920_set_pte)
-	tst	r0, #2048
-	streq	r0, [r0, -r0]			@ BUG_ON
	str	r1, [r0], #-2048		@ linux version
	eor	r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
...@@ -398,7 +338,7 @@ ENTRY(cpu_arm920_set_pte)
	bic	r2, r2, #3
	orr	r2, r2, #HPTE_TYPE_SMALL

-	tst	r1, #LPTE_USER | LPTE_EXEC	@ User or Exec?
+	tst	r1, #LPTE_USER			@ User?
	orrne	r2, r2, #HPTE_AP_READ

	tst	r1, #LPTE_WRITE | LPTE_DIRTY	@ Write and Dirty?
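cpu_arm920_set_pte keeps two PTE copies: the Linux view at the address passed in, and the hardware view 2048 bytes below it, derived by the eor/tst sequence. A hedged C model of that derivation; the constants are placeholders, and the elided write-permission branch is inferred from the matching StrongARM code later in this diff:

/* Placeholder bit values for illustration only; the real LPTE_/HPTE_
 * definitions live in the ARM pgtable headers. */
#define LPTE_PRESENT	(1 << 0)
#define LPTE_YOUNG	(1 << 1)
#define LPTE_USER	(1 << 4)
#define LPTE_WRITE	(1 << 5)
#define LPTE_DIRTY	(1 << 6)
#define HPTE_TYPE_SMALL	2
#define HPTE_AP_READ	0xaa0		/* assumed AP encoding */
#define HPTE_AP_WRITE	0x550		/* assumed AP encoding */

static void arm920_set_pte_model(unsigned long *ptep, unsigned long pte)
{
	unsigned long hw = (pte & ~0xff0UL & ~3UL) | HPTE_TYPE_SMALL;

	ptep[0] = pte;					/* Linux copy */

	if (pte & LPTE_USER)				/* user-visible page */
		hw |= HPTE_AP_READ;
	if ((pte & (LPTE_WRITE | LPTE_DIRTY)) == (LPTE_WRITE | LPTE_DIRTY))
		hw |= HPTE_AP_WRITE;			/* writable once dirty */
	if ((pte & (LPTE_PRESENT | LPTE_YOUNG)) != (LPTE_PRESENT | LPTE_YOUNG))
		hw = 0;					/* fault until present+young */

	ptep[-512] = hw;	/* hardware table sits 2048 bytes below (4-byte PTEs) */
	/* the assembly then cleans the D line and drains the write buffer */
}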
...@@ -477,31 +417,13 @@ __arm920_setup:
	.type	arm920_processor_functions, #object
arm920_processor_functions:
	.word	v4t_early_abort
-	.word	cpu_arm920_check_bugs
	.word	cpu_arm920_proc_init
	.word	cpu_arm920_proc_fin
	.word	cpu_arm920_reset
	.word	cpu_arm920_do_idle
+	.word	cpu_arm920_dcache_clean_area
+	.word	cpu_arm920_switch_mm
-	/* cache */
-	.word	cpu_arm920_cache_clean_invalidate_all
-	.word	cpu_arm920_cache_clean_invalidate_range
-	/* dcache */
-	.word	cpu_arm920_dcache_invalidate_range
-	.word	cpu_arm920_dcache_clean_range
-	.word	cpu_arm920_dcache_clean_page
-	.word	cpu_arm920_dcache_clean_entry
-	/* icache */
-	.word	cpu_arm920_icache_invalidate_range
-	.word	cpu_arm920_icache_invalidate_page
-	/* pgtable */
-	.word	cpu_arm920_set_pgd
-	.word	cpu_arm920_flush_pmd
	.word	cpu_arm920_set_pte
	.size	arm920_processor_functions, . - arm920_processor_functions

	.type	cpu_arch_name, #object
...@@ -530,4 +452,9 @@ __arm920_proc_info:
	.long	arm920_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	.long	arm920_cache_fns
+#else
+	.long	v4wt_cache_fns
+#endif
	.size	__arm920_proc_info, . - __arm920_proc_info
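The proc_info record gains a fourth function-pointer slot here (the cache_fns), alongside the existing processor, TLB and user-space helpers; boot code scans these records and matches the CPU ID against a value/mask pair. A rough C picture, with field names chosen for illustration rather than copied from asm/procinfo.h:

struct proc_info_sketch {
	unsigned int	cpu_val;		/* CPU ID value to match */
	unsigned int	cpu_mask;		/* significant ID bits */
	const char	*arch_name;		/* cpu_arch_name */
	const char	*elf_name;		/* cpu_elf_name */
	unsigned int	elf_hwcap;		/* HWCAP_* bits */
	const char	*cpu_name;		/* cpu_arm920_name */
	const void	*proc_fns;		/* arm920_processor_functions */
	const void	*tlb_fns;		/* v4wbi_tlb_fns */
	const void	*user_fns;		/* v4wb_user_fns */
	const void	*cache_fns;		/* arm920_cache_fns or v4wt_cache_fns */
};

static int proc_info_matches(const struct proc_info_sketch *p,
			     unsigned int cpuid)
{
	return (cpuid & p->cpu_mask) == p->cpu_val;
}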
...@@ -29,41 +29,36 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"

/*
- * This is the maximum size of an area which will be invalidated
- * using the single invalidate entry instructions.  Anything larger
- * than this, and we go for the whole cache.
- *
- * This value should be chosen such that we choose the cheapest
- * alternative.
+ * The size of one data cache line.
 */
-#define MAX_AREA_SIZE	8192
+#define CACHE_DLINESIZE	32

/*
- * the cache line size of the I and D cache
+ * The number of data cache segments.
 */
-#define DCACHELINESIZE	32
-#define ICACHELINESIZE	32
+#define CACHE_DSEGMENTS	4

/*
- * and the page size
+ * The number of lines in a cache segment.
 */
-#define PAGESIZE	4096
+#define CACHE_DENTRIES	64

-	.text
/*
- * cpu_arm922_check_bugs()
+ * This is the size at which it becomes more efficient to
+ * clean the whole cache, rather than using the individual
+ * cache line maintenance instructions.  (I think this should
+ * be 32768).
 */
-ENTRY(cpu_arm922_check_bugs)
-	mrs	ip, cpsr
-	bic	ip, ip, #PSR_F_BIT
-	msr	cpsr, ip
-	mov	pc, lr
+#define CACHE_DLIMIT	8192
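A quick sanity check on the new arm922 constants: 4 segments x 64 entries x 32 bytes per line is exactly the 8 KB ARM922 D-cache, and CACHE_DLIMIT is set to the same 8192 (the comment above questions whether 32768 would be the better break-even point). The relationship can be pinned at compile time:

/* Values copied from the defines above; the negative array size
 * makes the build fail if geometry and limit ever drift apart. */
#define CACHE_DLINESIZE	32
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64
#define CACHE_DLIMIT	8192

typedef char arm922_dcache_geometry_check[
	(CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE == CACHE_DLIMIT)
	? 1 : -1];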
.text
/*
 * cpu_arm922_proc_init()
 */
...@@ -77,7 +72,11 @@ ENTRY(cpu_arm922_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
-	bl	cpu_arm922_cache_clean_invalidate_all
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	bl	arm922_flush_kern_cache_all
+#else
+	bl	v4wt_flush_kern_cache_all
+#endif
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
...@@ -113,249 +112,209 @@ ENTRY(cpu_arm922_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

-/* ================================= CACHE ================================ */
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
- * cpu_arm922_cache_clean_invalidate_all()
- *
- * clean and invalidate all cache lines
+ *	flush_user_cache_all()
 *
- * Note:
- *  1. we should preserve r0 at all times
+ *	Clean and invalidate all cache entries in a particular
+ *	address space.
 */
-	.align	5
-ENTRY(cpu_arm922_cache_clean_invalidate_all)
-	mov	r2, #1
-cpu_arm922_cache_clean_invalidate_all_r2:
-	mov	ip, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
-#else
-@ 'Clean & Invalidate whole DCache'
-@ Re-written to use Index Ops.
-@ Uses registers r1, r3 and ip
-	mov	r1, #3 << 5			@ 4 segments
-1:	orr	r3, r1, #63 << 26		@ 64 entries
+ENTRY(arm922_flush_user_cache_all)
+	/* FALLTHROUGH */
+
+/*
+ *	flush_kern_cache_all()
+ *
+ *	Clean and invalidate the entire cache.
+ */
+ENTRY(arm922_flush_kern_cache_all)
+	mov	r2, #VM_EXEC
+	mov	ip, #0
+__flush_whole_cache:
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 3 to 0
-#endif
-	teq	r2, #0
+	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
/*
- * cpu_arm922_cache_clean_invalidate_range(start, end, flags)
+ *	flush_user_cache_range(start, end, flags)
 *
- * clean and invalidate all cache lines associated with this area of memory
+ *	Clean and invalidate a range of cache entries in the
+ *	specified address range.
 *
- * start: Area start address
- * end:   Area end address
- * flags: nonzero for I cache as well
+ *	- start	- start address (inclusive)
+ *	- end	- end address (exclusive)
+ *	- flags	- vm_flags describing address space
 */
-	.align	5
-ENTRY(cpu_arm922_cache_clean_invalidate_range)
-	bic	r0, r0, #DCACHELINESIZE - 1	@ && added by PGM
-	bic	r1, r1, #DCACHELINESIZE - 1	@ && added by DHM
-	sub	r3, r1, r0
-	cmp	r3, #MAX_AREA_SIZE
-	bgt	cpu_arm922_cache_clean_invalidate_all_r2
-1:	teq	r2, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
-	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
-	add	r0, r0, #DCACHELINESIZE
-	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
-	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
-	add	r0, r0, #DCACHELINESIZE
-#else
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
-	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
-	add	r0, r0, #DCACHELINESIZE
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
-	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
-	add	r0, r0, #DCACHELINESIZE
-#endif
-	cmp	r0, r1
-	blt	1b
-	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
+ENTRY(arm922_flush_user_cache_range)
+	mov	ip, #0
+	sub	r3, r1, r0			@ calculate total size
+	cmp	r3, #CACHE_DLIMIT
+	bhs	__flush_whole_cache
+
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	tst	r2, #VM_EXEC
+	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	tst	r2, #VM_EXEC
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
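The pattern in arm922_flush_user_cache_range, comparing the span against CACHE_DLIMIT before falling back to a whole-cache flush and touching the I-cache and write buffer only for VM_EXEC mappings, recurs throughout this commit. A hedged C rendering (the four helpers stand in for the mcr operations and are not real kernel functions):

#define CACHE_DLINESIZE	32
#define CACHE_DLIMIT	8192
#define VM_EXEC		0x00000004	/* value of the vm_flags bit */

void flush_whole_cache(unsigned int vm_flags);	/* set/way loop above */
void clean_inv_dline(unsigned long addr);	/* c7, c14, 1 */
void inv_iline(unsigned long addr);		/* c7, c5, 1 */
void drain_write_buffer(void);			/* c7, c10, 4 */

void flush_user_cache_range_sketch(unsigned long start, unsigned long end,
				   unsigned int vm_flags)
{
	unsigned long addr;

	if (end - start >= CACHE_DLIMIT) {	/* bhs __flush_whole_cache */
		flush_whole_cache(vm_flags);
		return;
	}
	for (addr = start; addr < end; addr += CACHE_DLINESIZE) {
		clean_inv_dline(addr);
		if (vm_flags & VM_EXEC)
			inv_iline(addr);
	}
	if (vm_flags & VM_EXEC)
		drain_write_buffer();
}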
-/* ================================ D-CACHE =============================== */
/*
- * cpu_arm922_dcache_invalidate_range(start, end)
+ *	coherent_kern_range(start, end)
 *
- * throw away all D-cached data in specified region without an obligation
- * to write them back.  Note however that we must clean the D-cached entries
- * around the boundaries if the start and/or end address are not cache
- * aligned.
+ *	Ensure coherency between the Icache and the Dcache in the
+ *	region described by start, end.  If you have non-snooping
+ *	Harvard caches, you need to implement this function.
 *
- * start: virtual start address
- * end:   virtual end address
+ *	- start	- virtual start address
+ *	- end	- virtual end address
 */
-	.align	5
-ENTRY(cpu_arm922_dcache_invalidate_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	tst	r0, #DCACHELINESIZE - 1
-	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
-	tst	r1, #DCACHELINESIZE - 1
-	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
-#endif
-	bic	r0, r0, #DCACHELINESIZE - 1
-	bic	r1, r1, #DCACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
-	add	r0, r0, #DCACHELINESIZE
+ENTRY(arm922_coherent_kern_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
+	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
-	blt	1b
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
/*
- * cpu_arm922_dcache_clean_range(start, end)
+ *	flush_kern_dcache_page(void *page)
 *
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
+ *	Ensure no D cache aliasing occurs, either with itself or
+ *	the I cache
 *
- * start: virtual start address
- * end:   virtual end address
+ *	- addr	- page aligned address
 */
-	.align	5
-ENTRY(cpu_arm922_dcache_clean_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	bic	r0, r0, #DCACHELINESIZE - 1
-	sub	r1, r1, r0
-	cmp	r1, #MAX_AREA_SIZE
-	mov	r2, #0
-	bgt	cpu_arm922_cache_clean_invalidate_all_r2
-
-	bic	r1, r1, #DCACHELINESIZE - 1
-	add	r1, r1, #DCACHELINESIZE
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-	add	r0, r0, #DCACHELINESIZE
-	subs	r1, r1, #DCACHELINESIZE
-	bpl	1b
-#endif
-	mcr	p15, 0, r2, c7, c10, 4		@ drain WB
+ENTRY(arm922_flush_kern_dcache_page)
+	add	r1, r0, #PAGE_SZ
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
/*
- * cpu_arm922_dcache_clean_page(page)
+ *	dma_inv_range(start, end)
 *
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
+ *	Invalidate (discard) the specified virtual address range.
+ *	May not write back any entries.  If 'start' or 'end'
+ *	are not cache line aligned, those lines must be written
+ *	back.
 *
- * page: virtual address of page to clean from dcache
+ *	- start	- virtual start address
+ *	- end	- virtual end address
 *
- * Note:
- *  1. we don't need to flush the write buffer in this case.
- *  2. we don't invalidate the entries since when we write the page
- *     out to disk, the entries may get reloaded into the cache.
+ * (same as v4wb)
 */
-	.align	5
-ENTRY(cpu_arm922_dcache_clean_page)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	mov	r1, #PAGESIZE
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-	add	r0, r0, #DCACHELINESIZE
-	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-	add	r0, r0, #DCACHELINESIZE
-	subs	r1, r1, #2 * DCACHELINESIZE
-	bne	1b
-#endif
+ENTRY(arm922_dma_inv_range)
+	tst	r0, #CACHE_DLINESIZE - 1
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
+	tst	r1, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
+1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
- * cpu_arm922_dcache_clean_entry(addr)
+ *	dma_clean_range(start, end)
 *
- * Clean the specified entry of any caches such that the MMU
- * translation fetches will obtain correct data.
+ *	Clean the specified virtual address range.
 *
- * addr: cache-unaligned virtual address
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ *
+ * (same as v4wb)
 */
-	.align	5
-ENTRY(cpu_arm922_dcache_clean_entry)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-#endif
+ENTRY(arm922_dma_clean_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
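The three dma_* entry points line up with the DMA streaming API's transfer directions: invalidate before a device writes into memory, clean before a device reads from it, and clean+invalidate for buffers used both ways. A hedged dispatch sketch (the enum mirrors the generic DMA API; the helper itself is illustrative):

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
};

extern void arm922_dma_inv_range(unsigned long start, unsigned long end);
extern void arm922_dma_clean_range(unsigned long start, unsigned long end);
extern void arm922_dma_flush_range(unsigned long start, unsigned long end);

static void dma_cache_maint_sketch(unsigned long start, unsigned long end,
				   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:	/* device -> memory: discard stale lines */
		arm922_dma_inv_range(start, end);
		break;
	case DMA_TO_DEVICE:	/* memory -> device: write dirty lines back */
		arm922_dma_clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* both ways: clean, then invalidate */
		arm922_dma_flush_range(start, end);
		break;
	}
}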
-/* ================================ I-CACHE =============================== */
/*
- * cpu_arm922_icache_invalidate_range(start, end)
- *
- * invalidate a range of virtual addresses from the Icache
- *
- * This is a little misleading, it is not intended to clean out
- * the i-cache but to make sure that any data written to the
- * range is made consistent.  This means that when we execute code
- * in that region, everything works as we expect.
- *
- * This generally means writing back data in the Dcache and
- * write buffer and flushing the Icache over that region
+ *	dma_flush_range(start, end)
 *
- * start: virtual start address
- * end:   virtual end address
+ *	Clean and invalidate the specified virtual address range.
 *
- * NOTE: ICACHELINESIZE == DCACHELINESIZE (so we don't need to
- * loop twice, once for i-cache, once for d-cache)
+ *	- start	- virtual start address
+ *	- end	- virtual end address
 */
-	.align	5
-ENTRY(cpu_arm922_icache_invalidate_range)
-	bic	r0, r0, #ICACHELINESIZE - 1	@ Safety check
-	sub	r1, r1, r0
-	cmp	r1, #MAX_AREA_SIZE
-	bgt	cpu_arm922_cache_clean_invalidate_all_r2
-	bic	r1, r1, #ICACHELINESIZE - 1
-	add	r1, r1, #ICACHELINESIZE
-1:	mcr	p15, 0, r0, c7, c5, 1		@ Clean I entry
-	mcr	p15, 0, r0, c7, c10, 1		@ Clean D entry
-	add	r0, r0, #ICACHELINESIZE
-	subs	r1, r1, #ICACHELINESIZE
-	bne	1b
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	mov	pc, lr
-
-ENTRY(cpu_arm922_icache_invalidate_page)
-	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+ENTRY(arm922_dma_flush_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+ENTRY(arm922_cache_fns)
+	.long	arm922_flush_kern_cache_all
+	.long	arm922_flush_user_cache_all
+	.long	arm922_flush_user_cache_range
+	.long	arm922_coherent_kern_range
+	.long	arm922_flush_kern_dcache_page
+	.long	arm922_dma_inv_range
+	.long	arm922_dma_clean_range
+	.long	arm922_dma_flush_range
+
+#endif
+
+ENTRY(cpu_arm922_dcache_clean_area)
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	subs	r1, r1, #CACHE_DLINESIZE
+	bhi	1b
+#endif
	mov	pc, lr
/* =============================== PageTable ============================== */
/*
- * cpu_arm922_set_pgd(pgd)
+ * cpu_arm922_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
-ENTRY(cpu_arm922_set_pgd)
+ENTRY(cpu_arm922_switch_mm)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	/* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip
-	mov	r1, #3 << 5			@ 4 segments
-1:	orr	r3, r1, #63 << 26		@ 64 entries
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
...@@ -368,20 +327,6 @@ ENTRY(cpu_arm922_set_pgd)
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr
/*
* cpu_arm922_flush_pmd(pmdp)
*
* Set a level 1 translation table entry, and clean it out of
* any caches such that the MMUs can load it correctly.
*
* pmdp: pointer to PMD entry
*/
.align 5
ENTRY(cpu_arm922_flush_pmd)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
 * cpu_arm922_set_pte(ptep, pte)
 *
...@@ -397,7 +342,7 @@ ENTRY(cpu_arm922_set_pte)
	bic	r2, r2, #3
	orr	r2, r2, #HPTE_TYPE_SMALL

-	tst	r1, #LPTE_USER | LPTE_EXEC	@ User or Exec?
+	tst	r1, #LPTE_USER			@ User?
	orrne	r2, r2, #HPTE_AP_READ

	tst	r1, #LPTE_WRITE | LPTE_DIRTY	@ Write and Dirty?
...@@ -476,31 +421,13 @@ __arm922_setup:
	.type	arm922_processor_functions, #object
arm922_processor_functions:
	.word	v4t_early_abort
-	.word	cpu_arm922_check_bugs
	.word	cpu_arm922_proc_init
	.word	cpu_arm922_proc_fin
	.word	cpu_arm922_reset
	.word	cpu_arm922_do_idle
+	.word	cpu_arm922_dcache_clean_area
+	.word	cpu_arm922_switch_mm
-	/* cache */
-	.word	cpu_arm922_cache_clean_invalidate_all
-	.word	cpu_arm922_cache_clean_invalidate_range
-	/* dcache */
-	.word	cpu_arm922_dcache_invalidate_range
-	.word	cpu_arm922_dcache_clean_range
-	.word	cpu_arm922_dcache_clean_page
-	.word	cpu_arm922_dcache_clean_entry
-	/* icache */
-	.word	cpu_arm922_icache_invalidate_range
-	.word	cpu_arm922_icache_invalidate_page
-	/* pgtable */
-	.word	cpu_arm922_set_pgd
-	.word	cpu_arm922_flush_pmd
	.word	cpu_arm922_set_pte
	.size	arm922_processor_functions, . - arm922_processor_functions

	.type	cpu_arch_name, #object
...@@ -529,4 +456,9 @@ __arm922_proc_info:
	.long	arm922_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	.long	arm922_cache_fns
+#else
+	.long	v4wt_cache_fns
+#endif
	.size	__arm922_proc_info, . - __arm922_proc_info
...@@ -28,9 +28,10 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
+#include <asm/page.h>
+#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
...@@ -40,30 +41,14 @@
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
-#define MAX_AREA_SIZE	16384
+#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
-#define DCACHELINESIZE	32
-#define ICACHELINESIZE	32
-
-/*
- * and the page size
- */
-#define PAGESIZE	4096
+#define CACHE_DLINESIZE	32

	.text

-/*
- * cpu_arm926_check_bugs()
- */
-ENTRY(cpu_arm926_check_bugs)
-	mrs	ip, cpsr
-	bic	ip, ip, #PSR_F_BIT
-	msr	cpsr, ip
-	mov	pc, lr
-
/*
 * cpu_arm926_proc_init()
 */
...@@ -77,7 +62,7 @@ ENTRY(cpu_arm926_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
-	bl	cpu_arm926_cache_clean_invalidate_all
+	bl	arm926_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
...@@ -107,236 +92,229 @@ ENTRY(cpu_arm926_reset)
/*
 * cpu_arm926_do_idle()
+ *
+ * Called with IRQs disabled
 */
-	.align	5
+	.align	10
ENTRY(cpu_arm926_do_idle)
+	mov	r0, #0
+	mrc	p15, 0, r1, c1, c0, 0		@ Read control register
+	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
+	bic	r2, r1, #1 << 12
+	mcr	p15, 0, r2, c1, c0, 0		@ Disable I cache
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
+	mcr	p15, 0, r1, c1, c0, 0		@ Restore ICache enable
	mov	pc, lr
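The arm926 idle sequence above reads the control register, drains the write buffer, clears bit 12 to switch the I-cache off around the wait-for-interrupt, then writes the saved value back. An inline-asm C rendering of the same steps, assuming a privileged ARM kernel context:

static inline void arm926_idle_sketch(void)
{
	unsigned long ctrl, zero = 0;

	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (ctrl));
	asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero));	/* drain WB */
	asm volatile("mcr p15, 0, %0, c1, c0, 0"
		     : : "r" (ctrl & ~(1UL << 12)));			/* I-cache off */
	asm volatile("mcr p15, 0, %0, c7, c0, 4" : : "r" (zero));	/* wait for interrupt */
	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (ctrl));	/* restore */
}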
-/* ================================= CACHE ================================ */
/*
- * cpu_arm926_cache_clean_invalidate_all()
+ *	flush_user_cache_all()
 *
- * clean and invalidate all cache lines
+ *	Clean and invalidate all cache entries in a particular
+ *	address space.
+ */
+ENTRY(arm926_flush_user_cache_all)
+	/* FALLTHROUGH */
+
+/*
+ *	flush_kern_cache_all()
 *
- * Note:
- *  1. we should preserve r0 at all times
+ *	Clean and invalidate the entire cache.
 */
-	.align	5
-ENTRY(cpu_arm926_cache_clean_invalidate_all)
-	mov	r2, #1
-cpu_arm926_cache_clean_invalidate_all_r2:
+ENTRY(arm926_flush_kern_cache_all)
+	mov	r2, #VM_EXEC
	mov	ip, #0
+__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
1:	mrc	p15, 0, r15, c7, c14, 3		@ test,clean,invalidate
	bne	1b
#endif
-	teq	r2, #0
+	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
/*
- * cpu_arm926_cache_clean_invalidate_range(start, end, flags)
- *
- * clean and invalidate all cache lines associated with this area of memory
- *
- * This is a little misleading, it is not intended to clean out
- * the i-cache but to make sure that any data written to the
- * range is made consistent.  This means that when we execute code
- * in that region, everything works as we expect.
- *
- * This generally means writing back data in the Dcache and
- * write buffer and flushing the Icache over that region
+ *	flush_user_cache_range(start, end, flags)
 *
- * start: Area start address
- * end:   Area end address
- * flags: nonzero for I cache as well
+ *	Clean and invalidate a range of cache entries in the
+ *	specified address range.
+ *
+ *	- start	- start address (inclusive)
+ *	- end	- end address (exclusive)
+ *	- flags	- vm_flags describing address space
 */
-	.align	5
-ENTRY(cpu_arm926_cache_clean_invalidate_range)
-	bic	r0, r0, #DCACHELINESIZE - 1	@ && added by PGM
-	bic	r1, r1, #DCACHELINESIZE - 1	@ && added by DHM
-	sub	r3, r1, r0
-	cmp	r3, #MAX_AREA_SIZE
-	bgt	cpu_arm926_cache_clean_invalidate_all_r2
-1:	teq	r2, #0
+ENTRY(arm926_flush_user_cache_range)
+	mov	ip, #0
+	sub	r3, r1, r0			@ calculate total size
+	cmp	r3, #CACHE_DLIMIT
+	bgt	__flush_whole_cache
+1:	tst	r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
-	add	r0, r0, #DCACHELINESIZE
+	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
-	add	r0, r0, #DCACHELINESIZE
+	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
-	add	r0, r0, #DCACHELINESIZE
+	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
-	add	r0, r0, #DCACHELINESIZE
+	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
-	blt	1b
-	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
+	blo	1b
+	tst	r2, #VM_EXEC
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
-/* ================================ D-CACHE =============================== */
/*
- * cpu_arm926_dcache_invalidate_range(start, end)
+ *	coherent_kern_range(start, end)
 *
- * throw away all D-cached data in specified region without an obligation
- * to write them back.  Note however that we must clean the D-cached entries
- * around the boundaries if the start and/or end address are not cache
- * aligned.
+ *	Ensure coherency between the Icache and the Dcache in the
+ *	region described by start, end.  If you have non-snooping
+ *	Harvard caches, you need to implement this function.
 *
- * start: virtual start address
- * end:   virtual end address
+ *	- start	- virtual start address
+ *	- end	- virtual end address
 */
-	.align	5
-ENTRY(cpu_arm926_dcache_invalidate_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	tst	r0, #DCACHELINESIZE - 1
-	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
-	tst	r1, #DCACHELINESIZE - 1
-	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
-#endif
-	bic	r0, r0, #DCACHELINESIZE - 1
-	bic	r1, r1, #DCACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
-	add	r0, r0, #DCACHELINESIZE
+ENTRY(arm926_coherent_kern_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
+	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
-	blt	1b
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
- * cpu_arm926_dcache_clean_range(start, end)
+ *	flush_kern_dcache_page(void *page)
 *
- * For the specified virtual address range, ensure that all caches contain
- * clean data, such that peripheral accesses to the physical RAM fetch
- * correct data.
+ *	Ensure no D cache aliasing occurs, either with itself or
+ *	the I cache
 *
- * start: virtual start address
- * end:   virtual end address
+ *	- addr	- page aligned address
 */
-	.align	5
-ENTRY(cpu_arm926_dcache_clean_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	bic	r0, r0, #DCACHELINESIZE - 1
-	sub	r1, r1, r0
-	cmp	r1, #MAX_AREA_SIZE
-	mov	r2, #0
-	bgt	cpu_arm926_cache_clean_invalidate_all_r2
-
-	bic	r1, r1, #DCACHELINESIZE - 1
-	add	r1, r1, #DCACHELINESIZE
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-	add	r0, r0, #DCACHELINESIZE
-	subs	r1, r1, #DCACHELINESIZE
-	bpl	1b
-#endif
-	mcr	p15, 0, r2, c7, c10, 4		@ drain WB
+ENTRY(arm926_flush_kern_dcache_page)
+	add	r1, r0, #PAGE_SZ
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
/*
- * cpu_arm926_dcache_clean_page(page)
+ *	dma_inv_range(start, end)
 *
- * Cleans a single page of dcache so that if we have any future aliased
- * mappings, they will be consistent at the time that they are created.
+ *	Invalidate (discard) the specified virtual address range.
+ *	May not write back any entries.  If 'start' or 'end'
+ *	are not cache line aligned, those lines must be written
+ *	back.
 *
- * page: virtual address of page to clean from dcache
+ *	- start	- virtual start address
+ *	- end	- virtual end address
 *
- * Note:
- *  1. we don't need to flush the write buffer in this case.
- *  2. we don't invalidate the entries since when we write the page
- *     out to disk, the entries may get reloaded into the cache.
+ * (same as v4wb)
 */
-	.align	5
-ENTRY(cpu_arm926_dcache_clean_page)
+ENTRY(arm926_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	mov	r1, #PAGESIZE
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-	add	r0, r0, #DCACHELINESIZE
-	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-	add	r0, r0, #DCACHELINESIZE
-	subs	r1, r1, #2 * DCACHELINESIZE
-	bne	1b
+	tst	r0, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
+	tst	r1, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
- * cpu_arm926_dcache_clean_entry(addr)
+ *	dma_clean_range(start, end)
 *
- * Clean the specified entry of any caches such that the MMU
- * translation fetches will obtain correct data.
+ *	Clean the specified virtual address range.
 *
- * addr: cache-unaligned virtual address
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ *
+ * (same as v4wb)
 */
-	.align	5
-ENTRY(cpu_arm926_dcache_clean_entry)
+ENTRY(arm926_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
-/* ================================ I-CACHE =============================== */
/*
- * cpu_arm926_icache_invalidate_range(start, end)
+ *	dma_flush_range(start, end)
 *
- * invalidate a range of virtual addresses from the Icache
+ *	Clean and invalidate the specified virtual address range.
 *
- * start: virtual start address
- * end:   virtual end address
+ *	- start	- virtual start address
+ *	- end	- virtual end address
 */
-	.align	5
-ENTRY(cpu_arm926_icache_invalidate_range)
-	bic	r0, r0, #DCACHELINESIZE - 1	@ Safety check
-	sub	r1, r1, r0
-	cmp	r1, #MAX_AREA_SIZE
-	bgt	cpu_arm926_cache_clean_invalidate_all_r2
-	bic	r1, r1, #DCACHELINESIZE - 1
-	add	r1, r1, #DCACHELINESIZE
-1:	mcr	p15, 0, r0, c7, c5, 1		@ clean I entries
-	add	r0, r0, #DCACHELINESIZE
-	subs	r1, r1, #DCACHELINESIZE
-	bne	1b
-	mov	r0, #0
+ENTRY(arm926_dma_flush_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+#else
+	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+#endif
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

-ENTRY(cpu_arm926_icache_invalidate_page)
-	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+ENTRY(arm926_cache_fns)
+	.long	arm926_flush_kern_cache_all
+	.long	arm926_flush_user_cache_all
+	.long	arm926_flush_user_cache_range
+	.long	arm926_coherent_kern_range
+	.long	arm926_flush_kern_dcache_page
+	.long	arm926_dma_inv_range
+	.long	arm926_dma_clean_range
+	.long	arm926_dma_flush_range
+
+ENTRY(cpu_arm926_dcache_clean_area)
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	subs	r1, r1, #CACHE_DLINESIZE
+	bhi	1b
+#endif
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
/* =============================== PageTable ============================== */
/*
- * cpu_arm926_set_pgd(pgd)
+ * cpu_arm926_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
-ENTRY(cpu_arm926_set_pgd)
+ENTRY(cpu_arm926_switch_mm)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	/* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
...@@ -349,22 +327,6 @@ ENTRY(cpu_arm926_set_pgd)
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr
/*
* cpu_arm926_flush_pmd(pmdp)
*
* Set a level 1 translation table entry, and clean it out of
* any caches such that the MMUs can load it correctly.
*
* pmdp: pointer to PMD entry
*/
.align 5
ENTRY(cpu_arm926_flush_pmd)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
 * cpu_arm926_set_pte(ptep, pte)
 *
...@@ -380,7 +342,7 @@ ENTRY(cpu_arm926_set_pte)
	bic	r2, r2, #3
	orr	r2, r2, #HPTE_TYPE_SMALL

-	tst	r1, #LPTE_USER | LPTE_EXEC	@ User or Exec?
+	tst	r1, #LPTE_USER			@ User?
	orrne	r2, r2, #HPTE_AP_READ

	tst	r1, #LPTE_WRITE | LPTE_DIRTY	@ Write and Dirty?
...@@ -474,31 +436,13 @@ __arm926_setup:
	.type	arm926_processor_functions, #object
arm926_processor_functions:
	.word	v5tej_early_abort
-	.word	cpu_arm926_check_bugs
	.word	cpu_arm926_proc_init
	.word	cpu_arm926_proc_fin
	.word	cpu_arm926_reset
	.word	cpu_arm926_do_idle
+	.word	cpu_arm926_dcache_clean_area
+	.word	cpu_arm926_switch_mm
-	/* cache */
-	.word	cpu_arm926_cache_clean_invalidate_all
-	.word	cpu_arm926_cache_clean_invalidate_range
-	/* dcache */
-	.word	cpu_arm926_dcache_invalidate_range
-	.word	cpu_arm926_dcache_clean_range
-	.word	cpu_arm926_dcache_clean_page
-	.word	cpu_arm926_dcache_clean_entry
-	/* icache */
-	.word	cpu_arm926_icache_invalidate_range
-	.word	cpu_arm926_icache_invalidate_page
-	/* pgtable */
-	.word	cpu_arm926_set_pgd
-	.word	cpu_arm926_flush_pmd
	.word	cpu_arm926_set_pte
	.size	arm926_processor_functions, . - arm926_processor_functions

	.type	cpu_arch_name, #object
...@@ -522,10 +466,10 @@ __arm926_proc_info:
	b	__arm926_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
-	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | \
-		HWCAP_FAST_MULT | HWCAP_JAVA
+	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | HWCAP_JAVA
	.long	cpu_arm926_name
	.long	arm926_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
+	.long	arm926_cache_fns
	.size	__arm926_proc_info, . - __arm926_proc_info
...@@ -10,12 +10,7 @@
 * MMU functions for SA110
 *
 * These are the low level assembler for performing cache and TLB
- * functions on the StrongARM-110, StrongARM-1100 and StrongARM-1110.
- *
- * Note that SA1100 and SA1110 share everything but their name and CPU ID.
- *
- * 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
- *	Flush the read buffer at context switches
+ * functions on the StrongARM-110.
 */
#include <linux/linkage.h>
#include <linux/init.h>
...@@ -25,21 +20,10 @@
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>

-/* This is the maximum size of an area which will be flushed.  If the area
- * is larger than this, then we flush the whole cache
- */
-#define MAX_AREA_SIZE	32768
-
/*
 * the cache line size of the I and D cache
 */
#define DCACHELINESIZE	32

-/*
- * and the page size
- */
-#define PAGESIZE	4096
-
#define FLUSH_OFFSET	32768

	.macro	flush_110_dcache	rd, ra, re
...@@ -53,43 +37,15 @@
	bne	1001b
	.endm
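StrongARM has no clean-D-cache-by-index operation, so flush_110_dcache empties the cache by loading from a reserved cacheable window: one read per 32-byte line over 8 KB displaces every real line, writing dirty data back as a side effect. The same trick in C, assuming FLUSH_BASE maps otherwise-unused cacheable memory:

#define DCACHELINESIZE	32

static void flush_dcache_by_reading(const char *flush_window)
{
	unsigned int off;

	/* 8192/32 = 256 loads, one per cache line */
	for (off = 0; off < 8192; off += DCACHELINESIZE)
		(void)*(volatile const char *)(flush_window + off);
}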
-	.macro	flush_1100_dcache	rd, ra, re
-	ldr	\rd, =flush_base
-	ldr	\ra, [\rd]
-	eor	\ra, \ra, #FLUSH_OFFSET
-	str	\ra, [\rd]
-	add	\re, \ra, #8192			@ only necessary for 8k
-1001:	ldr	\rd, [\ra], #DCACHELINESIZE
-	teq	\re, \ra
-	bne	1001b
-#ifdef FLUSH_BASE_MINICACHE
-	add	\ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
-	add	\re, \ra, #512			@ only 512 bytes
-1002:	ldr	\rd, [\ra], #DCACHELINESIZE
-	teq	\re, \ra
-	bne	1002b
-#endif
-	.endm
-
	.data
-flush_base:	.long FLUSH_BASE
+flush_base:
+	.long	FLUSH_BASE
	.text

-/*
- * cpu_sa110_check_bugs()
- */
-ENTRY(cpu_sa110_check_bugs)
-ENTRY(cpu_sa1100_check_bugs)
-	mrs	ip, cpsr
-	bic	ip, ip, #PSR_F_BIT
-	msr	cpsr, ip
-	mov	pc, lr
-
/*
 * cpu_sa110_proc_init()
 */
ENTRY(cpu_sa110_proc_init)
-ENTRY(cpu_sa1100_proc_init)
	mov	r0, #0
	mcr	p15, 0, r0, c15, c1, 2		@ Enable clock switching
	mov	pc, lr
...@@ -101,7 +57,7 @@ ENTRY(cpu_sa110_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
-	bl	cpu_sa110_cache_clean_invalidate_all	@ clean caches
+	bl	v4wb_flush_kern_cache_all	@ clean caches
1:	mov	r0, #0
	mcr	p15, 0, r0, c15, c2, 2		@ Disable clock switching
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
...@@ -110,13 +66,6 @@ ENTRY(cpu_sa110_proc_fin)
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

-ENTRY(cpu_sa1100_proc_fin)
-	stmfd	sp!, {lr}
-	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
-	msr	cpsr_c, ip
-	bl	cpu_sa1100_cache_clean_invalidate_all	@ clean caches
-	b	1b
/*
 * cpu_sa110_reset(loc)
 *
...@@ -128,7 +77,6 @@ ENTRY(cpu_sa1100_proc_fin)
 */
	.align	5
ENTRY(cpu_sa110_reset)
-ENTRY(cpu_sa1100_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
...@@ -151,204 +99,25 @@ ENTRY(cpu_sa1100_reset)
 *	3 = switch to fast processor clock
 */
	.align	5
-idle:	mcr	p15, 0, r0, c15, c8, 2		@ Wait for interrupt, cache aligned
-	mov	r0, r0				@ safety
-	mov	pc, lr
-
ENTRY(cpu_sa110_do_idle)
-	mov	ip, #0
-	cmp	r0, #4
-	addcc	pc, pc, r0, lsl #2
-	mov	pc, lr
-	b	idle
-	b	idle
-	b	slow_clock
-	b	fast_clock
-
-fast_clock:
-	mcr	p15, 0, ip, c15, c1, 2		@ enable clock switching
-	mov	pc, lr
-
-slow_clock:
-	mcr	p15, 0, ip, c15, c2, 2		@ disable clock switching
-	ldr	r1, =UNCACHEABLE_ADDR		@ load from uncacheable loc
-	ldr	r1, [r1, #0]			@ force switch to MCLK
-	mov	pc, lr
-
-	.align	5
-ENTRY(cpu_sa1100_do_idle)
-	mov	r0, r0				@ 4 nop padding
-	mov	r0, r0
-	mov	r0, r0
-	mov	r0, #0
-	ldr	r1, =UNCACHEABLE_ADDR		@ ptr to uncacheable address
-	mrs	r2, cpsr
-	orr	r3, r2, #192			@ disallow interrupts
-	msr	cpsr_c, r3
-	@ --- aligned to a cache line
-	mcr	p15, 0, r0, c15, c2, 2		@ disable clock switching
-	ldr	r1, [r1, #0]			@ force switch to MCLK
-	mcr	p15, 0, r0, c15, c8, 2		@ wait for interrupt
-	mov	r0, r0				@ safety
-	mcr	p15, 0, r0, c15, c1, 2		@ enable clock switching
-	msr	cpsr_c, r2			@ allow interrupts
-	mov	pc, lr
+	mcr	p15, 0, ip, c15, c2, 2		@ disable clock switching
+	ldr	r1, =UNCACHEABLE_ADDR		@ load from uncacheable loc
+	ldr	r1, [r1, #0]			@ force switch to MCLK
+	mov	r0, r0				@ safety
+	mov	r0, r0				@ safety
+	mov	r0, r0				@ safety
+	mcr	p15, 0, r0, c15, c8, 2		@ Wait for interrupt, cache aligned
+	mov	r0, r0				@ safety
+	mov	r0, r0				@ safety
+	mov	r0, r0				@ safety
+	mcr	p15, 0, r0, c15, c1, 2		@ enable clock switching
	mov	pc, lr
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
/*
* cpu_sa110_cache_clean_invalidate_all (void)
*
* clean and invalidate all cache lines
*
* Note:
* 1. we should preserve r0 at all times
*/
.align 5
ENTRY(cpu_sa110_cache_clean_invalidate_all)
mov r2, #1
cpu_sa110_cache_clean_invalidate_all_r2:
flush_110_dcache r3, ip, r1
mov ip, #0
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
.align 5
ENTRY(cpu_sa1100_cache_clean_invalidate_all)
mov r2, #1
cpu_sa1100_cache_clean_invalidate_all_r2:
flush_1100_dcache r3, ip, r1
mov ip, #0
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r1, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
- * cpu_sa110_cache_clean_invalidate_range(start, end, flags)
+ * cpu_sa110_dcache_clean_area(addr,sz)
 *
- * clean and invalidate all cache lines associated with this area of memory
- *
- * start: Area start address
- * end:   Area end address
- * flags: nonzero for I cache as well
 */
.align 5
ENTRY(cpu_sa110_cache_clean_invalidate_range)
bic r0, r0, #DCACHELINESIZE - 1
sub r3, r1, r0
cmp r3, #MAX_AREA_SIZE
bhi cpu_sa110_cache_clean_invalidate_all_r2
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1
blo 1b
teq r2, #0
movne r0, #0
mcrne p15, 0, r0, c7, c5, 0 @ invalidate I cache
mov pc, lr
ENTRY(cpu_sa1100_cache_clean_invalidate_range)
sub r3, r1, r0
cmp r3, #MAX_AREA_SIZE
bhi cpu_sa1100_cache_clean_invalidate_all_r2
b 1b
/* ================================ D-CACHE =============================== */
/*
* cpu_sa110_dcache_invalidate_range(start, end)
*
* throw away all D-cached data in specified region without an obligation
* to write them back. Note however that we must clean the D-cached entries
* around the boundaries if the start and/or end address are not cache
* aligned.
*
* start: virtual start address
* end: virtual end address
*/
.align 5
ENTRY(cpu_sa110_dcache_invalidate_range)
ENTRY(cpu_sa1100_dcache_invalidate_range)
tst r0, #DCACHELINESIZE - 1
bic r0, r0, #DCACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #DCACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* cpu_sa110_dcache_clean_range(start, end)
*
* For the specified virtual address range, ensure that all caches contain
* clean data, such that peripheral accesses to the physical RAM fetch
* correct data.
*
* start: virtual start address
* end: virtual end address
*/
.align 5
ENTRY(cpu_sa110_dcache_clean_range)
bic r0, r0, #DCACHELINESIZE - 1
sub r1, r1, r0
cmp r1, #MAX_AREA_SIZE
mov r2, #0
bhi cpu_sa110_cache_clean_invalidate_all_r2
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #2 * DCACHELINESIZE
bpl 1b
mcr p15, 0, r2, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(cpu_sa1100_dcache_clean_range)
bic r0, r0, #DCACHELINESIZE - 1
sub r1, r1, r0
cmp r1, #MAX_AREA_SIZE
mov r2, #0
bhi cpu_sa1100_cache_clean_invalidate_all_r2
b 1b
/*
* cpu_sa110_clean_dcache_page(page)
*
* Cleans a single page of dcache so that if we have any future aliased
* mappings, they will be consistent at the time that they are created.
*
* Note:
* 1. we don't need to flush the write buffer in this case.
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/
.align 5
ENTRY(cpu_sa110_dcache_clean_page)
ENTRY(cpu_sa1100_dcache_clean_page)
mov r1, #PAGESIZE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #2 * DCACHELINESIZE
bne 1b
mov pc, lr
/*
* cpu_sa110_dcache_clean_entry(addr)
* *
* Clean the specified entry of any caches such that the MMU * Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data. * translation fetches will obtain correct data.
...@@ -356,48 +125,24 @@ ENTRY(cpu_sa1100_dcache_clean_page) ...@@ -356,48 +125,24 @@ ENTRY(cpu_sa1100_dcache_clean_page)
* addr: cache-unaligned virtual address * addr: cache-unaligned virtual address
*/ */
.align 5 .align 5
-ENTRY(cpu_sa110_dcache_clean_entry)
-ENTRY(cpu_sa1100_dcache_clean_entry)
-	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-	mov	pc, lr
-
-/* ================================ I-CACHE =============================== */
-/*
- * cpu_sa110_icache_invalidate_range(start, end)
- *
- * invalidate a range of virtual addresses from the Icache
- *
- * start: virtual start address
- * end:   virtual end address
- */
-	.align	5
-ENTRY(cpu_sa110_icache_invalidate_range)
-ENTRY(cpu_sa1100_icache_invalidate_range)
-	bic	r0, r0, #DCACHELINESIZE - 1
-1:	mcr	p15, 0, r0, c7, c10, 1		@ Clean D entry
-	add	r0, r0, #DCACHELINESIZE
-	cmp	r0, r1
-	blo	1b
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
-
-ENTRY(cpu_sa110_icache_invalidate_page)
-ENTRY(cpu_sa1100_icache_invalidate_page)
-	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+ENTRY(cpu_sa110_dcache_clean_area)
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, #DCACHELINESIZE
+	subs	r1, r1, #DCACHELINESIZE
+	bhi	1b
	mov	pc, lr
/* =============================== PageTable ============================== */
/*
- * cpu_sa110_set_pgd(pgd)
+ * cpu_sa110_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
-ENTRY(cpu_sa110_set_pgd)
+ENTRY(cpu_sa110_switch_mm)
	flush_110_dcache	r3, ip, r1
	mov	r1, #0
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
...@@ -406,39 +151,6 @@ ENTRY(cpu_sa110_set_pgd)
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr
/*
* cpu_sa1100_set_pgd(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_sa1100_set_pgd)
flush_1100_dcache r3, ip, r1
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_sa110_flush_pmd(pmdp)
*
* Set a level 1 translation table entry, and clean it out of
* any caches such that the MMUs can load it correctly.
*
* pmdp: pointer to PMD entry
*/
.align 5
ENTRY(cpu_sa110_flush_pmd)
ENTRY(cpu_sa1100_flush_pmd)
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
 * cpu_sa110_set_pte(ptep, pte)
 *
...@@ -446,9 +158,6 @@ ENTRY(cpu_sa1100_flush_pmd)
 */
	.align	5
ENTRY(cpu_sa110_set_pte)
-ENTRY(cpu_sa1100_set_pte)
-	tst	r0, #2048
-	streq	r0, [r0, -r0]			@ BUG_ON
	str	r1, [r0], #-2048		@ linux version
	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
...@@ -457,7 +166,7 @@ ENTRY(cpu_sa1100_set_pte)
	bic	r2, r2, #3
	orr	r2, r2, #PTE_TYPE_SMALL

-	tst	r1, #L_PTE_USER | L_PTE_EXEC	@ User or Exec?
+	tst	r1, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
cpu_sa110_name:
	.asciz	"StrongARM-110"

-cpu_sa1100_name:
-	.asciz	"StrongARM-1100"
-
-cpu_sa1110_name:
-	.asciz	"StrongARM-1110"
-
	.align

	__INIT

-__sa1100_setup:
-	mcr	p15, 0, r0, c9, c0, 5		@ Allow read-buffer operations from userland
-	mrc	p15, 0, r0, c1, c0		@ get control register v4
-	bic	r0, r0, #0x0e00			@ ..VI ZFRS BLDP WCAM
-	bic	r0, r0, #0x0002			@ .... 000. .... ..0.
-	orr	r0, r0, #0x003d
-	orr	r0, r0, #0x3100			@ ..11 ...1 ..11 11.1
-	b	__setup_common
-
__sa110_setup:
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, #0x2e00			@ ..VI ZFRS BLDP WCAM
	bic	r0, r0, #0x0002			@ ..0. 000. .... ..0.
	orr	r0, r0, #0x003d
	orr	r0, r0, #0x1100			@ ...1 ...1 ..11 11.1
-__setup_common:
	mov	r10, #0
	mcr	p15, 0, r10, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r10, c7, c10, 4		@ drain write buffer on v4
...@@ -518,66 +213,20 @@ __setup_common: ...@@ -518,66 +213,20 @@ __setup_common:
	.type	sa110_processor_functions, #object
ENTRY(sa110_processor_functions)
	.word	v4_early_abort
-	.word	cpu_sa110_check_bugs
	.word	cpu_sa110_proc_init
	.word	cpu_sa110_proc_fin
	.word	cpu_sa110_reset
	.word	cpu_sa110_do_idle
-	/* cache */
-	.word	cpu_sa110_cache_clean_invalidate_all
-	.word	cpu_sa110_cache_clean_invalidate_range
	/* dcache */
-	.word	cpu_sa110_dcache_invalidate_range
-	.word	cpu_sa110_dcache_clean_range
-	.word	cpu_sa110_dcache_clean_page
-	.word	cpu_sa110_dcache_clean_entry
+	.word	cpu_sa110_dcache_clean_area
-	/* icache */
-	.word	cpu_sa110_icache_invalidate_range
-	.word	cpu_sa110_icache_invalidate_page
	/* pgtable */
-	.word	cpu_sa110_set_pgd
-	.word	cpu_sa110_flush_pmd
+	.word	cpu_sa110_switch_mm
	.word	cpu_sa110_set_pte
	.size	sa110_processor_functions, . - sa110_processor_functions
/*
* SA1100 and SA1110 share the same function calls
*/
.type sa1100_processor_functions, #object
ENTRY(sa1100_processor_functions)
.word v4_early_abort
.word cpu_sa1100_check_bugs
.word cpu_sa1100_proc_init
.word cpu_sa1100_proc_fin
.word cpu_sa1100_reset
.word cpu_sa1100_do_idle
/* cache */
.word cpu_sa1100_cache_clean_invalidate_all
.word cpu_sa1100_cache_clean_invalidate_range
/* dcache */
.word cpu_sa1100_dcache_invalidate_range
.word cpu_sa1100_dcache_clean_range
.word cpu_sa1100_dcache_clean_page
.word cpu_sa1100_dcache_clean_entry
/* icache */
.word cpu_sa1100_icache_invalidate_range
.word cpu_sa1100_icache_invalidate_page
/* pgtable */
.word cpu_sa1100_set_pgd
.word cpu_sa1100_flush_pmd
.word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions
	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4"
...@@ -591,7 +240,6 @@ cpu_elf_name:
	.section ".proc.info", #alloc, #execinstr

-#ifdef CONFIG_CPU_SA110
	.type	__sa110_proc_info,#object
__sa110_proc_info:
	.long	0x4401a100
...@@ -605,37 +253,5 @@ __sa110_proc_info:
	.long	sa110_processor_functions
	.long	v4wb_tlb_fns
	.long	v4wb_user_fns
+	.long	v4wb_cache_fns
	.size	__sa110_proc_info, . - __sa110_proc_info
-#endif
#ifdef CONFIG_CPU_SA1100
.type __sa1100_proc_info,#object
__sa1100_proc_info:
.long 0x4401a110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1100_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.size __sa1100_proc_info, . - __sa1100_proc_info
.type __sa1110_proc_info,#object
__sa1110_proc_info:
.long 0x6901b110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1110_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.size __sa1110_proc_info, . - __sa1110_proc_info
#endif
/*
* linux/arch/arm/mm/proc-sa1100.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for SA1100 and SA1110
*
* These are the low level assembler for performing cache and TLB
* functions on the StrongARM-1100 and StrongARM-1110.
*
* Note that SA1100 and SA1110 share everything but their name and CPU ID.
*
* 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
* Flush the read buffer at context switches
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
/*
* the cache line size of the I and D cache
*/
#define DCACHELINESIZE 32
#define FLUSH_OFFSET 32768
.macro flush_1100_dcache rd, ra, re
ldr \rd, =flush_base
ldr \ra, [\rd]
eor \ra, \ra, #FLUSH_OFFSET
str \ra, [\rd]
add \re, \ra, #8192 @ only necessary for 8k
1001: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1001b
#ifdef FLUSH_BASE_MINICACHE
add \ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
add \re, \ra, #512 @ only 512 bytes
1002: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1002b
#endif
.endm
.data
flush_base:
.long FLUSH_BASE
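The eor with FLUSH_OFFSET at the top of flush_1100_dcache makes flush_base ping-pong between two windows 32 KB apart, so consecutive flushes never hit lines the previous pass just loaded (hits would be satisfied from the cache and displace nothing). A hedged C model of the toggle:

#define FLUSH_OFFSET	32768
#define DCACHELINESIZE	32

static char *flush_base;	/* set to FLUSH_BASE at start, as in .data above */

static void sa1100_flush_dcache_sketch(void)
{
	char *p;

	flush_base = (char *)((unsigned long)flush_base ^ FLUSH_OFFSET);
	for (p = flush_base; p < flush_base + 8192; p += DCACHELINESIZE)
		(void)*(volatile char *)p;	/* each load evicts one line */
	/* when FLUSH_BASE_MINICACHE is defined, the macro walks a further
	 * 512-byte window to purge the minicache as well */
}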
.text
__INIT
/*
* cpu_sa1100_proc_init()
*/
ENTRY(cpu_sa1100_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
mov pc, lr
.previous
/*
* cpu_sa1100_proc_fin()
*
* Prepare the CPU for reset:
* - Disable interrupts
* - Clean and turn off caches.
*/
ENTRY(cpu_sa1100_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
flush_1100_dcache r0, r1, r2 @ clean caches
mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_sa1100_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_sa1100_do_idle(type)
*
* Cause the processor to idle
*
* type: call type:
* 0 = slow idle
* 1 = fast idle
* 2 = switch to slow processor clock
* 3 = switch to fast processor clock
*/
.align 5
ENTRY(cpu_sa1100_do_idle)
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, #0
ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address
@ --- aligned to a cache line
mcr p15, 0, r0, c15, c2, 2 @ disable clock switching
ldr r1, [r1, #0] @ force switch to MCLK
mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
mov pc, lr
/* ================================= CACHE ================================ */
/*
* cpu_sa1100_dcache_clean_area(addr,sz)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
*
* addr: cache-unaligned virtual address
*/
.align 5
ENTRY(cpu_sa1100_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_sa1100_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_sa1100_switch_mm)
flush_1100_dcache r3, ip, r1
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_sa1100_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_sa1100_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
str r2, [r0] @ hardware version
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
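In C terms, the translation above is roughly the following sketch
(the function name is illustrative; the L_PTE_* and PTE_* masks come
from asm/proc/pgtable.h, and bits 2-3, the hardware C and B bits, pass
straight through):

/* Sketch of the Linux->hardware PTE conversion performed above. */
static unsigned long sa1100_mk_hw_pte(unsigned long pte)
{
	/* clear the Linux status bits and AP field, mark as small page */
	unsigned long hw = (pte & ~0xff3) | PTE_TYPE_SMALL;

	if (pte & L_PTE_USER)				/* user readable */
		hw |= PTE_SMALL_AP_URO_SRW;
	if ((pte & (L_PTE_WRITE | L_PTE_DIRTY)) ==
	    (L_PTE_WRITE | L_PTE_DIRTY))		/* writable and dirty */
		hw |= PTE_SMALL_AP_UNO_SRW;		/* both APs => URW_SRW */
	if ((pte & (L_PTE_PRESENT | L_PTE_YOUNG)) !=
	    (L_PTE_PRESENT | L_PTE_YOUNG))		/* not present+young */
		hw = 0;					/* fault entry */
	return hw;
}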
cpu_sa1100_name:
.asciz "StrongARM-1100"
cpu_sa1110_name:
.asciz "StrongARM-1110"
.align
__INIT
__sa1100_setup:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mcr p15, 0, r4, c2, c0 @ load page table pointer
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x0e00 @ ..VI ZFRS BLDP WCAM
bic r0, r0, #0x0002 @ .... 000. .... ..0.
orr r0, r0, #0x003d
orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
mov pc, lr
.text
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
/*
* SA1100 and SA1110 share the same function calls
*/
.type sa1100_processor_functions, #object
ENTRY(sa1100_processor_functions)
.word v4_early_abort
.word cpu_sa1100_proc_init
.word cpu_sa1100_proc_fin
.word cpu_sa1100_reset
.word cpu_sa1100_do_idle
.word cpu_sa1100_dcache_clean_area
.word cpu_sa1100_switch_mm
.word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
.type __sa1100_proc_info,#object
__sa1100_proc_info:
.long 0x4401a110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1100_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1100_proc_info, . - __sa1100_proc_info
.type __sa1110_proc_info,#object
__sa1110_proc_info:
.long 0x6901b110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1110_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1110_proc_info, . - __sa1110_proc_info
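For reference, the first two words of each entry above are a CPU ID
value/mask pair; a sketch of the match the boot code performs (the
lookup really runs in assembler before the MMU is on, and the field
and section-boundary names are assumptions based on asm/procinfo.h):

extern struct proc_info_list __proc_info_begin, __proc_info_end;

static struct proc_info_list *lookup_processor_type(unsigned int cpuid)
{
	struct proc_info_list *p;

	for (p = &__proc_info_begin; p < &__proc_info_end; p++)
		if ((cpuid & p->cpu_mask) == p->cpu_val)
			return p; /* SA-1110: (id & 0xfffffff0) == 0x6901b110 */
	return NULL;
}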
...@@ -27,7 +27,6 @@ EXPORT_SYMBOL(cpu_dcache_invalidate_range); ...@@ -27,7 +27,6 @@ EXPORT_SYMBOL(cpu_dcache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_range); EXPORT_SYMBOL(cpu_icache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_page); EXPORT_SYMBOL(cpu_icache_invalidate_page);
EXPORT_SYMBOL(cpu_set_pgd); EXPORT_SYMBOL(cpu_set_pgd);
EXPORT_SYMBOL(cpu_flush_pmd);
EXPORT_SYMBOL(cpu_set_pte); EXPORT_SYMBOL(cpu_set_pte);
#else #else
EXPORT_SYMBOL(processor); EXPORT_SYMBOL(processor);
......
...@@ -23,10 +23,11 @@ ...@@ -23,10 +23,11 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h> #include <asm/procinfo.h>
#include <asm/hardware.h> #include <asm/hardware.h>
#include <asm/proc/pgtable.h> #include <asm/proc/pgtable.h>
#include <asm/page.h>
#include "proc-macros.S"
/* /*
* This is the maximum size of an area which will be flushed. If the area * This is the maximum size of an area which will be flushed. If the area
...@@ -44,11 +45,6 @@ ...@@ -44,11 +45,6 @@
*/ */
#define CACHESIZE 32768 #define CACHESIZE 32768
/*
* and the page size
*/
#define PAGESIZE 4096
/* /*
* Virtual address used to allocate the cache when flushed * Virtual address used to allocate the cache when flushed
* *
...@@ -111,15 +107,6 @@ clean_addr: .word CLEAN_ADDR ...@@ -111,15 +107,6 @@ clean_addr: .word CLEAN_ADDR
.text .text
/*
* cpu_xscale_check_bugs()
*/
ENTRY(cpu_xscale_check_bugs)
mrs ip, cpsr
bic ip, ip, #PSR_F_BIT
msr cpsr, ip
mov pc, lr
/* /*
* cpu_xscale_proc_init() * cpu_xscale_proc_init()
* *
...@@ -135,11 +122,11 @@ ENTRY(cpu_xscale_proc_fin) ...@@ -135,11 +122,11 @@ ENTRY(cpu_xscale_proc_fin)
str lr, [sp, #-4]! str lr, [sp, #-4]!
mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r0 msr cpsr_c, r0
bl xscale_flush_kern_cache_all @ clean caches
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ........... bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA. bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
bl cpu_xscale_cache_clean_invalidate_all @ clean caches
ldr pc, [sp], #4 ldr pc, [sp], #4
/* /*
...@@ -168,16 +155,10 @@ ENTRY(cpu_xscale_reset) ...@@ -168,16 +155,10 @@ ENTRY(cpu_xscale_reset)
mov pc, r0 mov pc, r0
/* /*
* cpu_xscale_do_idle(type) * cpu_xscale_do_idle()
* *
* Cause the processor to idle * Cause the processor to idle
* *
* type:
* 0 = slow idle
* 1 = fast idle
* 2 = switch to slow processor clock
* 3 = switch to fast processor clock
*
* For now we do nothing but go to idle mode for every case * For now we do nothing but go to idle mode for every case
* *
* XScale supports clock switching, but using idle mode support * XScale supports clock switching, but using idle mode support
...@@ -193,226 +174,179 @@ ENTRY(cpu_xscale_do_idle) ...@@ -193,226 +174,179 @@ ENTRY(cpu_xscale_do_idle)
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
/* /*
* cpu_xscale_cache_clean_invalidate_all (void) * flush_user_cache_all()
* *
* clean and invalidate all cache lines * Invalidate all cache entries in a particular address
* space.
*/
ENTRY(xscale_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
* *
* Note: * Clean and invalidate the entire cache.
* 1. We should preserve r0 at all times.
* 2. Even if this function implies cache "invalidation" by its name,
* we don't need to actually use explicit invalidation operations
* since the goal is to discard all valid references from the cache
* and the cleaning of it already has that effect.
* 3. Because of 2 above and the fact that kernel space memory is always
* coherent across task switches there is no need to worry about
* inconsistencies due to interrupts, hence no irq disabling.
*/ */
.align 5 ENTRY(xscale_flush_kern_cache_all)
ENTRY(cpu_xscale_cache_clean_invalidate_all) mov r2, #VM_EXEC
mov r2, #1 mov ip, #0
cpu_xscale_cache_clean_invalidate_all_r2: __flush_whole_cache:
clean_d_cache r0, r1 clean_d_cache r0, r1
teq r2, #0 tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* /*
* cpu_xscale_cache_clean_invalidate_range(start, end, flags) * flush_user_cache_range(start, end, vm_flags)
* *
* clean and invalidate all cache lines associated with this area of memory * Invalidate a range of cache entries in the specified
* address space.
* *
* start: Area start address * - start - start address (may not be aligned)
* end: Area end address * - end - end address (exclusive, may not be aligned)
* flags: nonzero for I cache as well * - vma - vma_area_struct describing address space
*/ */
.align 5 .align 5
ENTRY(cpu_xscale_cache_clean_invalidate_range) ENTRY(xscale_flush_user_cache_range)
bic r0, r0, #CACHELINESIZE - 1 @ round down to cache line mov ip, #0
sub r3, r1, r0 sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE cmp r3, #MAX_AREA_SIZE
bhi cpu_xscale_cache_clean_invalidate_all_r2 bhs __flush_whole_cache
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
1: tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
teq r2, #0 tst r2, #VM_EXEC
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
moveq pc, lr mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
sub r0, r0, r3
1: mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c5, 6 @ Invalidate BTB
mov pc, lr mov pc, lr
/* /*
* cpu_xscale_flush_ram_page(page) * coherent_kern_range(start, end)
* *
* clean all cache lines associated with this memory page * Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
* *
* page: page to clean * - start - virtual start address
* - end - virtual end address
*/ */
.align 5 ENTRY(xscale_coherent_kern_range)
ENTRY(cpu_xscale_flush_ram_page) bic r0, r0, #CACHELINESIZE - 1
mov r1, #PAGESIZE 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
subs r1, r1, #2 * CACHELINESIZE cmp r0, r1
bne 1b blo 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* ================================ D-CACHE =============================== */
/* /*
* cpu_xscale_dcache_invalidate_range(start, end) * flush_kern_dcache_page(void *page)
* *
* throw away all D-cached data in the specified region without an obligation * Ensure no D cache aliasing occurs, either with itself or
* to write them back. Note however that on XScale we must clean all * the I cache
* entries also due to hardware errata (80200 A0 & A1 only).
* *
* start: virtual start address * - addr - page aligned address
* end: virtual end address
*/ */
.align 5 ENTRY(xscale_flush_kern_dcache_page)
ENTRY(cpu_xscale_dcache_invalidate_range) add r1, r0, #PAGE_SZ
mrc p15, 0, r2, c0, c0, 0 @ Read part no. 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
eor r2, r2, #0x69000000 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
eor r2, r2, #0x00052000 @ 80200 XX part no.
bics r2, r2, #0x1 @ Clear LSB in revision field
moveq r2, #0
beq cpu_xscale_cache_clean_invalidate_range @ An 80200 A0 or A1
tst r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ Clean D cache line
tst r1, #CACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ Clean D cache line
bic r0, r0, #CACHELINESIZE - 1 @ round down to cache line
1: mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* /*
* cpu_xscale_dcache_clean_range(start, end) * dma_inv_range(start, end)
* *
* For the specified virtual address range, ensure that all caches contain * Invalidate (discard) the specified virtual address range.
* clean data, such that peripheral accesses to the physical RAM fetch * May not write back any entries. If 'start' or 'end'
* correct data. * are not cache line aligned, those lines must be written
* back.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(xscale_dma_inv_range)
ENTRY(cpu_xscale_dcache_clean_range) mrc p15, 0, r2, c0, c0, 0 @ read ID
bic r0, r0, #CACHELINESIZE - 1 eor r2, r2, #0x69000000
sub r2, r1, r0 eor r2, r2, #0x00052000
cmp r2, #MAX_AREA_SIZE bics r2, r2, #1
movhi r2, #0 beq xscale_dma_flush_range
bhi cpu_xscale_cache_clean_invalidate_all_r2
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line tst r0, #CACHELINESIZE - 1
add r0, r0, #CACHELINESIZE bic r0, r0, #CACHELINESIZE - 1
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcr p15, 0, r0, c7, c10, 1 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* /*
* cpu_xscale_clean_dcache_page(page) * dma_clean_range(start, end)
* *
* Cleans a single page of dcache so that if we have any future aliased * Clean the specified virtual address range.
* mappings, they will be consistent at the time that they are created.
* *
* Note: * - start - virtual start address
* 1. we don't need to flush the write buffer in this case. [really? -Nico] * - end - virtual end address
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/ */
.align 5 ENTRY(xscale_dma_clean_range)
ENTRY(cpu_xscale_dcache_clean_page) bic r0, r0, #CACHELINESIZE - 1
mov r1, #PAGESIZE 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
subs r1, r1, #4 * CACHELINESIZE cmp r0, r1
bne 1b blo 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcr p15, 0, r0, c7, c10, 1 @ Drain Write (& Fill) Buffer
mov pc, lr
/*
* cpu_xscale_dcache_clean_entry(addr)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
*
* addr: cache-unaligned virtual address
*/
.align 5
ENTRY(cpu_xscale_dcache_clean_entry)
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* ================================ I-CACHE =============================== */
/* /*
* cpu_xscale_icache_invalidate_range(start, end) * dma_flush_range(start, end)
*
* invalidate a range of virtual addresses from the Icache
* *
* start: virtual start address * Clean and invalidate the specified virtual address range.
* end: virtual end address
* *
* Note: This is vaguely defined as supposed to bring the dcache and the * - start - virtual start address
* icache in sync by the way this function is used. * - end - virtual end address
*/ */
.align 5 ENTRY(xscale_dma_flush_range)
ENTRY(cpu_xscale_icache_invalidate_range)
bic r0, r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
mcr p15, 0, ip, c7, c5, 6 @ Invalidate BTB mcr p15, 0, r0, c7, c10, 1 @ Drain Write (& Fill) Buffer
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* ENTRY(xscale_cache_fns)
* cpu_xscale_icache_invalidate_page(page) .long xscale_flush_kern_cache_all
* .long xscale_flush_user_cache_all
* invalidate all Icache lines associated with this area of memory .long xscale_flush_user_cache_range
* .long xscale_coherent_kern_range
* page: page to invalidate .long xscale_flush_kern_dcache_page
*/ .long xscale_dma_inv_range
.align 5 .long xscale_dma_clean_range
ENTRY(cpu_xscale_icache_invalidate_page) .long xscale_dma_flush_range
mov r1, #PAGESIZE
1: mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line ENTRY(cpu_xscale_dcache_clean_area)
add r0, r0, #CACHELINESIZE 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
subs r1, r1, #4 * CACHELINESIZE subs r1, r1, #CACHELINESIZE
bne 1b bhi 1b
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
mov pc, lr mov pc, lr
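/*
 * Note: the entries of xscale_cache_fns above follow the field order
 * of struct cpu_cache_fns (asm/cacheflush.h), through which
 * MULTI_CACHE kernels call these routines indirectly.
 */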
/* ================================ CACHE LOCKING============================ /* ================================ CACHE LOCKING============================
...@@ -553,18 +487,17 @@ ENTRY(xscale_dtlb_unlock) ...@@ -553,18 +487,17 @@ ENTRY(xscale_dtlb_unlock)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
#define PMD_CACHE_WRITE_ALLOCATE 0
#define PTE_CACHE_WRITE_ALLOCATE 0 #define PTE_CACHE_WRITE_ALLOCATE 0
/* /*
* cpu_xscale_set_pgd(pgd) * cpu_xscale_switch_mm(pgd)
* *
* Set the translation base pointer to be as described by pgd. * Set the translation base pointer to be as described by pgd.
* *
* pgd: new page tables * pgd: new page tables
*/ */
.align 5 .align 5
ENTRY(cpu_xscale_set_pgd) ENTRY(cpu_xscale_switch_mm)
clean_d_cache r1, r2 clean_d_cache r1, r2
mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
...@@ -572,21 +505,6 @@ ENTRY(cpu_xscale_set_pgd) ...@@ -572,21 +505,6 @@ ENTRY(cpu_xscale_set_pgd)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
cpwait_ret lr, ip cpwait_ret lr, ip
/*
* cpu_xscale_flush_pmd(pmdp)
*
* Set a level 1 translation table entry, and clean it out of
* any caches such that the MMUs can load it correctly.
*
* pmdp: pointer to PMD entry
*/
.align 5
ENTRY(cpu_xscale_flush_pmd)
mov ip, #0
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
/* /*
* cpu_xscale_set_pte(ptep, pte) * cpu_xscale_set_pte(ptep, pte)
* *
...@@ -603,7 +521,7 @@ ENTRY(cpu_xscale_set_pte) ...@@ -603,7 +521,7 @@ ENTRY(cpu_xscale_set_pte)
eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
tst r3, #L_PTE_USER | L_PTE_EXEC @ User or Exec? tst r3, #L_PTE_USER @ User?
orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
...@@ -631,12 +549,10 @@ ENTRY(cpu_xscale_set_pte) ...@@ -631,12 +549,10 @@ ENTRY(cpu_xscale_set_pte)
@ Erratum 40: The B bit must be cleared for a user read-only @ Erratum 40: The B bit must be cleared for a user read-only
@ cacheable page. @ cacheable page.
@ @
@ B = B & ~((U|E) & C & ~W) @ B = B & ~(U & C & ~W)
@ @
and ip, r1, #L_PTE_USER | L_PTE_EXEC | L_PTE_WRITE | L_PTE_CACHEABLE and ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
teq ip, #L_PTE_USER | L_PTE_CACHEABLE teq ip, #L_PTE_USER | L_PTE_CACHEABLE
teqne ip, #L_PTE_EXEC | L_PTE_CACHEABLE
teqne ip, #L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE
biceq r2, r2, #PTE_BUFFERABLE biceq r2, r2, #PTE_BUFFERABLE
tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
...@@ -696,30 +612,12 @@ __xscale_setup: ...@@ -696,30 +612,12 @@ __xscale_setup:
.type xscale_processor_functions, #object .type xscale_processor_functions, #object
ENTRY(xscale_processor_functions) ENTRY(xscale_processor_functions)
.word xscale_abort .word xscale_abort
.word cpu_xscale_check_bugs
.word cpu_xscale_proc_init .word cpu_xscale_proc_init
.word cpu_xscale_proc_fin .word cpu_xscale_proc_fin
.word cpu_xscale_reset .word cpu_xscale_reset
.word cpu_xscale_do_idle .word cpu_xscale_do_idle
.word cpu_xscale_dcache_clean_area
/* cache */ .word cpu_xscale_switch_mm
.word cpu_xscale_cache_clean_invalidate_all
.word cpu_xscale_cache_clean_invalidate_range
.word cpu_xscale_flush_ram_page
/* dcache */
.word cpu_xscale_dcache_invalidate_range
.word cpu_xscale_dcache_clean_range
.word cpu_xscale_dcache_clean_page
.word cpu_xscale_dcache_clean_entry
/* icache */
.word cpu_xscale_icache_invalidate_range
.word cpu_xscale_icache_invalidate_page
/* pgtable */
.word cpu_xscale_set_pgd
.word cpu_xscale_flush_pmd
.word cpu_xscale_set_pte .word cpu_xscale_set_pte
.size xscale_processor_functions, . - xscale_processor_functions .size xscale_processor_functions, . - xscale_processor_functions
...@@ -749,6 +647,7 @@ __80200_proc_info: ...@@ -749,6 +647,7 @@ __80200_proc_info:
.long xscale_processor_functions .long xscale_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
.long xscale_mc_user_fns .long xscale_mc_user_fns
.long xscale_cache_fns
.size __80200_proc_info, . - __80200_proc_info .size __80200_proc_info, . - __80200_proc_info
.type __80321_proc_info,#object .type __80321_proc_info,#object
...@@ -780,6 +679,7 @@ __pxa250_proc_info: ...@@ -780,6 +679,7 @@ __pxa250_proc_info:
.long xscale_processor_functions .long xscale_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
.long xscale_mc_user_fns .long xscale_mc_user_fns
.long xscale_cache_fns
.size __pxa250_proc_info, . - __pxa250_proc_info .size __pxa250_proc_info, . - __pxa250_proc_info
.type __pxa210_proc_info,#object .type __pxa210_proc_info,#object
......
...@@ -35,6 +35,9 @@ SECTIONS ...@@ -35,6 +35,9 @@ SECTIONS
__setup_start = .; __setup_start = .;
*(.init.setup) *(.init.setup)
__setup_end = .; __setup_end = .;
__early_begin = .;
*(__early_param)
__early_end = .;
__start___param = .; __start___param = .;
*(__param) *(__param)
__stop___param = .; __stop___param = .;
......
...@@ -67,17 +67,17 @@ static void iq80321_copy_to(struct map_info *map, unsigned long to, const void * ...@@ -67,17 +67,17 @@ static void iq80321_copy_to(struct map_info *map, unsigned long to, const void *
} }
static struct map_info iq80321_map = { static struct map_info iq80321_map = {
name = "IQ80321 flash", .name = "IQ80321 flash",
size = WINDOW_SIZE, .size = WINDOW_SIZE,
buswidth = BUSWIDTH, .buswidth = BUSWIDTH,
read8 = iq80321_read8, .read8 = iq80321_read8,
read16 = iq80321_read16, .read16 = iq80321_read16,
read32 = iq80321_read32, .read32 = iq80321_read32,
copy_from = iq80321_copy_from, .copy_from = iq80321_copy_from,
write8 = iq80321_write8, .write8 = iq80321_write8,
write16 = iq80321_write16, .write16 = iq80321_write16,
write32 = iq80321_write32, .write32 = iq80321_write32,
copy_to = iq80321_copy_to .copy_to = iq80321_copy_to
}; };
static struct mtd_partition iq80321_partitions[4] = { static struct mtd_partition iq80321_partitions[4] = {
......
...@@ -55,6 +55,10 @@ ...@@ -55,6 +55,10 @@
#include <asm/system.h> #include <asm/system.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#ifdef __arm__
#include <asm/mach-types.h>
#endif
#include "cyber2000fb.h" #include "cyber2000fb.h"
struct cfb_info { struct cfb_info {
......
/*
* linux/include/asm-arm/arch-iop310/irqs.h
*
* Author: Nicolas Pitre
* Copyright: (C) 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 06/13/01: Added 80310 on-chip interrupt sources <dsaxena@mvista.com>
*
*/
#include <linux/config.h>
/*
* XS80200 specific IRQs
*/
#define IRQ_XS80200_BCU 0 /* Bus Control Unit */
#define IRQ_XS80200_PMU 1 /* Performance Monitoring Unit */
#define IRQ_XS80200_EXTIRQ 2 /* external IRQ signal */
#define IRQ_XS80200_EXTFIQ 3 /* external FIQ signal */
#define NR_XS80200_IRQS 4
#define XSCALE_PMU_IRQ IRQ_XS80200_PMU
/*
* IOP80310 chipset interrupts
*/
#define IOP310_IRQ_OFS NR_XS80200_IRQS
#define IOP310_IRQ(x) (IOP310_IRQ_OFS + (x))
/*
* On FIQ1ISR register
*/
#define IRQ_IOP310_DMA0 IOP310_IRQ(0) /* DMA Channel 0 */
#define IRQ_IOP310_DMA1 IOP310_IRQ(1) /* DMA Channel 1 */
#define IRQ_IOP310_DMA2 IOP310_IRQ(2) /* DMA Channel 2 */
#define IRQ_IOP310_PMON IOP310_IRQ(3) /* Bus Performance Unit */
#define IRQ_IOP310_AAU IOP310_IRQ(4) /* Application Accelerator Unit */
/*
* On FIQ2ISR register
*/
#define IRQ_IOP310_I2C IOP310_IRQ(5) /* I2C unit */
#define IRQ_IOP310_MU IOP310_IRQ(6) /* messaging unit */
#define NR_IOP310_IRQS (IOP310_IRQ(6) + 1)
#define NR_IRQS NR_IOP310_IRQS
/*
* Interrupts available on the Cyclone IQ80310 board
*/
#ifdef CONFIG_ARCH_IQ80310
#define IQ80310_IRQ_OFS NR_IOP310_IRQS
#define IQ80310_IRQ(y) ((IQ80310_IRQ_OFS) + (y))
#define IRQ_IQ80310_TIMER IQ80310_IRQ(0) /* Timer Interrupt */
#define IRQ_IQ80310_I82559 IQ80310_IRQ(1) /* I82559 Ethernet Interrupt */
#define IRQ_IQ80310_UART1 IQ80310_IRQ(2) /* UART1 Interrupt */
#define IRQ_IQ80310_UART2 IQ80310_IRQ(3) /* UART2 Interrupt */
#define IRQ_IQ80310_INTD IQ80310_IRQ(4) /* PCI INTD */
/*
* ONLY AVAILABLE ON REV F OR NEWER BOARDS!
*/
#define IRQ_IQ80310_INTA IQ80310_IRQ(5) /* PCI INTA */
#define IRQ_IQ80310_INTB IQ80310_IRQ(6) /* PCI INTB */
#define IRQ_IQ80310_INTC IQ80310_IRQ(7) /* PCI INTC */
#undef NR_IRQS
#define NR_IRQS (IQ80310_IRQ(7) + 1)
#endif // CONFIG_ARCH_IQ80310
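/*
 * Worked out: NR_XS80200_IRQS is 4, so the IOP310 sources occupy
 * IRQs 4-10 and NR_IOP310_IRQS = IOP310_IRQ(6) + 1 = 11.  On an
 * IQ80310 board the on-board sources start at IQ80310_IRQ(0) = 11,
 * giving NR_IRQS = IQ80310_IRQ(7) + 1 = 19.
 */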
/* /*
* linux/include/asm-arm/bugs.h * linux/include/asm-arm/bugs.h
* *
* Copyright (C) 1995 Russell King * Copyright (C) 1995-2003 Russell King
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -12,6 +12,6 @@ ...@@ -12,6 +12,6 @@
#include <asm/proc-fns.h> #include <asm/proc-fns.h>
#define check_bugs() cpu_check_bugs() #define check_bugs() do { } while (0)
#endif #endif
...@@ -19,14 +19,12 @@ struct task_struct; ...@@ -19,14 +19,12 @@ struct task_struct;
* relies on it. * relies on it.
*/ */
extern struct processor { extern struct processor {
/* check for any bugs */
void (*_check_bugs)(void);
/* Set up any processor specifics */ /* Set up any processor specifics */
void (*_proc_init)(void); void (*_proc_init)(void);
/* Disable any processor specifics */ /* Disable any processor specifics */
void (*_proc_fin)(void); void (*_proc_fin)(void);
/* set the MEMC hardware mappings */ /* set the MEMC hardware mappings */
void (*_set_pgd)(pgd_t *pgd); void (*_switch_mm)(pgd_t *pgd);
/* XCHG */ /* XCHG */
unsigned long (*_xchg_1)(unsigned long x, volatile void *ptr); unsigned long (*_xchg_1)(unsigned long x, volatile void *ptr);
unsigned long (*_xchg_4)(unsigned long x, volatile void *ptr); unsigned long (*_xchg_4)(unsigned long x, volatile void *ptr);
...@@ -36,11 +34,10 @@ extern const struct processor arm2_processor_functions; ...@@ -36,11 +34,10 @@ extern const struct processor arm2_processor_functions;
extern const struct processor arm250_processor_functions; extern const struct processor arm250_processor_functions;
extern const struct processor arm3_processor_functions; extern const struct processor arm3_processor_functions;
#define cpu_check_bugs() processor._check_bugs()
#define cpu_proc_init() processor._proc_init() #define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin() #define cpu_proc_fin() processor._proc_fin()
#define cpu_do_idle() do { } while (0) #define cpu_do_idle() do { } while (0)
#define cpu_switch_mm(pgd,mm) processor._set_pgd(pgd) #define cpu_switch_mm(pgd,mm) processor._switch_mm(pgd)
#define cpu_xchg_1(x,ptr) processor._xchg_1(x,ptr) #define cpu_xchg_1(x,ptr) processor._xchg_1(x,ptr)
#define cpu_xchg_4(x,ptr) processor._xchg_4(x,ptr) #define cpu_xchg_4(x,ptr) processor._xchg_4(x,ptr)
......
...@@ -23,10 +23,6 @@ extern struct processor { ...@@ -23,10 +23,6 @@ extern struct processor {
* get data abort address/flags * get data abort address/flags
*/ */
void (*_data_abort)(unsigned long pc); void (*_data_abort)(unsigned long pc);
/*
* check for any bugs
*/
void (*_check_bugs)(void);
/* /*
* Set up any processor specifics * Set up any processor specifics
*/ */
...@@ -46,96 +42,36 @@ extern struct processor { ...@@ -46,96 +42,36 @@ extern struct processor {
/* /*
* Processor architecture specific * Processor architecture specific
*/ */
struct { /* CACHE */
/*
* flush all caches
*/
void (*clean_invalidate_all)(void);
/*
* flush a specific page or pages
*/
void (*clean_invalidate_range)(unsigned long address, unsigned long end, int flags);
} cache;
struct { /* D-cache */
/*
* invalidate the specified data range
*/
void (*invalidate_range)(unsigned long start, unsigned long end);
/*
* clean specified data range
*/
void (*clean_range)(unsigned long start, unsigned long end);
/*
* obsolete flush cache entry
*/
void (*clean_page)(void *virt_page);
/* /*
* clean a virtual address range from the * clean a virtual address range from the
* D-cache without flushing the cache. * D-cache without flushing the cache.
*/ */
void (*clean_entry)(unsigned long start); void (*dcache_clean_area)(void *addr, int size);
} dcache;
struct { /* I-cache */
/*
* invalidate the I-cache for the specified range
*/
void (*invalidate_range)(unsigned long start, unsigned long end);
/*
* invalidate the I-cache for the specified virtual page
*/
void (*invalidate_page)(void *virt_page);
} icache;
struct { /* PageTable */
/* /*
* Set the page table * Set the page table
*/ */
void (*set_pgd)(unsigned long pgd_phys, struct mm_struct *mm); void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
/*
* Set a PMD (handling IMP bit 4)
*/
void (*flush_pmd)(pmd_t *pmdp);
/* /*
* Set a PTE * Set a PTE
*/ */
void (*set_pte)(pte_t *ptep, pte_t pte); void (*set_pte)(pte_t *ptep, pte_t pte);
} pgtable;
} processor; } processor;
extern const struct processor arm6_processor_functions;
extern const struct processor arm7_processor_functions;
extern const struct processor sa110_processor_functions;
#define cpu_check_bugs() processor._check_bugs()
#define cpu_proc_init() processor._proc_init() #define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin() #define cpu_proc_fin() processor._proc_fin()
#define cpu_reset(addr) processor.reset(addr) #define cpu_reset(addr) processor.reset(addr)
#define cpu_do_idle() processor._do_idle() #define cpu_do_idle() processor._do_idle()
#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
#define cpu_set_pte(ptep, pte) processor.set_pte(ptep, pte)
#define cpu_cache_clean_invalidate_all() processor.cache.clean_invalidate_all() #define cpu_switch_mm(pgd,mm) processor.switch_mm(__virt_to_phys((unsigned long)(pgd)),mm)
#define cpu_cache_clean_invalidate_range(s,e,f) processor.cache.clean_invalidate_range(s,e,f)
#define cpu_dcache_clean_page(vp) processor.dcache.clean_page(vp)
#define cpu_dcache_clean_entry(addr) processor.dcache.clean_entry(addr)
#define cpu_dcache_clean_range(s,e) processor.dcache.clean_range(s,e)
#define cpu_dcache_invalidate_range(s,e) processor.dcache.invalidate_range(s,e)
#define cpu_icache_invalidate_range(s,e) processor.icache.invalidate_range(s,e)
#define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp)
#define cpu_set_pgd(pgd,mm) processor.pgtable.set_pgd(pgd,mm)
#define cpu_flush_pmd(pmdp) processor.pgtable.flush_pmd(pmdp)
#define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte)
#define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm)
#define cpu_get_pgd() \ #define cpu_get_pgd() \
({ \ ({ \
unsigned long pg; \ unsigned long pg; \
__asm__("mrc p15, 0, %0, c2, c0, 0" \ __asm__("mrc p15, 0, %0, c2, c0, 0" \
: "=r" (pg)); \ : "=r" (pg) : : "cc"); \
pg &= ~0x3fff; \ pg &= ~0x3fff; \
(pgd_t *)phys_to_virt(pg); \ (pgd_t *)phys_to_virt(pg); \
}) })
......
...@@ -22,21 +22,12 @@ ...@@ -22,21 +22,12 @@
* function pointers for this lot. Otherwise, we can optimise the * function pointers for this lot. Otherwise, we can optimise the
* table away. * table away.
*/ */
#define cpu_check_bugs __cpu_fn(CPU_NAME,_check_bugs)
#define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init) #define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init)
#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin) #define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin)
#define cpu_reset __cpu_fn(CPU_NAME,_reset) #define cpu_reset __cpu_fn(CPU_NAME,_reset)
#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) #define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle)
#define cpu_cache_clean_invalidate_all __cpu_fn(CPU_NAME,_cache_clean_invalidate_all) #define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area)
#define cpu_cache_clean_invalidate_range __cpu_fn(CPU_NAME,_cache_clean_invalidate_range) #define cpu__switch_mm __cpu_fn(CPU_NAME,_switch_mm)
#define cpu_dcache_invalidate_range __cpu_fn(CPU_NAME,_dcache_invalidate_range)
#define cpu_dcache_clean_range __cpu_fn(CPU_NAME,_dcache_clean_range)
#define cpu_dcache_clean_page __cpu_fn(CPU_NAME,_dcache_clean_page)
#define cpu_dcache_clean_entry __cpu_fn(CPU_NAME,_dcache_clean_entry)
#define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range)
#define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page)
#define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd)
#define cpu_flush_pmd __cpu_fn(CPU_NAME,_flush_pmd)
#define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte) #define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -47,36 +38,22 @@ ...@@ -47,36 +38,22 @@
struct mm_struct; struct mm_struct;
/* declare all the functions as extern */ /* declare all the functions as extern */
extern void cpu_data_abort(unsigned long pc);
extern void cpu_check_bugs(void);
extern void cpu_proc_init(void); extern void cpu_proc_init(void);
extern void cpu_proc_fin(void); extern void cpu_proc_fin(void);
extern int cpu_do_idle(void); extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_cache_clean_invalidate_all(void); extern void cpu__switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_cache_clean_invalidate_range(unsigned long address, unsigned long end, int flags);
extern void cpu_dcache_invalidate_range(unsigned long start, unsigned long end);
extern void cpu_dcache_clean_range(unsigned long start, unsigned long end);
extern void cpu_dcache_clean_page(void *virt_page);
extern void cpu_dcache_clean_entry(unsigned long address);
extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end);
extern void cpu_icache_invalidate_page(void *virt_page);
extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_flush_pmd(pmd_t *pmdp);
extern void cpu_set_pte(pte_t *ptep, pte_t pte); extern void cpu_set_pte(pte_t *ptep, pte_t pte);
extern volatile void cpu_reset(unsigned long addr); extern volatile void cpu_reset(unsigned long addr);
#define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm) #define cpu_switch_mm(pgd,mm) cpu__switch_mm(__virt_to_phys((unsigned long)(pgd)),mm)
#define cpu_get_pgd() \ #define cpu_get_pgd() \
({ \ ({ \
unsigned long pg; \ unsigned long pg; \
__asm__("mrc p15, 0, %0, c2, c0, 0" \ __asm__("mrc p15, 0, %0, c2, c0, 0" \
: "=r" (pg)); \ : "=r" (pg) : : "cc"); \
pg &= ~0x3fff; \ pg &= ~0x3fff; \
(pgd_t *)phys_to_virt(pg); \ (pgd_t *)phys_to_virt(pg); \
}) })
......
...@@ -77,11 +77,14 @@ typedef struct { ...@@ -77,11 +77,14 @@ typedef struct {
#endif #endif
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
/*
* Some compilers get the use of "%?" wrong in the asm below.
*/
#define irq_exit() \ #define irq_exit() \
do { \ do { \
preempt_count() -= IRQ_EXIT_OFFSET; \ preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \ if (!in_interrupt() && softirq_pending(smp_processor_id())) \
__asm__("bl%? __do_softirq": : : "lr");/* out of line */\ __asm__("bl __do_softirq": : : "lr", "cc");/* out of line */\
preempt_enable_no_resched(); \ preempt_enable_no_resched(); \
} while (0) } while (0)
......
...@@ -10,6 +10,62 @@ ...@@ -10,6 +10,62 @@
#include <asm/mman.h> #include <asm/mman.h>
#include <asm/glue.h> #include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
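/*
 * Example: a kernel built for only CONFIG_CPU_SA1100 gets _CACHE =
 * v4wb, so the __cpuc_* macros below glue straight to the v4wb_*
 * entry points (__cpuc_flush_kern_all -> v4wb_flush_kern_cache_all)
 * with no indirection; selecting two or more cache models defines
 * MULTI_CACHE and routes the calls through the cpu_cache table
 * instead.
 */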
/* /*
* This flag is used to indicate that the page pointed to by a pte * This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user. * is dirty and requires cleaning before returning it to the user.
...@@ -17,58 +73,174 @@ ...@@ -17,58 +73,174 @@
#define PG_dcache_dirty PG_arch_1 #define PG_dcache_dirty PG_arch_1
/* /*
* Cache handling for 32-bit ARM processors. * MM Cache Management
* ===================
*
* The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
* implement these methods.
*
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
* See linux/Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects, are cache-type (VIVT/VIPT/PIPT) specific.
*
* flush_cache_kern_all()
*
* Unconditionally clean and invalidate the entire cache.
* *
* Note that on ARM, we have a more accurate specification than that * flush_cache_user_mm(mm)
* Linux's "flush". We therefore do not use "flush" here, but instead
* use:
* *
* clean: the act of pushing dirty cache entries out to memory. * Clean and invalidate all user space cache entries
* invalidate: the act of discarding data held within the cache, * before a change of page tables.
* whether it is dirty or not. *
* flush_cache_user_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address space before a change of page tables.
* - start - user start address (inclusive, page aligned)
* - end - user end address (exclusive, page aligned)
* - flags - vma->vm_flags field
*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
* - start - virtual start address
* - end - virtual end address
*
* DMA Cache Coherency
* ===================
*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* - start - virtual start address
* - end - virtual end address
*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*/
struct cpu_cache_fns {
void (*flush_kern_all)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_page)(void *);
void (*dma_inv_range)(unsigned long, unsigned long);
void (*dma_clean_range)(unsigned long, unsigned long);
void (*dma_flush_range)(unsigned long, unsigned long);
};
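Under MULTI_CACHE the kernel fills in a single global instance of this
structure from the matched proc_info entry at boot, roughly as in the
sketch below (the helper name is illustrative; xscale_cache_fns
earlier in this patch is one such per-CPU table):

struct cpu_cache_fns cpu_cache;

static void __init setup_processor_cache(struct proc_info_list *list)
{
	cpu_cache = *list->cache;	/* __cpuc_* macros now dispatch here */
}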
/*
* Select the calling method
*/ */
#ifdef MULTI_CACHE
extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
/* /*
* Generic I + D cache * These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/ */
#define flush_cache_all() \ #define dmac_inv_range cpu_cache.dma_inv_range
do { \ #define dmac_clean_range cpu_cache.dma_clean_range
cpu_cache_clean_invalidate_all(); \ #define dmac_flush_range cpu_cache.dma_flush_range
} while (0)
#else
/* This is always called for current->mm */
#define flush_cache_mm(_mm) \ #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
do { \ #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
if ((_mm) == current->active_mm) \ #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
cpu_cache_clean_invalidate_all(); \ #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
} while (0) #define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
#define flush_cache_range(_vma,_start,_end) \ extern void __cpuc_flush_kern_all(void);
do { \ extern void __cpuc_flush_user_all(void);
if ((_vma)->vm_mm == current->active_mm) \ extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
cpu_cache_clean_invalidate_range((_start), (_end), 1); \ extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
} while (0) extern void __cpuc_flush_dcache_page(void *);
#define flush_cache_page(_vma,_vmaddr) \
do { \
if ((_vma)->vm_mm == current->active_mm) { \
cpu_cache_clean_invalidate_range((_vmaddr), \
(_vmaddr) + PAGE_SIZE, \
((_vma)->vm_flags & VM_EXEC)); \
} \
} while (0)
/* /*
* D cache only * These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/ */
#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);
#endif
/*
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void flush_cache_mm(struct mm_struct *mm)
{
if (current->active_mm == mm)
__cpuc_flush_user_all();
}
#define invalidate_dcache_range(_s,_e) cpu_dcache_invalidate_range((_s),(_e)) static inline void
#define clean_dcache_range(_s,_e) cpu_dcache_clean_range((_s),(_e)) flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
#define flush_dcache_range(_s,_e) cpu_cache_clean_invalidate_range((_s),(_e),0) {
if (current->active_mm == vma->vm_mm)
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{
if (current->active_mm == vma->vm_mm) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
}
#define clean_dcache_area(start,size) \ /*
cpu_cache_clean_invalidate_range((unsigned long)start, \ * Perform necessary cache operations to ensure that data previously
((unsigned long)start) + size, 0); * stored within this range of addresses can be executed by the CPU.
*/
#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
/*
* Perform necessary cache operations to ensure that the TLB will
* see data written in the specified area.
*/
#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
/* /*
* flush_dcache_page is used when the kernel has written to the page * flush_dcache_page is used when the kernel has written to the page
...@@ -104,18 +276,3 @@ static inline void flush_dcache_page(struct page *page) ...@@ -104,18 +276,3 @@ static inline void flush_dcache_page(struct page *page)
* duplicate cache flushing elsewhere performed by flush_dcache_page(). * duplicate cache flushing elsewhere performed by flush_dcache_page().
*/ */
#define flush_icache_page(vma,page) do { } while (0) #define flush_icache_page(vma,page) do { } while (0)
/*
* I cache coherency stuff.
*
* This *is not* just icache. It is to make data written to memory
* consistent such that instructions fetched from the region are what
* we expect.
*
* This generally means that we have to clean out the Dcache and write
* buffers, and maybe flush the Icache in the specified range.
*/
#define flush_icache_range(_s,_e) \
do { \
cpu_icache_invalidate_range((_s), (_e)); \
} while (0)
...@@ -67,7 +67,7 @@ ...@@ -67,7 +67,7 @@
" blle " #wake \ " blle " #wake \
: \ : \
: "r" (ptr), "I" (1) \ : "r" (ptr), "I" (1) \
: "ip", "lr", "cc"); \ : "ip", "lr", "cc", "memory"); \
}) })
/* /*
...@@ -133,7 +133,7 @@ ...@@ -133,7 +133,7 @@
" bleq " #wake \ " bleq " #wake \
: \ : \
: "r" (ptr), "I" (1) \ : "r" (ptr), "I" (1) \
: "ip", "lr", "cc"); \ : "ip", "lr", "cc", "memory"); \
}) })
#endif #endif
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
* Page table allocation/freeing primitives for 32-bit ARM processors. * Page table allocation/freeing primitives for 32-bit ARM processors.
*/ */
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "pgtable.h" #include "pgtable.h"
/* /*
...@@ -92,7 +93,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) ...@@ -92,7 +93,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE; pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
pmdp[0] = __pmd(pmdval); pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
cpu_flush_pmd(pmdp); flush_pmd_entry(pmdp);
} }
static inline void static inline void
...@@ -105,5 +106,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) ...@@ -105,5 +106,5 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE; pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
pmdp[0] = __pmd(pmdval); pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
cpu_flush_pmd(pmdp); flush_pmd_entry(pmdp);
} }
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */ #define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_UNCACHED (0) #define PMD_SECT_UNCACHED (0)
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE) #define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) #define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) #define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
...@@ -120,14 +121,19 @@ ...@@ -120,14 +121,19 @@
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#define pmd_bad(pmd) (pmd_val(pmd) & 2) #define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd) do { *pmdp = pmd; cpu_flush_pmd(pmdp); } while (0)
static inline void pmd_clear(pmd_t *pmdp) #define set_pmd(pmdp,pmd) \
{ do { \
pmdp[0] = __pmd(0); *pmdp = pmd; \
pmdp[1] = __pmd(0); flush_pmd_entry(pmdp); \
cpu_flush_pmd(pmdp); } while (0)
}
#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
static inline pte_t *pmd_page_kernel(pmd_t pmd) static inline pte_t *pmd_page_kernel(pmd_t pmd)
{ {
......
...@@ -15,12 +15,16 @@ ...@@ -15,12 +15,16 @@
#define set_cr(x) \ #define set_cr(x) \
__asm__ __volatile__( \ __asm__ __volatile__( \
"mcr p15, 0, %0, c1, c0, 0 @ set CR" \ "mcr p15, 0, %0, c1, c0, 0 @ set CR" \
: : "r" (x)) : : "r" (x) : "cc")
#define get_cr(x) \ #define get_cr() \
({ \
unsigned int __val; \
__asm__ __volatile__( \ __asm__ __volatile__( \
"mrc p15, 0, %0, c1, c0, 0 @ get CR" \ "mrc p15, 0, %0, c1, c0, 0 @ get CR" \
: "=r" (x)) : "=r" (__val) : : "cc"); \
__val; \
})
#define CR_M (1 << 0) /* MMU enable */ #define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */ #define CR_A (1 << 1) /* Alignment abort enable */
...@@ -47,16 +51,6 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */ ...@@ -47,16 +51,6 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
#define vectors_base() (0) #define vectors_base() (0)
#endif #endif
/*
* Save the current interrupt enable state.
*/
#define local_save_flags(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_save_flags" \
: "=r" (x) : : "memory"); \
})
/* /*
* Save the current interrupt enable state & disable IRQs * Save the current interrupt enable state & disable IRQs
*/ */
...@@ -70,7 +64,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */ ...@@ -70,7 +64,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %1" \ " msr cpsr_c, %1" \
: "=r" (x), "=r" (temp) \ : "=r" (x), "=r" (temp) \
: \ : \
: "memory"); \ : "memory", "cc"); \
}) })
/* /*
...@@ -85,7 +79,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */ ...@@ -85,7 +79,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %0" \ " msr cpsr_c, %0" \
: "=r" (temp) \ : "=r" (temp) \
: \ : \
: "memory"); \ : "memory", "cc"); \
}) })
/* /*
...@@ -100,7 +94,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */ ...@@ -100,7 +94,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %0" \ " msr cpsr_c, %0" \
: "=r" (temp) \ : "=r" (temp) \
: \ : \
: "memory"); \ : "memory", "cc"); \
}) })
/* /*
...@@ -115,7 +109,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */ ...@@ -115,7 +109,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %0" \ " msr cpsr_c, %0" \
: "=r" (temp) \ : "=r" (temp) \
: \ : \
: "memory"); \ : "memory", "cc"); \
}) })
/* /*
...@@ -130,7 +124,17 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */ ...@@ -130,7 +124,17 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
" msr cpsr_c, %0" \ " msr cpsr_c, %0" \
: "=r" (temp) \ : "=r" (temp) \
: \ : \
: "memory"); \ : "memory", "cc"); \
})
/*
* Save the current interrupt enable state.
*/
#define local_save_flags(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_save_flags" \
: "=r" (x) : : "memory", "cc"); \
}) })
/* /*
...@@ -141,7 +145,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */ ...@@ -141,7 +145,7 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */
"msr cpsr_c, %0 @ local_irq_restore\n" \ "msr cpsr_c, %0 @ local_irq_restore\n" \
: \ : \
: "r" (x) \ : "r" (x) \
: "memory") : "memory", "cc")
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/* /*
...@@ -186,12 +190,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size ...@@ -186,12 +190,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]" case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
: "=&r" (ret) : "=&r" (ret)
: "r" (x), "r" (ptr) : "r" (x), "r" (ptr)
: "memory"); : "memory", "cc");
break; break;
case 4: __asm__ __volatile__ ("swp %0, %1, [%2]" case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
: "=&r" (ret) : "=&r" (ret)
: "r" (x), "r" (ptr) : "r" (x), "r" (ptr)
: "memory"); : "memory", "cc");
break; break;
#endif #endif
default: __bad_xchg(ptr, size), ret = 0; default: __bad_xchg(ptr, size), ret = 0;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#define TLB_V4_D_FULL (1 << 10) #define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11) #define TLB_V4_I_FULL (1 << 11)
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31) #define TLB_WB (1 << 31)
/* /*
...@@ -65,7 +66,7 @@ ...@@ -65,7 +66,7 @@
# define v4_always_flags (-1UL) # define v4_always_flags (-1UL)
#endif #endif
#define v4wbi_tlb_flags (TLB_WB | \ #define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \ TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE) TLB_V4_I_PAGE | TLB_V4_D_PAGE)
...@@ -84,7 +85,7 @@ ...@@ -84,7 +85,7 @@
# define v4wbi_always_flags (-1UL) # define v4wbi_always_flags (-1UL)
#endif #endif
#define v4wb_tlb_flags (TLB_WB | \ #define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \ TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE) TLB_V4_D_PAGE)
...@@ -287,6 +288,41 @@ static inline void flush_tlb_kernel_page(unsigned long kaddr) ...@@ -287,6 +288,41 @@ static inline void flush_tlb_kernel_page(unsigned long kaddr)
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero)); asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
} }
/*
* flush_pmd_entry
*
* Flush a PMD entry (word aligned, or double-word aligned) to
* RAM if the TLB for the CPU we are running on requires this.
* This is typically used when we are creating PMD entries.
*
* clean_pmd_entry
*
* Clean (but don't drain the write buffer) if the CPU requires
* these operations. This is typically used when we are removing
* PMD entries.
*/
static inline void flush_pmd_entry(pmd_t *pmd)
{
const unsigned int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_DCLEAN))
asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
: : "r" (pmd));
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd"
: : "r" (zero));
}
static inline void clean_pmd_entry(pmd_t *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_DCLEAN))
asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
: : "r" (pmd));
}
#undef tlb_flag #undef tlb_flag
#undef always_tlb_flags #undef always_tlb_flags
#undef possible_tlb_flags #undef possible_tlb_flags
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
struct cpu_tlb_fns; struct cpu_tlb_fns;
struct cpu_user_fns; struct cpu_user_fns;
struct cpu_cache_fns;
struct processor; struct processor;
/* /*
...@@ -37,13 +38,14 @@ struct proc_info_list { ...@@ -37,13 +38,14 @@ struct proc_info_list {
struct processor *proc; struct processor *proc;
struct cpu_tlb_fns *tlb; struct cpu_tlb_fns *tlb;
struct cpu_user_fns *user; struct cpu_user_fns *user;
struct cpu_cache_fns *cache;
}; };
extern unsigned int elf_hwcap; extern unsigned int elf_hwcap;
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#define PROC_INFO_SZ 44 #define PROC_INFO_SZ 48
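/* 44 -> 48: proc_info_list gained one 4-byte pointer (the cpu_cache_fns entry). */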
#define HWCAP_SWP 1 #define HWCAP_SWP 1
#define HWCAP_HALF 2 #define HWCAP_HALF 2
......
...@@ -202,4 +202,17 @@ struct meminfo { ...@@ -202,4 +202,17 @@ struct meminfo {
extern struct meminfo meminfo; extern struct meminfo meminfo;
/*
* Early command line parameters.
*/
struct early_params {
const char *arg;
void (*fn)(char **p);
};
#define __early_param(name,fn) \
static struct early_params __early_##fn \
__attribute__((section("__early_param"), unused)) = \
{ name, fn }
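A typical registration looks like the sketch below (early_mem is
illustrative, not from this patch; memparse is the kernel's size
parser). setup.c scans the entries placed between __early_begin and
__early_end by the vmlinux.lds change earlier in this patch and calls
fn with a pointer to the text following the matched prefix:

static void __init early_mem(char **p)
{
	unsigned long size = memparse(*p, p);	/* e.g. "mem=64M" */
	/* ... record the memory bank ... */
}
__early_param("mem=", early_mem);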
#endif #endif
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#define __ASMARM_TLB_H #define __ASMARM_TLB_H
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/cacheflush.h>
/* /*
* TLB handling. This allows us to remove pages from the page * TLB handling. This allows us to remove pages from the page
...@@ -26,6 +27,7 @@ ...@@ -26,6 +27,7 @@
struct mmu_gather { struct mmu_gather {
struct mm_struct *mm; struct mm_struct *mm;
unsigned int freed; unsigned int freed;
unsigned int fullmm;
unsigned int flushes; unsigned int flushes;
unsigned int avoided_flushes; unsigned int avoided_flushes;
...@@ -41,6 +43,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) ...@@ -41,6 +43,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
tlb->mm = mm; tlb->mm = mm;
tlb->freed = 0; tlb->freed = 0;
tlb->fullmm = full_mm_flush;
return tlb; return tlb;
} }
...@@ -68,7 +71,13 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) ...@@ -68,7 +71,13 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
} }
#define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0) #define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0)
#define tlb_start_vma(tlb,vma) do { } while (0)
#define tlb_start_vma(tlb,vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb,vma) do { } while (0) #define tlb_end_vma(tlb,vma) do { } while (0)
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
......