Commit 5f97f7f9 authored by Haavard Skinnemoen, committed by Linus Torvalds

[PATCH] avr32 architecture

This adds support for the Atmel AVR32 architecture as well as the AT32AP7000
CPU and the ATSTK1000 development board.

AVR32 is a new high-performance 32-bit RISC microprocessor core, designed for
cost-sensitive embedded applications, with particular emphasis on low power
consumption and high code density.  The AVR32 architecture is not binary
compatible with earlier 8-bit AVR architectures.

The AVR32 architecture, including the instruction set, is described by the
AVR32 Architecture Manual, available from

http://www.atmel.com/dyn/resources/prod_documents/doc32000.pdf

The Atmel AT32AP7000 is the first CPU implementing the AVR32 architecture.  It
features a 7-stage pipeline, 16KB instruction and data caches and a full
Memory Management Unit.  It also comes with a large set of integrated
peripherals, many of which are shared with the AT91 ARM-based controllers from
Atmel.

The full data sheet is available from

http://www.atmel.com/dyn/resources/prod_documents/doc32003.pdf

while the CPU core implementation including caches and MMU is documented by
the AVR32 AP Technical Reference, available from

http://www.atmel.com/dyn/resources/prod_documents/doc32001.pdf

Information about the ATSTK1000 development board can be found at

http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3918

including a BSP CD image with an earlier version of this patch, development
tools (binaries and source/patches) and a root filesystem image suitable for
booting from SD card.

Alternatively, there's a preliminary "getting started" guide available at
http://avr32linux.org/twiki/bin/view/Main/GettingStarted which provides links
to the sources and patches you will need in order to set up a cross-compiling
environment for avr32-linux.
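
For reference, a cross-build with this patch applied would typically look
something like the following sketch, which assumes an avr32-linux- toolchain
from the BSP is already on your PATH (atstk1002_defconfig and the uImage
target are the ones provided by this patch):

  make ARCH=avr32 CROSS_COMPILE=avr32-linux- atstk1002_defconfig
  make ARCH=avr32 CROSS_COMPILE=avr32-linux- uImage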

This patch, as well as the other patches included with the BSP and the
toolchain patches, is actively supported by Atmel Corporation.

[dmccr@us.ibm.com: Fix more pxx_page macro locations]
[bunk@stusta.de: fix `make defconfig']
Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Dave McCracken <dmccr@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 53e62d3a
@@ -443,6 +443,23 @@ W: http://people.redhat.com/sgrubb/audit/
T: git kernel.org:/pub/scm/linux/kernel/git/dwmw2/audit-2.6.git
S: Maintained
AVR32 ARCHITECTURE
P: Atmel AVR32 Support Team
M: avr32@atmel.com
P: Haavard Skinnemoen
M: hskinnemoen@atmel.com
W: http://www.atmel.com/products/AVR32/
W: http://avr32linux.org/
W: http://avrfreaks.net/
S: Supported
AVR32/AT32AP MACHINE SUPPORT
P: Atmel AVR32 Support Team
M: avr32@atmel.com
P: Haavard Skinnemoen
M: hskinnemoen@atmel.com
S: Supported
AX.25 NETWORK LAYER
P: Ralf Baechle
M: ralf@linux-mips.org
#
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
#
mainmenu "Linux Kernel Configuration"
config AVR32
bool
default y
# With EMBEDDED=n, we get lots of stuff automatically selected
# that we usually don't need on AVR32.
select EMBEDDED
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
emphasis on low power consumption and high code density.
There is an AVR32 Linux project with a web page at
http://avr32linux.org/.
config UID16
bool
config GENERIC_HARDIRQS
bool
default y
config HARDIRQS_SW_RESEND
bool
default y
config GENERIC_IRQ_PROBE
bool
default y
config RWSEM_GENERIC_SPINLOCK
bool
default y
config GENERIC_TIME
bool
default y
config RWSEM_XCHGADD_ALGORITHM
bool
config GENERIC_BUST_SPINLOCK
bool
config GENERIC_HWEIGHT
bool
default y
config GENERIC_CALIBRATE_DELAY
bool
default y
source "init/Kconfig"
menu "System Type and features"
config SUBARCH_AVR32B
bool
config MMU
bool
config PERFORMANCE_COUNTERS
bool
config PLATFORM_AT32AP
bool
select SUBARCH_AVR32B
select MMU
select PERFORMANCE_COUNTERS
choice
prompt "AVR32 CPU type"
default CPU_AT32AP7000
config CPU_AT32AP7000
bool "AT32AP7000"
select PLATFORM_AT32AP
endchoice
#
# CPU Daughterboards for ATSTK1000
config BOARD_ATSTK1002
bool
choice
prompt "AVR32 board type"
default BOARD_ATSTK1000
config BOARD_ATSTK1000
bool "ATSTK1000 evaluation board"
select BOARD_ATSTK1002 if CPU_AT32AP7000
endchoice
choice
prompt "Boot loader type"
default LOADER_U_BOOT
config LOADER_U_BOOT
bool "U-Boot (or similar) bootloader"
endchoice
config LOAD_ADDRESS
hex
default 0x10000000 if LOADER_U_BOOT=y && CPU_AT32AP7000=y
config ENTRY_ADDRESS
hex
default 0x90000000 if LOADER_U_BOOT=y && CPU_AT32AP7000=y
config PHYS_OFFSET
hex
default 0x10000000 if CPU_AT32AP7000=y
source "kernel/Kconfig.preempt"
config HAVE_ARCH_BOOTMEM_NODE
bool
default n
config ARCH_HAVE_MEMORY_PRESENT
bool
default n
config NEED_NODE_MEMMAP_SIZE
bool
default n
config ARCH_FLATMEM_ENABLE
bool
default y
config ARCH_DISCONTIGMEM_ENABLE
bool
default n
config ARCH_SPARSEMEM_ENABLE
bool
default n
source "mm/Kconfig"
config OWNERSHIP_TRACE
bool "Ownership trace support"
default y
help
Say Y to generate an Ownership Trace message on every context switch,
enabling Nexus-compliant debuggers to keep track of the PID of the
currently executing task.
# FPU emulation goes here
source "kernel/Kconfig.hz"
config CMDLINE
string "Default kernel command line"
default ""
help
If you don't have a boot loader capable of passing a command line string
to the kernel, you may specify one here. As a minimum, you should specify
the memory size and the root device (e.g., mem=8M, root=/dev/nfs).
endmenu
menu "Bus options"
config PCI
bool
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
endmenu
menu "Executable file formats"
source "fs/Kconfig.binfmt"
endmenu
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
source "arch/avr32/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
source "lib/Kconfig.debug"
config KPROBES
bool "Kprobes"
depends on DEBUG_KERNEL
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
a probepoint and specifies the callback. Kprobes is useful
for kernel debugging, non-intrusive instrumentation and testing.
If in doubt, say "N".
endmenu
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2004-2006 Atmel Corporation.
# Default target when executing plain make
.PHONY: all
all: uImage vmlinux.elf linux.lst
KBUILD_DEFCONFIG := atstk1002_defconfig
CFLAGS += -pipe -fno-builtin -mno-pic
AFLAGS += -mrelax -mno-pic
CFLAGS_MODULE += -mno-relax
LDFLAGS_vmlinux += --relax
cpuflags-$(CONFIG_CPU_AT32AP7000) += -mcpu=ap7000
CFLAGS += $(cpuflags-y)
AFLAGS += $(cpuflags-y)
CHECKFLAGS += -D__avr32__
LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
head-y += arch/avr32/kernel/head.o
core-$(CONFIG_PLATFORM_AT32AP) += arch/avr32/mach-at32ap/
core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/
core-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/
core-y += arch/avr32/kernel/
core-y += arch/avr32/mm/
libs-y += arch/avr32/lib/ #$(LIBGCC)
archincdir-$(CONFIG_PLATFORM_AT32AP) := arch-at32ap
include/asm-avr32/.arch: $(wildcard include/config/platform/*.h) include/config/auto.conf
@echo ' SYMLINK include/asm-avr32/arch -> include/asm-avr32/$(archincdir-y)'
ifneq ($(KBUILD_SRC),)
$(Q)mkdir -p include/asm-avr32
$(Q)ln -fsn $(srctree)/include/asm-avr32/$(archincdir-y) include/asm-avr32/arch
else
$(Q)ln -fsn $(archincdir-y) include/asm-avr32/arch
endif
@touch $@
archprepare: include/asm-avr32/.arch
BOOT_TARGETS := vmlinux.elf vmlinux.bin uImage uImage.srec
.PHONY: $(BOOT_TARGETS) install
boot := arch/$(ARCH)/boot/images
KBUILD_IMAGE := $(boot)/uImage
vmlinux.elf: KBUILD_IMAGE := $(boot)/vmlinux.elf
vmlinux.cso: KBUILD_IMAGE := $(boot)/vmlinux.cso
uImage.srec: KBUILD_IMAGE := $(boot)/uImage.srec
uImage: KBUILD_IMAGE := $(boot)/uImage
quiet_cmd_listing = LST $@
cmd_listing = avr32-linux-objdump $(OBJDUMPFLAGS) -lS $< > $@
quiet_cmd_disasm = DIS $@
cmd_disasm = avr32-linux-objdump $(OBJDUMPFLAGS) -d $< > $@
vmlinux.elf vmlinux.bin uImage.srec uImage vmlinux.cso: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
linux.s: vmlinux
$(call if_changed,disasm)
linux.lst: vmlinux
$(call if_changed,listing)
define archhelp
@echo '* vmlinux.elf - ELF image with load address 0'
@echo ' vmlinux.cso - PathFinder CSO image'
@echo ' uImage - Create a bootable image for U-Boot'
endef
obj-y += setup.o spi.o
obj-$(CONFIG_BOARD_ATSTK1002) += atstk1002.o
/*
* ATSTK1002 daughterboard-specific init code
*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <asm/arch/board.h>
struct eth_platform_data __initdata eth0_data = {
.valid = 1,
.mii_phy_addr = 0x10,
.is_rmii = 0,
.hw_addr = { 0x6a, 0x87, 0x71, 0x14, 0xcd, 0xcb },
};
extern struct lcdc_platform_data atstk1000_fb0_data;
static int __init atstk1002_init(void)
{
at32_add_system_devices();
at32_add_device_usart(1); /* /dev/ttyS0 */
at32_add_device_usart(2); /* /dev/ttyS1 */
at32_add_device_usart(3); /* /dev/ttyS2 */
at32_add_device_eth(0, &eth0_data);
at32_add_device_spi(0);
at32_add_device_lcdc(0, &atstk1000_fb0_data);
return 0;
}
postcore_initcall(atstk1002_init);
/*
* ATSTK1000 board-specific setup code.
*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/setup.h>
#include <asm/arch/board.h>
/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;
struct lcdc_platform_data __initdata atstk1000_fb0_data;
asmlinkage void __init board_early_init(void)
{
extern void sdram_init(void);
#ifdef CONFIG_LOADER_STANDALONE
sdram_init();
#endif
}
void __init board_setup_fbmem(unsigned long fbmem_start,
unsigned long fbmem_size)
{
if (!fbmem_size)
return;
if (!fbmem_start) {
void *fbmem;
fbmem = alloc_bootmem_low_pages(fbmem_size);
fbmem_start = __pa(fbmem);
} else {
pg_data_t *pgdat;
for_each_online_pgdat(pgdat) {
if (fbmem_start >= pgdat->bdata->node_boot_start
&& fbmem_start <= pgdat->bdata->node_low_pfn)
reserve_bootmem_node(pgdat, fbmem_start,
fbmem_size);
}
}
printk("%luKiB framebuffer memory at address 0x%08lx\n",
fbmem_size >> 10, fbmem_start);
atstk1000_fb0_data.fbmem_start = fbmem_start;
atstk1000_fb0_data.fbmem_size = fbmem_size;
}
/*
* ATSTK1000 SPI devices
*
* Copyright (C) 2005 Atmel Norway
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/spi/spi.h>
static struct spi_board_info spi_board_info[] __initdata = {
{
.modalias = "ltv350qv",
.max_speed_hz = 16000000,
.bus_num = 0,
.chip_select = 1,
},
};
static int board_init_spi(void)
{
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
return 0;
}
arch_initcall(board_init_spi);
#
# Copyright (C) 2004-2006 Atmel Corporation
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
MKIMAGE := $(srctree)/scripts/mkuboot.sh
extra-y := vmlinux.bin vmlinux.gz
OBJCOPYFLAGS_vmlinux.bin := -O binary
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A avr32 -O linux -T kernel \
-C gzip -a $(CONFIG_LOAD_ADDRESS) -e $(CONFIG_ENTRY_ADDRESS) \
-n 'Linux-$(KERNELRELEASE)' -d $< $@
targets += uImage uImage.srec
$(obj)/uImage: $(obj)/vmlinux.gz
$(call if_changed,uimage)
@echo ' Image $@ is ready'
OBJCOPYFLAGS_uImage.srec := -I binary -O srec
$(obj)/uImage.srec: $(obj)/uImage
$(call if_changed,objcopy)
OBJCOPYFLAGS_vmlinux.elf := --change-section-lma .text-0x80000000 \
--change-section-lma __ex_table-0x80000000 \
--change-section-lma .rodata-0x80000000 \
--change-section-lma .data-0x80000000 \
--change-section-lma .init-0x80000000 \
--change-section-lma .bss-0x80000000 \
--change-section-lma .initrd-0x80000000 \
--change-section-lma __param-0x80000000 \
--change-section-lma __ksymtab-0x80000000 \
--change-section-lma __ksymtab_gpl-0x80000000 \
--change-section-lma __kcrctab-0x80000000 \
--change-section-lma __kcrctab_gpl-0x80000000 \
--change-section-lma __ksymtab_strings-0x80000000 \
--change-section-lma .got-0x80000000 \
--set-start 0xa0000000
$(obj)/vmlinux.elf: vmlinux FORCE
$(call if_changed,objcopy)
quiet_cmd_sfdwarf = SFDWARF $@
cmd_sfdwarf = sfdwarf $< TO $@ GNUAVR IW $(SFDWARF_FLAGS) > $(obj)/sfdwarf.log
$(obj)/vmlinux.cso: $(obj)/vmlinux.elf FORCE
$(call if_changed,sfdwarf)
install: $(BOOTIMAGE)
sh $(srctree)/install-kernel.sh $<
# Generated files to be removed upon make clean
clean-files := vmlinux* uImage uImage.srec
extra-y := head.o
obj-y := empty.o
/*
* Startup code for use with the u-boot bootloader.
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/setup.h>
/*
* The kernel is loaded where we want it to be and all caches
* have just been flushed. We get two parameters from u-boot:
*
* r12 contains a magic number (ATAG_MAGIC)
* r11 points to a tag table providing information about
* the system.
*/
.section .init.text,"ax"
.global _start
_start:
/* Check if the boot loader actually provided a tag table */
lddpc r0, magic_number
cp.w r12, r0
brne no_tag_table
/* Initialize .bss */
lddpc r2, bss_start_addr
lddpc r3, end_addr
mov r0, 0
mov r1, 0
1: st.d r2++, r0
cp r2, r3
brlo 1b
/*
* Save the tag table address for later use. This must be done
* _after_ .bss has been initialized...
*/
lddpc r0, tag_table_addr
st.w r0[0], r11
/* Jump to loader-independent setup code */
rjmp kernel_entry
.align 2
magic_number:
.long ATAG_MAGIC
tag_table_addr:
.long bootloader_tags
bss_start_addr:
.long __bss_start
end_addr:
.long _end
no_tag_table:
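/* Pass the address of the message at 2: below in r12, panic()'s first argument */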
sub r12, pc, (. - 2f)
bral panic
2: .asciz "Boot loader didn't provide correct magic number\n"
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.18-rc1
# Tue Jul 11 12:41:36 2006
#
CONFIG_AVR32=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
#
# General setup
#
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
# CONFIG_BASE_FULL is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
CONFIG_SHMEM=y
# CONFIG_SLAB is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_SLOB=y
#
# Loadable module support
#
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
# CONFIG_KMOD is not set
#
# Block layer
#
# CONFIG_BLK_DEV_IO_TRACE is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
# CONFIG_IOSCHED_AS is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
#
# System Type and features
#
CONFIG_SUBARCH_AVR32B=y
CONFIG_MMU=y
CONFIG_PERFORMANCE_COUNTERS=y
CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP7000=y
CONFIG_BOARD_ATSTK1002=y
CONFIG_BOARD_ATSTK1000=y
CONFIG_LOADER_U_BOOT=y
CONFIG_LOAD_ADDRESS=0x10000000
CONFIG_ENTRY_ADDRESS=0x90000000
CONFIG_PHYS_OFFSET=0x10000000
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
# CONFIG_OWNERSHIP_TRACE is not set
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
CONFIG_CMDLINE=""
#
# Bus options
#
#
# PCCARD (PCMCIA/CardBus) support
#
# CONFIG_PCCARD is not set
#
# Executable file formats
#
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
#
# Networking
#
CONFIG_NET=y
#
# Networking options
#
# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
# CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IP_FIB_HASH=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
# CONFIG_IP_PNP_BOOTP is not set
# CONFIG_IP_PNP_RARP is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_ARPD is not set
# CONFIG_SYN_COOKIES is not set
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
# CONFIG_INET6_XFRM_TUNNEL is not set
# CONFIG_INET6_TUNNEL is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
#
# DCCP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_DCCP is not set
#
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
#
# TIPC Configuration (EXPERIMENTAL)
#
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_IEEE80211 is not set
#
# Device Drivers
#
#
# Generic Driver Options
#
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_SYS_HYPERVISOR is not set
#
# Connector - unified userspace <-> kernelspace linker
#
# CONFIG_CONNECTOR is not set
#
# Memory Technology Devices (MTD)
#
# CONFIG_MTD is not set
#
# Parallel port support
#
# CONFIG_PARPORT is not set
#
# Plug and Play support
#
#
# Block devices
#
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=m
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
#
# ATA/ATAPI/MFM/RLL support
#
# CONFIG_IDE is not set
#
# SCSI device support
#
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
#
# Multi-device support (RAID and LVM)
#
# CONFIG_MD is not set
#
# Fusion MPT device support
#
# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support
#
#
# I2O device support
#
#
# Network device support
#
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
CONFIG_TUN=m
#
# PHY device support
#
# CONFIG_PHYLIB is not set
#
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_MACB=y
#
# Ethernet (1000 Mbit)
#
#
# Ethernet (10000 Mbit)
#
#
# Token Ring devices
#
#
# Wireless LAN (non-hamradio)
#
# CONFIG_NET_RADIO is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
# CONFIG_PPP_FILTER is not set
CONFIG_PPP_ASYNC=m
# CONFIG_PPP_SYNC_TTY is not set
CONFIG_PPP_DEFLATE=m
# CONFIG_PPP_BSDCOMP is not set
# CONFIG_PPP_MPPE is not set
# CONFIG_PPPOE is not set
# CONFIG_SLIP is not set
# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
#
# CONFIG_ISDN is not set
#
# Telephony Support
#
# CONFIG_PHONE is not set
#
# Input device support
#
# CONFIG_INPUT is not set
#
# Hardware I/O ports
#
# CONFIG_SERIO is not set
# CONFIG_GAMEPORT is not set
#
# Character devices
#
# CONFIG_VT is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
# Serial drivers
#
# CONFIG_SERIAL_8250 is not set
#
# Non-8250 serial port support
#
CONFIG_SERIAL_AT91=y
CONFIG_SERIAL_AT91_CONSOLE=y
# CONFIG_SERIAL_AT91_TTYAT is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set
#
# IPMI
#
# CONFIG_IPMI_HANDLER is not set
#
# Watchdog Cards
#
# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_RTC is not set
# CONFIG_GEN_RTC is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
#
# Ftape, the floppy tape device driver
#
# CONFIG_RAW_DRIVER is not set
#
# TPM devices
#
# CONFIG_TCG_TPM is not set
# CONFIG_TELCLOCK is not set
#
# I2C support
#
# CONFIG_I2C is not set
#
# SPI support
#
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
#
# SPI Master Controller Drivers
#
CONFIG_SPI_ATMEL=m
# CONFIG_SPI_BITBANG is not set
#
# SPI Protocol Masters
#
#
# Dallas's 1-wire bus
#
#
# Hardware Monitoring support
#
# CONFIG_HWMON is not set
# CONFIG_HWMON_VID is not set
#
# Misc devices
#
#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
#
# CONFIG_DVB is not set
#
# Graphics support
#
# CONFIG_FIRMWARE_EDID is not set
CONFIG_FB=m
CONFIG_FB_CFB_FILLRECT=m
CONFIG_FB_CFB_COPYAREA=m
CONFIG_FB_CFB_IMAGEBLIT=m
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_BACKLIGHT is not set
# CONFIG_FB_MODE_HELPERS is not set
# CONFIG_FB_TILEBLITTING is not set
CONFIG_FB_SIDSA=m
CONFIG_FB_SIDSA_DEFAULT_BPP=24
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
#
# Logo configuration
#
# CONFIG_LOGO is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_LCD_CLASS_DEVICE=m
CONFIG_LCD_DEVICE=y
CONFIG_LCD_LTV350QV=m
#
# Sound
#
# CONFIG_SOUND is not set
#
# USB support
#
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#
#
# USB Gadget Support
#
# CONFIG_USB_GADGET is not set
#
# MMC/SD Card support
#
# CONFIG_MMC is not set
#
# LED devices
#
# CONFIG_NEW_LEDS is not set
#
# LED drivers
#
#
# LED Triggers
#
#
# InfiniBand support
#
#
# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
#
#
# Real Time Clock
#
# CONFIG_RTC_CLASS is not set
#
# DMA Engine support
#
# CONFIG_DMA_ENGINE is not set
#
# DMA Clients
#
#
# DMA Devices
#
#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
# CONFIG_INOTIFY is not set
# CONFIG_QUOTA is not set
# CONFIG_DNOTIFY is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
# CONFIG_UDF_FS is not set
#
# DOS/FAT/NT Filesystems
#
CONFIG_FAT_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
# Pseudo filesystems
#
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
CONFIG_CONFIGFS_FS=m
#
# Miscellaneous filesystems
#
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
#
# Network File Systems
#
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
# CONFIG_NFS_DIRECTIO is not set
# CONFIG_NFSD is not set
CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
# CONFIG_CIFS_WEAK_PW_HASH is not set
# CONFIG_CIFS_XATTR is not set
# CONFIG_CIFS_DEBUG2 is not set
# CONFIG_CIFS_EXPERIMENTAL is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
# CONFIG_9P_FS is not set
#
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
#
# Native Language Support
#
CONFIG_NLS=m
CONFIG_NLS_DEFAULT="iso8859-1"
CONFIG_NLS_CODEPAGE_437=m
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
CONFIG_NLS_CODEPAGE_850=m
# CONFIG_NLS_CODEPAGE_852 is not set
# CONFIG_NLS_CODEPAGE_855 is not set
# CONFIG_NLS_CODEPAGE_857 is not set
# CONFIG_NLS_CODEPAGE_860 is not set
# CONFIG_NLS_CODEPAGE_861 is not set
# CONFIG_NLS_CODEPAGE_862 is not set
# CONFIG_NLS_CODEPAGE_863 is not set
# CONFIG_NLS_CODEPAGE_864 is not set
# CONFIG_NLS_CODEPAGE_865 is not set
# CONFIG_NLS_CODEPAGE_866 is not set
# CONFIG_NLS_CODEPAGE_869 is not set
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
# CONFIG_NLS_CODEPAGE_932 is not set
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ASCII is not set
CONFIG_NLS_ISO8859_1=m
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
# CONFIG_NLS_ISO8859_5 is not set
# CONFIG_NLS_ISO8859_6 is not set
# CONFIG_NLS_ISO8859_7 is not set
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
# CONFIG_NLS_ISO8859_15 is not set
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
CONFIG_NLS_UTF8=m
#
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_KERNEL=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_RWSEMS is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
CONFIG_FRAME_POINTER=y
# CONFIG_UNWIND_INFO is not set
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_KPROBES=y
#
# Security options
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
#
# Cryptographic options
#
# CONFIG_CRYPTO is not set
#
# Hardware crypto devices
#
#
# Library routines
#
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
CONFIG_CRC32=m
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=m
CONFIG_ZLIB_DEFLATE=m
#
# Makefile for the Linux/AVR32 kernel.
#
extra-y := head.o vmlinux.lds
obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
obj-y += syscall_table.o syscall-stubs.o irq.o
obj-y += setup.o traps.o semaphore.o ptrace.o
obj-y += signal.o sys_avr32.o process.o time.o
obj-y += init_task.o switch_to.o cpu.o
obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
obj-$(CONFIG_KPROBES) += kprobes.o
USE_STANDARD_AS_RULE := true
%.lds: %.lds.c FORCE
$(call if_changed_dep,cpp_lds_S)
/*
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed
* to extract and format the required data.
*/
#include <linux/thread_info.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem));
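/*
 * Kbuild compiles this file to assembly and post-processes it: every line
 * starting with "->" is turned into a #define, so the offsets generated
 * here (TI_flags and friends) can be referenced directly from assembly
 * code such as entry-avr32b.S.
 */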
void foo(void)
{
OFFSET(TI_task, thread_info, task);
OFFSET(TI_exec_domain, thread_info, exec_domain);
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_cpu, thread_info, cpu);
OFFSET(TI_preempt_count, thread_info, preempt_count);
OFFSET(TI_restart_block, thread_info, restart_block);
}
/*
* Export AVR32-specific functions for loadable modules.
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <asm/checksum.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
/*
* GCC functions
*/
extern unsigned long long __avr32_lsl64(unsigned long long u, unsigned long b);
extern unsigned long long __avr32_lsr64(unsigned long long u, unsigned long b);
extern unsigned long long __avr32_asr64(unsigned long long u, unsigned long b);
EXPORT_SYMBOL(__avr32_lsl64);
EXPORT_SYMBOL(__avr32_lsr64);
EXPORT_SYMBOL(__avr32_asr64);
/*
* String functions
*/
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
/* Delay loops (lib/delay.S) */
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__const_udelay);
/* Bit operations (lib/findbit.S) */
EXPORT_SYMBOL(find_first_zero_bit);
EXPORT_SYMBOL(find_next_zero_bit);
EXPORT_SYMBOL(find_first_bit);
EXPORT_SYMBOL(find_next_bit);
EXPORT_SYMBOL(generic_find_next_zero_le_bit);
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/param.h>
#include <linux/errno.h>
#include <asm/setup.h>
#include <asm/sysreg.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_PERFORMANCE_COUNTERS
/*
* XXX: If/when an SMP-capable implementation of AVR32 is ever
* made, we must make sure that the code executes on the correct CPU.
*/
static ssize_t show_pc0event(struct sys_device *dev, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "0x%lx\n", (pccr >> 12) & 0x3f);
}
static ssize_t store_pc0event(struct sys_device *dev, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf || val > 0x3f)
return -EINVAL;
val = (val << 12) | (sysreg_read(PCCR) & 0xfffc0fff);
sysreg_write(PCCR, val);
return count;
}
static ssize_t show_pc0count(struct sys_device *dev, char *buf)
{
unsigned long pcnt0;
pcnt0 = sysreg_read(PCNT0);
return sprintf(buf, "%lu\n", pcnt0);
}
static ssize_t store_pc0count(struct sys_device *dev, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
sysreg_write(PCNT0, val);
return count;
}
static ssize_t show_pc1event(struct sys_device *dev, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f);
}
static ssize_t store_pc1event(struct sys_device *dev, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf || val > 0x3f)
return -EINVAL;
val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff);
sysreg_write(PCCR, val);
return count;
}
static ssize_t show_pc1count(struct sys_device *dev, char *buf)
{
unsigned long pcnt1;
pcnt1 = sysreg_read(PCNT1);
return sprintf(buf, "%lu\n", pcnt1);
}
static ssize_t store_pc1count(struct sys_device *dev, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
sysreg_write(PCNT1, val);
return count;
}
static ssize_t show_pccycles(struct sys_device *dev, char *buf)
{
unsigned long pccnt;
pccnt = sysreg_read(PCCNT);
return sprintf(buf, "%lu\n", pccnt);
}
static ssize_t store_pccycles(struct sys_device *dev, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
sysreg_write(PCCNT, val);
return count;
}
static ssize_t show_pcenable(struct sys_device *dev, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "%c\n", (pccr & 1)?'1':'0');
}
static ssize_t store_pcenable(struct sys_device *dev, const char *buf,
size_t count)
{
unsigned long pccr, val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
if (val)
val = 1;
pccr = sysreg_read(PCCR);
pccr = (pccr & ~1UL) | val;
sysreg_write(PCCR, pccr);
return count;
}
static SYSDEV_ATTR(pc0event, 0600, show_pc0event, store_pc0event);
static SYSDEV_ATTR(pc0count, 0600, show_pc0count, store_pc0count);
static SYSDEV_ATTR(pc1event, 0600, show_pc1event, store_pc1event);
static SYSDEV_ATTR(pc1count, 0600, show_pc1count, store_pc1count);
static SYSDEV_ATTR(pccycles, 0600, show_pccycles, store_pccycles);
static SYSDEV_ATTR(pcenable, 0600, show_pcenable, store_pcenable);
#endif /* CONFIG_PERFORMANCE_COUNTERS */
static int __init topology_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
register_cpu(c, cpu);
#ifdef CONFIG_PERFORMANCE_COUNTERS
sysdev_create_file(&c->sysdev, &attr_pc0event);
sysdev_create_file(&c->sysdev, &attr_pc0count);
sysdev_create_file(&c->sysdev, &attr_pc1event);
sysdev_create_file(&c->sysdev, &attr_pc1count);
sysdev_create_file(&c->sysdev, &attr_pccycles);
sysdev_create_file(&c->sysdev, &attr_pcenable);
#endif
}
return 0;
}
subsys_initcall(topology_init);
static const char *cpu_names[] = {
"Morgan",
"AP7000",
};
#define NR_CPU_NAMES ARRAY_SIZE(cpu_names)
static const char *arch_names[] = {
"AVR32A",
"AVR32B",
};
#define NR_ARCH_NAMES ARRAY_SIZE(arch_names)
static const char *mmu_types[] = {
"No MMU",
"ITLB and DTLB",
"Shared TLB",
"MPU"
};
void __init setup_processor(void)
{
unsigned long config0, config1;
unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type;
unsigned tmp;
config0 = sysreg_read(CONFIG0); /* 0x0000013e; */
config1 = sysreg_read(CONFIG1); /* 0x01f689a2; */
cpu_id = config0 >> 24;
cpu_rev = (config0 >> 16) & 0xff;
arch_id = (config0 >> 13) & 0x07;
arch_rev = (config0 >> 10) & 0x07;
mmu_type = (config0 >> 7) & 0x03;
boot_cpu_data.arch_type = arch_id;
boot_cpu_data.cpu_type = cpu_id;
boot_cpu_data.arch_revision = arch_rev;
boot_cpu_data.cpu_revision = cpu_rev;
boot_cpu_data.tlb_config = mmu_type;
tmp = (config1 >> 13) & 0x07;
if (tmp) {
boot_cpu_data.icache.ways = 1 << ((config1 >> 10) & 0x07);
boot_cpu_data.icache.sets = 1 << ((config1 >> 16) & 0x0f);
boot_cpu_data.icache.linesz = 1 << (tmp + 1);
}
tmp = (config1 >> 3) & 0x07;
if (tmp) {
boot_cpu_data.dcache.ways = 1 << (config1 & 0x07);
boot_cpu_data.dcache.sets = 1 << ((config1 >> 6) & 0x0f);
boot_cpu_data.dcache.linesz = 1 << (tmp + 1);
}
if ((cpu_id >= NR_CPU_NAMES) || (arch_id >= NR_ARCH_NAMES)) {
printk ("Unknown CPU configuration (ID %02x, arch %02x), "
"continuing anyway...\n",
cpu_id, arch_id);
return;
}
printk ("CPU: %s [%02x] revision %d (%s revision %d)\n",
cpu_names[cpu_id], cpu_id, cpu_rev,
arch_names[arch_id], arch_rev);
printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]);
printk ("CPU: features:");
if (config0 & (1 << 6))
printk(" fpu");
if (config0 & (1 << 5))
printk(" java");
if (config0 & (1 << 4))
printk(" perfctr");
if (config0 & (1 << 3))
printk(" ocd");
printk("\n");
}
#ifdef CONFIG_PROC_FS
static int c_show(struct seq_file *m, void *v)
{
unsigned int icache_size, dcache_size;
unsigned int cpu = smp_processor_id();
icache_size = boot_cpu_data.icache.ways *
boot_cpu_data.icache.sets *
boot_cpu_data.icache.linesz;
dcache_size = boot_cpu_data.dcache.ways *
boot_cpu_data.dcache.sets *
boot_cpu_data.dcache.linesz;
seq_printf(m, "processor\t: %d\n", cpu);
if (boot_cpu_data.arch_type < NR_ARCH_NAMES)
seq_printf(m, "cpu family\t: %s revision %d\n",
arch_names[boot_cpu_data.arch_type],
boot_cpu_data.arch_revision);
if (boot_cpu_data.cpu_type < NR_CPU_NAMES)
seq_printf(m, "cpu type\t: %s revision %d\n",
cpu_names[boot_cpu_data.cpu_type],
boot_cpu_data.cpu_revision);
seq_printf(m, "i-cache\t\t: %dK (%u ways x %u sets x %u)\n",
icache_size >> 10,
boot_cpu_data.icache.ways,
boot_cpu_data.icache.sets,
boot_cpu_data.icache.linesz);
seq_printf(m, "d-cache\t\t: %dK (%u ways x %u sets x %u)\n",
dcache_size >> 10,
boot_cpu_data.dcache.ways,
boot_cpu_data.dcache.sets,
boot_cpu_data.dcache.linesz);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
boot_cpu_data.loops_per_jiffy / (500000/HZ),
(boot_cpu_data.loops_per_jiffy / (5000/HZ)) % 100);
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < 1 ? (void *)1 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = c_show
};
#endif /* CONFIG_PROC_FS */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* This file contains the low-level entry-points into the kernel, that is,
* exception handlers, debug trap handlers, interrupt handlers and the
* system call handler.
*/
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/ocd.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#ifdef CONFIG_PREEMPT
# define preempt_stop mask_interrupts
#else
# define preempt_stop
# define fault_resume_kernel fault_restore_all
#endif
#define __MASK(x) ((1 << (x)) - 1)
#define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
.section .ex.text,"ax",@progbits
.align 2
exception_vectors:
bral handle_critical
.align 2
bral handle_critical
.align 2
bral do_bus_error_write
.align 2
bral do_bus_error_read
.align 2
bral do_nmi_ll
.align 2
bral handle_address_fault
.align 2
bral handle_protection_fault
.align 2
bral handle_debug
.align 2
bral do_illegal_opcode_ll
.align 2
bral do_illegal_opcode_ll
.align 2
bral do_illegal_opcode_ll
.align 2
bral do_fpe_ll
.align 2
bral do_illegal_opcode_ll
.align 2
bral handle_address_fault
.align 2
bral handle_address_fault
.align 2
bral handle_protection_fault
.align 2
bral handle_protection_fault
.align 2
bral do_dtlb_modified
/*
* r0 : PGD/PT/PTE
* r1 : Offending address
* r2 : Scratch register
* r3 : Cause (5, 12 or 13)
*/
#define tlbmiss_save pushm r0-r3
#define tlbmiss_restore popm r0-r3
.section .tlbx.ex.text,"ax",@progbits
.global itlb_miss
itlb_miss:
tlbmiss_save
rjmp tlb_miss_common
.section .tlbr.ex.text,"ax",@progbits
dtlb_miss_read:
tlbmiss_save
rjmp tlb_miss_common
.section .tlbw.ex.text,"ax",@progbits
dtlb_miss_write:
tlbmiss_save
.global tlb_miss_common
tlb_miss_common:
mfsr r0, SYSREG_PTBR
mfsr r1, SYSREG_TLBEAR
/* Is it the vmalloc space? */
bld r1, 31
brcs handle_vmalloc_miss
/* First level lookup */
pgtbl_lookup:
lsr r2, r1, PGDIR_SHIFT
ld.w r0, r0[r2 << 2]
bld r0, _PAGE_BIT_PRESENT
brcc page_table_not_present
/* TODO: Check access rights on page table if necessary */
/* Translate to virtual address in P1. */
andl r0, 0xf000
sbr r0, 31
/* Second level lookup */
lsl r1, (32 - PGDIR_SHIFT)
lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
add r2, r0, r1 << 2
ld.w r1, r2[0]
bld r1, _PAGE_BIT_PRESENT
brcc page_not_present
/* Mark the page as accessed */
sbr r1, _PAGE_BIT_ACCESSED
st.w r2[0], r1
/* Drop software flags */
andl r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
mtsr SYSREG_TLBELO, r1
/* Figure out which entry we want to replace */
mfsr r0, SYSREG_TLBARLO
clz r2, r0
brcc 1f
mov r1, -1 /* All entries have been accessed, */
mtsr SYSREG_TLBARLO, r1 /* so reset TLBAR */
mov r2, 0 /* and start at 0 */
1: mfsr r1, SYSREG_MMUCR
lsl r2, 14
andl r1, 0x3fff, COH
or r1, r2
mtsr SYSREG_MMUCR, r1
tlbw
tlbmiss_restore
rete
handle_vmalloc_miss:
/* Simply do the lookup in init's page table */
mov r0, lo(swapper_pg_dir)
orh r0, hi(swapper_pg_dir)
rjmp pgtbl_lookup
/* --- System Call --- */
.section .scall.text,"ax",@progbits
system_call:
pushm r12 /* r12_orig */
stmts --sp, r0-lr
zero_fp
mfsr r0, SYSREG_RAR_SUP
mfsr r1, SYSREG_RSR_SUP
stm --sp, r0-r1
/* check for syscall tracing */
get_thread_info r0
ld.w r1, r0[TI_flags]
bld r1, TIF_SYSCALL_TRACE
brcs syscall_trace_enter
syscall_trace_cont:
cp.w r8, NR_syscalls
brhs syscall_badsys
lddpc lr, syscall_table_addr
ld.w lr, lr[r8 << 2]
mov r8, r5 /* 5th argument (6th is pushed by stub) */
icall lr
.global syscall_return
syscall_return:
get_thread_info r0
mask_interrupts /* make sure we don't miss an interrupt
setting need_resched or sigpending
between sampling and the rets */
/* Store the return value so that the correct value is loaded below */
stdsp sp[REG_R12], r12
ld.w r1, r0[TI_flags]
andl r1, _TIF_ALLWORK_MASK, COH
brne syscall_exit_work
syscall_exit_cont:
popm r8-r9
mtsr SYSREG_RAR_SUP, r8
mtsr SYSREG_RSR_SUP, r9
ldmts sp++, r0-lr
sub sp, -4 /* r12_orig */
rets
.align 2
syscall_table_addr:
.long sys_call_table
syscall_badsys:
mov r12, -ENOSYS
rjmp syscall_return
.global ret_from_fork
ret_from_fork:
rcall schedule_tail
/* check for syscall tracing */
get_thread_info r0
ld.w r1, r0[TI_flags]
andl r1, _TIF_ALLWORK_MASK, COH
brne syscall_exit_work
rjmp syscall_exit_cont
syscall_trace_enter:
pushm r8-r12
rcall syscall_trace
popm r8-r12
rjmp syscall_trace_cont
syscall_exit_work:
bld r1, TIF_SYSCALL_TRACE
brcc 1f
unmask_interrupts
rcall syscall_trace
mask_interrupts
ld.w r1, r0[TI_flags]
1: bld r1, TIF_NEED_RESCHED
brcc 2f
unmask_interrupts
rcall schedule
mask_interrupts
ld.w r1, r0[TI_flags]
rjmp 1b
2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
tst r1, r2
breq 3f
unmask_interrupts
mov r12, sp
mov r11, r0
rcall do_notify_resume
mask_interrupts
ld.w r1, r0[TI_flags]
rjmp 1b
3: bld r1, TIF_BREAKPOINT
brcc syscall_exit_cont
mfsr r3, SYSREG_TLBEHI
lddsp r2, sp[REG_PC]
andl r3, 0xff, COH
lsl r3, 1
sbr r3, 30
sbr r3, 0
mtdr DBGREG_BWA2A, r2
mtdr DBGREG_BWC2A, r3
rjmp syscall_exit_cont
/* The slow path of the TLB miss handler */
page_table_not_present:
page_not_present:
tlbmiss_restore
sub sp, 4
stmts --sp, r0-lr
rcall save_full_context_ex
mfsr r12, SYSREG_ECR
mov r11, sp
rcall do_page_fault
rjmp ret_from_exception
/* This function expects to find offending PC in SYSREG_RAR_EX */
save_full_context_ex:
mfsr r8, SYSREG_RSR_EX
mov r12, r8
andh r8, (MODE_MASK >> 16), COH
mfsr r11, SYSREG_RAR_EX
brne 2f
1: pushm r11, r12 /* PC and SR */
unmask_exceptions
ret r12
2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
stdsp sp[4], r10 /* replace saved SP */
rjmp 1b
/* Low-level exception handlers */
handle_critical:
pushm r12
pushm r0-r12
rcall save_full_context_ex
mfsr r12, SYSREG_ECR
mov r11, sp
rcall do_critical_exception
/* We should never get here... */
bad_return:
sub r12, pc, (. - 1f)
bral panic
.align 2
1: .asciz "Return from critical exception!"
.align 1
do_bus_error_write:
sub sp, 4
stmts --sp, r0-lr
rcall save_full_context_ex
mov r11, 1
rjmp 1f
do_bus_error_read:
sub sp, 4
stmts --sp, r0-lr
rcall save_full_context_ex
mov r11, 0
1: mfsr r12, SYSREG_BEAR
mov r10, sp
rcall do_bus_error
rjmp ret_from_exception
.align 1
do_nmi_ll:
sub sp, 4
stmts --sp, r0-lr
/* FIXME: Make sure RAR_NMI and RSR_NMI are pushed instead of *_EX */
rcall save_full_context_ex
mfsr r12, SYSREG_ECR
mov r11, sp
rcall do_nmi
rjmp bad_return
handle_address_fault:
sub sp, 4
stmts --sp, r0-lr
rcall save_full_context_ex
mfsr r12, SYSREG_ECR
mov r11, sp
rcall do_address_exception
rjmp ret_from_exception
handle_protection_fault:
sub sp, 4
stmts --sp, r0-lr
rcall save_full_context_ex
mfsr r12, SYSREG_ECR
mov r11, sp
rcall do_page_fault
rjmp ret_from_exception
.align 1
do_illegal_opcode_ll:
sub sp, 4
stmts --sp, r0-lr
rcall save_full_context_ex
mfsr r12, SYSREG_ECR
mov r11, sp
rcall do_illegal_opcode
rjmp ret_from_exception
do_dtlb_modified:
pushm r0-r3
mfsr r1, SYSREG_TLBEAR
mfsr r0, SYSREG_PTBR
lsr r2, r1, PGDIR_SHIFT
ld.w r0, r0[r2 << 2]
lsl r1, (32 - PGDIR_SHIFT)
lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
/* Translate to virtual address in P1 */
andl r0, 0xf000
sbr r0, 31
add r2, r0, r1 << 2
ld.w r3, r2[0]
sbr r3, _PAGE_BIT_DIRTY
mov r0, r3
st.w r2[0], r3
/* The page table is up-to-date. Update the TLB entry as well */
andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
mtsr SYSREG_TLBELO, r0
/* MMUCR[DRP] is updated automatically, so let's go... */
tlbw
popm r0-r3
rete
do_fpe_ll:
sub sp, 4
stmts --sp, r0-lr
rcall save_full_context_ex
unmask_interrupts
mov r12, 26
mov r11, sp
rcall do_fpe
rjmp ret_from_exception
ret_from_exception:
mask_interrupts
lddsp r4, sp[REG_SR]
andh r4, (MODE_MASK >> 16), COH
brne fault_resume_kernel
get_thread_info r0
ld.w r1, r0[TI_flags]
andl r1, _TIF_WORK_MASK, COH
brne fault_exit_work
fault_resume_user:
popm r8-r9
mask_exceptions
mtsr SYSREG_RAR_EX, r8
mtsr SYSREG_RSR_EX, r9
ldmts sp++, r0-lr
sub sp, -4
rete
fault_resume_kernel:
#ifdef CONFIG_PREEMPT
get_thread_info r0
ld.w r2, r0[TI_preempt_count]
cp.w r2, 0
brne 1f
ld.w r1, r0[TI_flags]
bld r1, TIF_NEED_RESCHED
brcc 1f
lddsp r4, sp[REG_SR]
bld r4, SYSREG_GM_OFFSET
brcs 1f
rcall preempt_schedule_irq
1:
#endif
popm r8-r9
mask_exceptions
mfsr r1, SYSREG_SR
mtsr SYSREG_RAR_EX, r8
mtsr SYSREG_RSR_EX, r9
popm lr
sub sp, -4 /* ignore SP */
popm r0-r12
sub sp, -4 /* ignore r12_orig */
rete
irq_exit_work:
/* Switch to exception mode so that we can share the same code. */
mfsr r8, SYSREG_SR
cbr r8, SYSREG_M0_OFFSET
orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
mtsr SYSREG_SR, r8
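/* Jump to the next instruction to flush the pipeline so the mode change takes effect */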
sub pc, -2
get_thread_info r0
ld.w r1, r0[TI_flags]
fault_exit_work:
bld r1, TIF_NEED_RESCHED
brcc 1f
unmask_interrupts
rcall schedule
mask_interrupts
ld.w r1, r0[TI_flags]
rjmp fault_exit_work
1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
tst r1, r2
breq 2f
unmask_interrupts
mov r12, sp
mov r11, r0
rcall do_notify_resume
mask_interrupts
ld.w r1, r0[TI_flags]
rjmp fault_exit_work
2: bld r1, TIF_BREAKPOINT
brcc fault_resume_user
mfsr r3, SYSREG_TLBEHI
lddsp r2, sp[REG_PC]
andl r3, 0xff, COH
lsl r3, 1
sbr r3, 30
sbr r3, 0
mtdr DBGREG_BWA2A, r2
mtdr DBGREG_BWC2A, r3
rjmp fault_resume_user
/* If we get a debug trap from privileged context we end up here */
handle_debug_priv:
/* Fix up LR and SP in regs. r11 contains the mode we came from */
mfsr r8, SYSREG_SR
mov r9, r8
andh r8, hi(~MODE_MASK)
or r8, r11
mtsr SYSREG_SR, r8
sub pc, -2
stdsp sp[REG_LR], lr
mtsr SYSREG_SR, r9
sub pc, -2
sub r10, sp, -FRAME_SIZE_FULL
stdsp sp[REG_SP], r10
mov r12, sp
rcall do_debug_priv
/* Now, put everything back */
ssrf SR_EM_BIT
popm r10, r11
mtsr SYSREG_RAR_DBG, r10
mtsr SYSREG_RSR_DBG, r11
mfsr r8, SYSREG_SR
mov r9, r8
andh r8, hi(~MODE_MASK)
andh r11, hi(MODE_MASK)
or r8, r11
mtsr SYSREG_SR, r8
sub pc, -2
popm lr
mtsr SYSREG_SR, r9
sub pc, -2
sub sp, -4 /* skip SP */
popm r0-r12
sub sp, -4
retd
/*
* At this point, everything is masked, that is, interrupts,
* exceptions and debugging traps. We might get called from
* interrupt or exception context in some rare cases, but this
* will be taken care of by do_debug(), so we're not going to
* do a 100% correct context save here.
*/
handle_debug:
sub sp, 4 /* r12_orig */
stmts --sp, r0-lr
mfsr r10, SYSREG_RAR_DBG
mfsr r11, SYSREG_RSR_DBG
unmask_exceptions
pushm r10,r11
andh r11, (MODE_MASK >> 16), COH
brne handle_debug_priv
mov r12, sp
rcall do_debug
lddsp r10, sp[REG_SR]
andh r10, (MODE_MASK >> 16), COH
breq debug_resume_user
debug_restore_all:
popm r10,r11
mask_exceptions
mtsr SYSREG_RSR_DBG, r11
mtsr SYSREG_RAR_DBG, r10
ldmts sp++, r0-lr
sub sp, -4
retd
debug_resume_user:
get_thread_info r0
mask_interrupts
ld.w r1, r0[TI_flags]
andl r1, _TIF_DBGWORK_MASK, COH
breq debug_restore_all
1: bld r1, TIF_NEED_RESCHED
brcc 2f
unmask_interrupts
rcall schedule
mask_interrupts
ld.w r1, r0[TI_flags]
rjmp 1b
2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
tst r1, r2
breq 3f
unmask_interrupts
mov r12, sp
mov r11, r0
rcall do_notify_resume
mask_interrupts
ld.w r1, r0[TI_flags]
rjmp 1b
3: bld r1, TIF_SINGLE_STEP
brcc debug_restore_all
mfdr r2, DBGREG_DC
sbr r2, DC_SS_BIT
mtdr DBGREG_DC, r2
rjmp debug_restore_all
.set rsr_int0, SYSREG_RSR_INT0
.set rsr_int1, SYSREG_RSR_INT1
.set rsr_int2, SYSREG_RSR_INT2
.set rsr_int3, SYSREG_RSR_INT3
.set rar_int0, SYSREG_RAR_INT0
.set rar_int1, SYSREG_RAR_INT1
.set rar_int2, SYSREG_RAR_INT2
.set rar_int3, SYSREG_RAR_INT3
.macro IRQ_LEVEL level
.type irq_level\level, @function
irq_level\level:
sub sp, 4 /* r12_orig */
stmts --sp,r0-lr
mfsr r8, rar_int\level
mfsr r9, rsr_int\level
pushm r8-r9
mov r11, sp
mov r12, \level
rcall do_IRQ
lddsp r4, sp[REG_SR]
andh r4, (MODE_MASK >> 16), COH
#ifdef CONFIG_PREEMPT
brne 2f
#else
brne 1f
#endif
get_thread_info r0
ld.w r1, r0[TI_flags]
andl r1, _TIF_WORK_MASK, COH
brne irq_exit_work
1: popm r8-r9
mtsr rar_int\level, r8
mtsr rsr_int\level, r9
ldmts sp++,r0-lr
sub sp, -4 /* ignore r12_orig */
rete
#ifdef CONFIG_PREEMPT
2:
get_thread_info r0
ld.w r2, r0[TI_preempt_count]
cp.w r2, 0
brne 1b
ld.w r1, r0[TI_flags]
bld r1, TIF_NEED_RESCHED
brcc 1b
lddsp r4, sp[REG_SR]
bld r4, SYSREG_GM_OFFSET
brcs 1b
rcall preempt_schedule_irq
rjmp 1b
#endif
.endm
.section .irq.text,"ax",@progbits
.global irq_level0
.global irq_level1
.global irq_level2
.global irq_level3
IRQ_LEVEL 0
IRQ_LEVEL 1
IRQ_LEVEL 2
IRQ_LEVEL 3
/*
* Non-board-specific low-level startup code
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/sysreg.h>
.section .init.text,"ax"
.global kernel_entry
kernel_entry:
/* Initialize status register */
lddpc r0, init_sr
mtsr SYSREG_SR, r0
/* Set initial stack pointer */
lddpc sp, stack_addr
sub sp, -THREAD_SIZE
#ifdef CONFIG_FRAME_POINTER
/* Mark last stack frame */
mov lr, 0
mov r7, 0
#endif
/* Set up the PIO, SDRAM controller, early printk, etc. */
rcall board_early_init
/* Start the show */
lddpc pc, kernel_start_addr
.align 2
init_sr:
.long 0x007f0000 /* Supervisor mode, everything masked */
stack_addr:
.long init_thread_union
kernel_start_addr:
.long start_kernel
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/pgtable.h>
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
/*
* Initial thread structure. Must be aligned on an 8192-byte boundary.
*/
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on arch/i386/kernel/irq.c
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains the code used by various IRQ handling routines:
* asking for different IRQ's should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*
* IRQ's are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysdev.h>
/*
* 'What should we do if we get a hw irq event on an illegal vector?'
* Each architecture has to answer this themselves.
*/
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ %u\n", irq);
}
#ifdef CONFIG_PROC_FS
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *)v, cpu;
struct irqaction *action;
unsigned long flags;
if (i == 0) {
seq_puts(p, " ");
for_each_online_cpu(cpu)
seq_printf(p, "CPU%d ", cpu);
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
seq_printf(p, "%3d: ", i);
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
#endif
/*
* Kernel Probes (KProbes)
*
* Copyright (C) 2005-2006 Atmel Corporation
*
* Based on arch/ppc64/kernel/kprobes.c
* Copyright (C) IBM Corporation, 2002, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/ocd.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
static unsigned long kprobe_status;
static struct pt_regs jprobe_saved_regs;
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
if ((unsigned long)p->addr & 0x01) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
}
/* XXX: Might be a good idea to check if p->addr is a valid
* kernel address as well... */
if (!ret) {
pr_debug("copy kprobe at %p\n", p->addr);
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
}
return ret;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
pr_debug("arming kprobe at %p\n", p->addr);
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
pr_debug("disarming kprobe at %p\n", p->addr);
*p->addr = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
unsigned long dc;
pr_debug("preparing to singlestep over %p (PC=%08lx)\n",
p->addr, regs->pc);
BUG_ON(!(sysreg_read(SR) & SYSREG_BIT(SR_D)));
dc = __mfdr(DBGREG_DC);
dc |= DC_SS;
__mtdr(DBGREG_DC, dc);
/*
* We must run the instruction from its original location
* since it may actually reference PC.
*
* TODO: Do the instruction replacement directly in icache.
*/
*p->addr = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
unsigned long dc;
pr_debug("resuming execution at PC=%08lx\n", regs->pc);
dc = __mfdr(DBGREG_DC);
dc &= ~DC_SS;
__mtdr(DBGREG_DC, dc);
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
__get_cpu_var(current_kprobe) = p;
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
void *addr = (void *)regs->pc;
int ret = 0;
pr_debug("kprobe_handler: kprobe_running=%d\n",
kprobe_running());
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
/* Check that we're not recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kprobe_status == KPROBE_HIT_SS) {
printk("FIXME: kprobe hit while single-stepping!\n");
goto no_kprobe;
}
printk("FIXME: kprobe hit while handling another kprobe\n");
goto no_kprobe;
} else {
p = kprobe_running();
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}
/* If it's not ours, it can't be a delete race (we hold the lock). */
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p)
goto no_kprobe;
kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p);
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
return 1;
ss_probe:
prepare_singlestep(p, regs);
kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
return ret;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
pr_debug("post_kprobe_handler, cur=%p\n", cur);
if (!cur)
return 0;
if (cur->post_handler) {
kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
resume_execution(cur, regs);
reset_current_kprobe();
preempt_enable_no_resched();
return 1;
}
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
pr_debug("kprobe_fault_handler: trapnr=%d\n", trapnr);
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
if (kprobe_status & KPROBE_HIT_SS) {
resume_execution(cur, regs);
preempt_enable_no_resched();
}
return 0;
}
/*
 * Wrapper routine for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
pr_debug("kprobe_exceptions_notify: val=%lu, data=%p\n",
val, data);
switch (val) {
case DIE_BREAKPOINT:
if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_SSTEP:
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_FAULT:
if (kprobe_running()
&& kprobe_fault_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
break;
default:
break;
}
return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));
/*
* TODO: We should probably save some of the stack here as
* well, since gcc may pass arguments on the stack for certain
* functions (lots of arguments, large aggregates, varargs)
*/
/* setup return addr to the jprobe handler routine */
regs->pc = (unsigned long)jp->entry;
return 1;
}
void __kprobes jprobe_return(void)
{
asm volatile("breakpoint" ::: "memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
/*
* FIXME - we should ideally be validating that we got here 'cos
* of the "trap" in jprobe_return() above, before restoring the
* saved regs...
*/
memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
return 1;
}
int __init arch_init_kprobes(void)
{
printk("KPROBES: Enabling monitor mode (MM|DBE)...\n");
__mtdr(DBGREG_DC, DC_MM | DC_DBE);
/* TODO: Register kretprobe trampoline */
return 0;
}
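#if 0
/*
 * Illustrative sketch only, not part of this patch: how the generic
 * kprobes API exercises the arch hooks above.  register_kprobe()
 * calls arch_prepare_kprobe() and arch_arm_kprobe(); when the
 * breakpoint instruction fires, kprobe_handler() invokes the
 * pre_handler below and then single-steps the original instruction.
 * The probe point (do_fork) and names are arbitrary examples.
 */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk("kprobe hit at %p, pc=%08lx\n", p->addr, regs->pc);
	return 0;
}

static struct kprobe example_kp;

static int __init example_kprobe_init(void)
{
	example_kp.addr = (kprobe_opcode_t *)do_fork;
	example_kp.pre_handler = example_pre_handler;
	return register_kprobe(&example_kp);
}
#endif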
/*
* AVR32-specific kernel module loader
*
* Copyright (C) 2005-2006 Atmel Corporation
*
* GOT initialization parts are based on the s390 version
* Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/moduleloader.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
void *module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
return vmalloc(size);
}
void module_free(struct module *mod, void *module_region)
{
vfree(mod->arch.syminfo);
mod->arch.syminfo = NULL;
vfree(module_region);
/* FIXME: if module_region == mod->init_region, trim exception
* table entries. */
}
static inline int check_rela(Elf32_Rela *rela, struct module *module,
char *strings, Elf32_Sym *symbols)
{
struct mod_arch_syminfo *info;
info = module->arch.syminfo + ELF32_R_SYM(rela->r_info);
switch (ELF32_R_TYPE(rela->r_info)) {
case R_AVR32_GOT32:
case R_AVR32_GOT16:
case R_AVR32_GOT8:
case R_AVR32_GOT21S:
case R_AVR32_GOT18SW: /* mcall */
case R_AVR32_GOT16S: /* ld.w */
if (rela->r_addend != 0) {
printk(KERN_ERR
"GOT relocation against %s at offset %u with addend\n",
strings + symbols[ELF32_R_SYM(rela->r_info)].st_name,
rela->r_offset);
return -ENOEXEC;
}
if (info->got_offset == -1UL) {
info->got_offset = module->arch.got_size;
module->arch.got_size += sizeof(void *);
}
pr_debug("GOT[%3lu] %s\n", info->got_offset,
strings + symbols[ELF32_R_SYM(rela->r_info)].st_name);
break;
}
return 0;
}
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *module)
{
Elf32_Shdr *symtab;
Elf32_Sym *symbols;
Elf32_Rela *rela;
char *strings;
int nrela, i, j;
int ret;
/* Find the symbol table */
symtab = NULL;
for (i = 0; i < hdr->e_shnum; i++)
switch (sechdrs[i].sh_type) {
case SHT_SYMTAB:
symtab = &sechdrs[i];
break;
}
if (!symtab) {
printk(KERN_ERR "module %s: no symbol table\n", module->name);
return -ENOEXEC;
}
/* Allocate room for one syminfo structure per symbol. */
module->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
module->arch.syminfo = vmalloc(module->arch.nsyms
* sizeof(struct mod_arch_syminfo));
if (!module->arch.syminfo)
return -ENOMEM;
symbols = (void *)hdr + symtab->sh_offset;
strings = (void *)hdr + sechdrs[symtab->sh_link].sh_offset;
for (i = 0; i < module->arch.nsyms; i++) {
if (symbols[i].st_shndx == SHN_UNDEF &&
strcmp(strings + symbols[i].st_name,
"_GLOBAL_OFFSET_TABLE_") == 0)
/* "Define" it as absolute. */
symbols[i].st_shndx = SHN_ABS;
module->arch.syminfo[i].got_offset = -1UL;
module->arch.syminfo[i].got_initialized = 0;
}
/* Allocate GOT entries for symbols that need it. */
module->arch.got_size = 0;
for (i = 0; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type != SHT_RELA)
continue;
nrela = sechdrs[i].sh_size / sizeof(Elf32_Rela);
rela = (void *)hdr + sechdrs[i].sh_offset;
for (j = 0; j < nrela; j++) {
ret = check_rela(rela + j, module,
strings, symbols);
if (ret)
goto out_free_syminfo;
}
}
/*
* Increase core size to make room for GOT and set start
* offset for GOT.
*/
module->core_size = ALIGN(module->core_size, 4);
module->arch.got_offset = module->core_size;
module->core_size += module->arch.got_size;
return 0;
out_free_syminfo:
vfree(module->arch.syminfo);
module->arch.syminfo = NULL;
return ret;
}
static inline int reloc_overflow(struct module *module, const char *reloc_name,
Elf32_Addr relocation)
{
printk(KERN_ERR "module %s: Value %lx does not fit relocation %s\n",
module->name, (unsigned long)relocation, reloc_name);
return -ENOEXEC;
}
#define get_u16(loc) (*((uint16_t *)loc))
#define put_u16(loc, val) (*((uint16_t *)loc) = (val))
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relindex,
struct module *module)
{
Elf32_Shdr *symsec = sechdrs + symindex;
Elf32_Shdr *relsec = sechdrs + relindex;
Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
Elf32_Rela *rel = (void *)relsec->sh_addr;
unsigned int i;
int ret = 0;
for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rela); i++, rel++) {
struct mod_arch_syminfo *info;
Elf32_Sym *sym;
Elf32_Addr relocation;
uint32_t *location;
uint32_t value;
location = (void *)dstsec->sh_addr + rel->r_offset;
sym = (Elf32_Sym *)symsec->sh_addr + ELF32_R_SYM(rel->r_info);
relocation = sym->st_value + rel->r_addend;
info = module->arch.syminfo + ELF32_R_SYM(rel->r_info);
/* Initialize GOT entry if necessary */
switch (ELF32_R_TYPE(rel->r_info)) {
case R_AVR32_GOT32:
case R_AVR32_GOT16:
case R_AVR32_GOT8:
case R_AVR32_GOT21S:
case R_AVR32_GOT18SW:
case R_AVR32_GOT16S:
if (!info->got_initialized) {
Elf32_Addr *gotent;
gotent = (module->module_core
+ module->arch.got_offset
+ info->got_offset);
*gotent = relocation;
info->got_initialized = 1;
}
relocation = info->got_offset;
break;
}
switch (ELF32_R_TYPE(rel->r_info)) {
case R_AVR32_32:
case R_AVR32_32_CPENT:
*location = relocation;
break;
case R_AVR32_22H_PCREL:
relocation -= (Elf32_Addr)location;
if ((relocation & 0xffe00001) != 0
&& (relocation & 0xffc00001) != 0xffc00000)
return reloc_overflow(module,
"R_AVR32_22H_PCREL",
relocation);
relocation >>= 1;
value = *location;
value = ((value & 0xe1ef0000)
| (relocation & 0xffff)
| ((relocation & 0x10000) << 4)
| ((relocation & 0x1e0000) << 8));
*location = value;
break;
case R_AVR32_11H_PCREL:
relocation -= (Elf32_Addr)location;
if ((relocation & 0xfffffc01) != 0
&& (relocation & 0xfffff801) != 0xfffff800)
return reloc_overflow(module,
"R_AVR32_11H_PCREL",
relocation);
value = get_u16(location);
value = ((value & 0xf00c)
| ((relocation & 0x1fe) << 3)
| ((relocation & 0x600) >> 9));
put_u16(location, value);
break;
case R_AVR32_9H_PCREL:
relocation -= (Elf32_Addr)location;
if ((relocation & 0xffffff01) != 0
&& (relocation & 0xfffffe01) != 0xfffffe00)
return reloc_overflow(module,
"R_AVR32_9H_PCREL",
relocation);
value = get_u16(location);
value = ((value & 0xf00f)
| ((relocation & 0x1fe) << 3));
put_u16(location, value);
break;
case R_AVR32_9UW_PCREL:
relocation -= ((Elf32_Addr)location) & 0xfffffffc;
if ((relocation & 0xfffffc03) != 0)
return reloc_overflow(module,
"R_AVR32_9UW_PCREL",
relocation);
value = get_u16(location);
value = ((value & 0xf80f)
| ((relocation & 0x1fc) << 2));
put_u16(location, value);
break;
case R_AVR32_GOTPC:
/*
* R6 = PC - (PC - GOT)
*
* At this point, relocation contains the
* value of PC. Just subtract the value of
* GOT, and we're done.
*/
pr_debug("GOTPC: PC=0x%lx, got_offset=0x%lx, core=0x%p\n",
relocation, module->arch.got_offset,
module->module_core);
relocation -= ((unsigned long)module->module_core
+ module->arch.got_offset);
*location = relocation;
break;
case R_AVR32_GOT18SW:
if ((relocation & 0xfffe0003) != 0
&& (relocation & 0xfffc0003) != 0xffff0000)
return reloc_overflow(module, "R_AVR32_GOT18SW",
relocation);
relocation >>= 2;
/* fall through */
case R_AVR32_GOT16S:
if ((relocation & 0xffff8000) != 0
&& (relocation & 0xffff0000) != 0xffff0000)
return reloc_overflow(module, "R_AVR32_GOT16S",
relocation);
pr_debug("GOT reloc @ 0x%lx -> %lu\n",
rel->r_offset, relocation);
value = *location;
value = ((value & 0xffff0000)
| (relocation & 0xffff));
*location = value;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel->r_info));
return -ENOEXEC;
}
}
return ret;
}
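/*
 * Worked example for R_AVR32_GOTPC above (numbers are made up): if the
 * module core is loaded at 0x90000000 and arch.got_offset is 0x1000,
 * the GOT starts at 0x90001000.  For an instruction whose computed
 * "relocation" (the PC value) is 0x90000100, the constant stored in
 * the code is 0x90000100 - 0x90001000 = -0xf00, so that at run time
 * r6 = PC - (PC - GOT) = 0x90000100 - (-0xf00) = 0x90001000, i.e. the
 * GOT base, exactly as the comment in the GOTPC case describes.
 */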
int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relindex,
struct module *module)
{
printk(KERN_ERR "module %s: REL relocations are not supported\n",
module->name);
return -ENOEXEC;
}
int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *module)
{
vfree(module->arch.syminfo);
module->arch.syminfo = NULL;
return 0;
}
void module_arch_cleanup(struct module *module)
{
}
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/unistd.h>
#include <asm/sysreg.h>
#include <asm/ocd.h>
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
/*
 * This file handles the architecture-dependent parts of process handling.
*/
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
/* TODO: Enter sleep mode */
while (!need_resched())
cpu_relax();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}
void machine_halt(void)
{
}
void machine_power_off(void)
{
}
void machine_restart(char *cmd)
{
__mtdr(DBGREG_DC, DC_DBE);
__mtdr(DBGREG_DC, DC_RES);
while (1) ;
}
/*
* PC is actually discarded when returning from a system call -- the
* return address must be stored in LR. This function will make sure
* LR points to do_exit before starting the thread.
*
* Also, when returning from fork(), r12 is 0, so we must copy the
* argument as well.
*
* r0 : The argument to the main thread function
 * r1 : The address of the main thread function
 * r2 : The address of do_exit
*/
asmlinkage extern void kernel_thread_helper(void);
__asm__(" .type kernel_thread_helper, @function\n"
"kernel_thread_helper:\n"
" mov r12, r0\n"
" mov lr, r2\n"
" mov pc, r1\n"
" .size kernel_thread_helper, . - kernel_thread_helper");
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
regs.r0 = (unsigned long)arg;
regs.r1 = (unsigned long)fn;
regs.r2 = (unsigned long)do_exit;
regs.lr = (unsigned long)kernel_thread_helper;
regs.pc = (unsigned long)kernel_thread_helper;
regs.sr = MODE_SUPERVISOR;
return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
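#if 0
/*
 * Illustrative sketch only, not part of this patch: kernel_thread()
 * starts fn(arg) in a new kernel thread.  Per the calling convention
 * above, the helper jumps to fn with the argument in r12 and LR set
 * to do_exit, so simply returning from the thread function terminates
 * the thread.  The thread function and names below are made up.
 */
static int example_thread(void *data)
{
	printk("example thread running, pid %d\n", current->pid);
	return 0;
}

static void example_start_thread(void)
{
	kernel_thread(example_thread, NULL, CLONE_FS | CLONE_FILES);
}
#endif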
/*
* Free current thread data structures etc
*/
void exit_thread(void)
{
/* nothing to do */
}
void flush_thread(void)
{
/* nothing to do */
}
void release_thread(struct task_struct *dead_task)
{
/* do nothing */
}
static const char *cpu_modes[] = {
"Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
"Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
};
void show_regs(struct pt_regs *regs)
{
unsigned long sp = regs->sp;
unsigned long lr = regs->lr;
unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;
if (!user_mode(regs))
sp = (unsigned long)regs + FRAME_SIZE_FULL;
print_symbol("PC is at %s\n", instruction_pointer(regs));
print_symbol("LR is at %s\n", lr);
printk("pc : [<%08lx>] lr : [<%08lx>] %s\n"
"sp : %08lx r12: %08lx r11: %08lx\n",
instruction_pointer(regs),
lr, print_tainted(), sp, regs->r12, regs->r11);
printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
regs->r10, regs->r9, regs->r8);
printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
regs->r7, regs->r6, regs->r5, regs->r4);
printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
regs->r3, regs->r2, regs->r1, regs->r0);
printk("Flags: %c%c%c%c%c\n",
regs->sr & SR_Q ? 'Q' : 'q',
regs->sr & SR_V ? 'V' : 'v',
regs->sr & SR_N ? 'N' : 'n',
regs->sr & SR_Z ? 'Z' : 'z',
regs->sr & SR_C ? 'C' : 'c');
printk("Mode bits: %c%c%c%c%c%c%c%c%c\n",
regs->sr & SR_H ? 'H' : 'h',
regs->sr & SR_R ? 'R' : 'r',
regs->sr & SR_J ? 'J' : 'j',
regs->sr & SR_EM ? 'E' : 'e',
regs->sr & SR_I3M ? '3' : '.',
regs->sr & SR_I2M ? '2' : '.',
regs->sr & SR_I1M ? '1' : '.',
regs->sr & SR_I0M ? '0' : '.',
regs->sr & SR_GM ? 'G' : 'g');
printk("CPU Mode: %s\n", cpu_modes[mode]);
show_trace(NULL, (unsigned long *)sp, regs);
}
EXPORT_SYMBOL(show_regs);
/* Fill in the fpu structure for a core dump. This is easy -- we don't have any */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
/* Not valid */
return 0;
}
asmlinkage void ret_from_fork(void);
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)p->thread_info)) - 1;
*childregs = *regs;
if (user_mode(regs))
childregs->sp = usp;
else
childregs->sp = (unsigned long)p->thread_info + THREAD_SIZE;
childregs->r12 = 0; /* Set return value for child */
p->thread.cpu_context.sr = MODE_SUPERVISOR | SR_GM;
p->thread.cpu_context.ksp = (unsigned long)childregs;
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
return 0;
}
/* r12-r8 are dummy parameters to force the compiler to use the stack */
asmlinkage int sys_fork(struct pt_regs *regs)
{
return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidptr,
unsigned long child_tidptr, struct pt_regs *regs)
{
if (!newsp)
newsp = regs->sp;
return do_fork(clone_flags, newsp, regs, 0,
(int __user *)parent_tidptr,
(int __user *)child_tidptr);
}
asmlinkage int sys_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs,
0, NULL, NULL);
}
asmlinkage int sys_execve(char __user *ufilename, char __user *__user *uargv,
char __user *__user *uenvp, struct pt_regs *regs)
{
int error;
char *filename;
filename = getname(ufilename);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename, uargv, uenvp, regs);
if (error == 0)
current->ptrace &= ~PT_DTRACE;
putname(filename);
out:
return error;
}
/*
* This function is supposed to answer the question "who called
* schedule()?"
*/
unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc;
unsigned long stack_page;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)p->thread_info;
BUG_ON(!stack_page);
/*
* The stored value of PC is either the address right after
* the call to __switch_to() or ret_from_fork.
*/
pc = thread_saved_pc(p);
if (in_sched_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
unsigned long fp = p->thread.cpu_context.r7;
BUG_ON(fp < stack_page || fp > (THREAD_SIZE + stack_page));
pc = *(unsigned long *)fp;
#else
/*
* We depend on the frame size of schedule here, which
* is actually quite ugly. It might be possible to
* determine the frame size automatically at build
* time by doing this:
* - compile sched.c
* - disassemble the resulting sched.o
* - look for 'sub sp,??' shortly after '<schedule>:'
*/
unsigned long sp = p->thread.cpu_context.ksp + 16;
BUG_ON(sp < stack_page || sp > (THREAD_SIZE + stack_page));
pc = *(unsigned long *)sp;
#endif
}
return pc;
}
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/unistd.h>
#include <linux/notifier.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/ocd.h>
#include <asm/mmu_context.h>
#include <asm/kdebug.h>
static struct pt_regs *get_user_regs(struct task_struct *tsk)
{
return (struct pt_regs *)((unsigned long) tsk->thread_info +
THREAD_SIZE - sizeof(struct pt_regs));
}
static void ptrace_single_step(struct task_struct *tsk)
{
pr_debug("ptrace_single_step: pid=%u, SR=0x%08lx\n",
tsk->pid, tsk->thread.cpu_context.sr);
if (!(tsk->thread.cpu_context.sr & SR_D)) {
/*
* Set a breakpoint at the current pc to force the
* process into debug mode. The syscall/exception
* exit code will set a breakpoint at the return
* address when this flag is set.
*/
pr_debug("ptrace_single_step: Setting TIF_BREAKPOINT\n");
set_tsk_thread_flag(tsk, TIF_BREAKPOINT);
}
/* The monitor code will do the actual step for us */
set_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
}
/*
* Called by kernel/ptrace.c when detaching
*
* Make sure any single step bits, etc. are not set
*/
void ptrace_disable(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLE_STEP);
}
/*
* Handle hitting a breakpoint
*/
static void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *)instruction_pointer(regs);
pr_debug("ptrace_break: Sending SIGTRAP to PID %u (pc = 0x%p)\n",
tsk->pid, info.si_addr);
force_sig_info(SIGTRAP, &info, tsk);
}
/*
* Read the word at offset "offset" into the task's "struct user". We
* actually access the pt_regs struct stored on the kernel stack.
*/
static int ptrace_read_user(struct task_struct *tsk, unsigned long offset,
unsigned long __user *data)
{
unsigned long *regs;
unsigned long value;
pr_debug("ptrace_read_user(%p, %#lx, %p)\n",
tsk, offset, data);
if (offset & 3 || offset >= sizeof(struct user)) {
printk("ptrace_read_user: invalid offset 0x%08lx\n", offset);
return -EIO;
}
regs = (unsigned long *)get_user_regs(tsk);
value = 0;
if (offset < sizeof(struct pt_regs))
value = regs[offset / sizeof(regs[0])];
return put_user(value, data);
}
/*
* Write the word "value" to offset "offset" into the task's "struct
* user". We actually access the pt_regs struct stored on the kernel
* stack.
*/
static int ptrace_write_user(struct task_struct *tsk, unsigned long offset,
unsigned long value)
{
unsigned long *regs;
if (offset & 3 || offset >= sizeof(struct user)) {
printk("ptrace_write_user: invalid offset 0x%08lx\n", offset);
return -EIO;
}
if (offset >= sizeof(struct pt_regs))
return 0;
regs = (unsigned long *)get_user_regs(tsk);
regs[offset / sizeof(regs[0])] = value;
return 0;
}
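#if 0
/*
 * Illustrative sketch only (userspace side, not part of this patch):
 * a debugger reaching ptrace_read_user() above through the
 * PTRACE_PEEKUSR request.  "offset" is a byte offset into struct
 * user, which on AVR32 maps directly onto the pt_regs saved on the
 * kernel stack; peek_user_word() and its arguments are made up.
 */
#include <sys/types.h>
#include <sys/ptrace.h>

static long peek_user_word(pid_t child, unsigned long offset)
{
	/* glibc spells the request PTRACE_PEEKUSER */
	return ptrace(PTRACE_PEEKUSER, child, (void *)offset, 0);
}
#endif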
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
struct pt_regs *regs = get_user_regs(tsk);
return copy_to_user(uregs, regs, sizeof(*regs)) ? -EFAULT : 0;
}
static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
{
struct pt_regs newregs;
int ret;
ret = -EFAULT;
if (copy_from_user(&newregs, uregs, sizeof(newregs)) == 0) {
struct pt_regs *regs = get_user_regs(tsk);
ret = -EINVAL;
if (valid_user_regs(&newregs)) {
*regs = newregs;
ret = 0;
}
}
return ret;
}
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
unsigned long tmp;
int ret;
pr_debug("arch_ptrace(%ld, %ld, %#lx, %#lx)\n",
request, child->pid, addr, data);
pr_debug("ptrace: Enabling monitor mode...\n");
__mtdr(DBGREG_DC, __mfdr(DBGREG_DC) | DC_MM | DC_DBE);
switch (request) {
/* Read the word at location addr in the child process */
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
if (ret == sizeof(tmp))
ret = put_user(tmp, (unsigned long __user *)data);
else
ret = -EIO;
break;
case PTRACE_PEEKUSR:
ret = ptrace_read_user(child, addr,
(unsigned long __user *)data);
break;
/* Write the word in data at location addr */
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
ret = access_process_vm(child, addr, &data, sizeof(data), 1);
if (ret == sizeof(data))
ret = 0;
else
ret = -EIO;
break;
case PTRACE_POKEUSR:
ret = ptrace_write_user(child, addr, data);
break;
/* continue and stop at next (return from) syscall */
case PTRACE_SYSCALL:
/* restart after signal */
case PTRACE_CONT:
ret = -EIO;
if (!valid_signal(data))
break;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
/* XXX: Are we sure no breakpoints are active here? */
wake_up_process(child);
ret = 0;
break;
/*
* Make the child exit. Best I can do is send it a
* SIGKILL. Perhaps it should be put in the status that it
* wants to exit.
*/
case PTRACE_KILL:
ret = 0;
if (child->exit_state == EXIT_ZOMBIE)
break;
child->exit_code = SIGKILL;
wake_up_process(child);
break;
/*
* execute single instruction.
*/
case PTRACE_SINGLESTEP:
ret = -EIO;
if (!valid_signal(data))
break;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
ptrace_single_step(child);
child->exit_code = data;
wake_up_process(child);
ret = 0;
break;
/* Detach a process that was attached */
case PTRACE_DETACH:
ret = ptrace_detach(child, data);
break;
case PTRACE_GETREGS:
ret = ptrace_getregs(child, (void __user *)data);
break;
case PTRACE_SETREGS:
ret = ptrace_setregs(child, (const void __user *)data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
pr_debug("sys_ptrace returning %d (DC = 0x%08lx)\n", ret, __mfdr(DBGREG_DC));
return ret;
}
asmlinkage void syscall_trace(void)
{
pr_debug("syscall_trace called\n");
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
pr_debug("syscall_trace: notifying parent\n");
/* The 0x80 provides a way for the tracing parent to
* distinguish between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it
* will do for normal use. strace only continues with a
* signal if the stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
pr_debug("syscall_trace: sending signal %d to PID %u\n",
current->exit_code, current->pid);
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
asmlinkage void do_debug_priv(struct pt_regs *regs)
{
unsigned long dc, ds;
unsigned long die_val;
ds = __mfdr(DBGREG_DS);
pr_debug("do_debug_priv: pc = %08lx, ds = %08lx\n", regs->pc, ds);
if (ds & DS_SSS)
die_val = DIE_SSTEP;
else
die_val = DIE_BREAKPOINT;
if (notify_die(die_val, regs, 0, SIGTRAP) == NOTIFY_STOP)
return;
if (likely(ds & DS_SSS)) {
extern void itlb_miss(void);
extern void tlb_miss_common(void);
struct thread_info *ti;
dc = __mfdr(DBGREG_DC);
dc &= ~DC_SS;
__mtdr(DBGREG_DC, dc);
ti = current_thread_info();
ti->flags |= _TIF_BREAKPOINT;
/* The TLB miss handlers don't check thread flags */
if ((regs->pc >= (unsigned long)&itlb_miss)
&& (regs->pc <= (unsigned long)&tlb_miss_common)) {
__mtdr(DBGREG_BWA2A, sysreg_read(RAR_EX));
__mtdr(DBGREG_BWC2A, 0x40000001 | (get_asid() << 1));
}
/*
* If we're running in supervisor mode, the breakpoint
* will take us where we want directly, no need to
* single step.
*/
if ((regs->sr & MODE_MASK) != MODE_SUPERVISOR)
ti->flags |= _TIF_SINGLE_STEP;	/* use the mask, not the bit number */
} else {
panic("Unable to handle debug trap at pc = %08lx\n",
regs->pc);
}
}
/*
* Handle breakpoints, single steps and other debuggy things. To keep
* things simple initially, we run with interrupts and exceptions
* disabled all the time.
*/
asmlinkage void do_debug(struct pt_regs *regs)
{
unsigned long dc, ds;
ds = __mfdr(DBGREG_DS);
pr_debug("do_debug: pc = %08lx, ds = %08lx\n", regs->pc, ds);
if (test_thread_flag(TIF_BREAKPOINT)) {
pr_debug("TIF_BREAKPOINT set\n");
/* We're taking care of it */
clear_thread_flag(TIF_BREAKPOINT);
__mtdr(DBGREG_BWC2A, 0);
}
if (test_thread_flag(TIF_SINGLE_STEP)) {
pr_debug("TIF_SINGLE_STEP set, ds = 0x%08lx\n", ds);
if (ds & DS_SSS) {
dc = __mfdr(DBGREG_DC);
dc &= ~DC_SS;
__mtdr(DBGREG_DC, dc);
clear_thread_flag(TIF_SINGLE_STEP);
ptrace_break(current, regs);
}
} else {
/* regular breakpoint */
ptrace_break(current, regs);
}
}
/*
 * AVR32 semaphore implementation.
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on linux/arch/i386/kernel/semaphore.c
* Copyright (C) 1999 Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/semaphore.h>
#include <asm/atomic.h>
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to acquire the semaphore, while the "sleeping"
* variable is a count of such acquires.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
 * the increment operation).
*
* "sleeping" and the contention routine ordering is protected
* by the spinlock in the semaphore's waitqueue head.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
/*
* Logic:
* - only on a boundary condition do we need to care. When we go
* from a negative count to a non-negative, we wake people up.
 * - when we go from a non-negative count to a negative one, we must
* (a) synchronize with the "sleeper" count and (b) make sure
* that we're on the wakeup list before we synchronize so that
* we cannot lose wakeup events.
*/
void __up(struct semaphore *sem)
{
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* the wait_queue_head.
*/
if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_UNINTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
}
EXPORT_SYMBOL(__down);
int __sched __down_interruptible(struct semaphore *sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* With signals pending, this turns into the trylock
* failure case - we won't be sleeping, and we can't
* get the lock as it has contention. Just correct the
* count and exit.
*/
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
atomic_add(sleepers, &sem->count);
break;
}
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* the wait_queue_head.
*/
if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_INTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
return retval;
}
EXPORT_SYMBOL(__down_interruptible);
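#if 0
/*
 * Illustrative sketch only, not part of this patch: typical use of
 * the semaphore API whose contended slow paths are implemented above.
 * down_interruptible() only falls back to __down_interruptible() when
 * the count goes negative, as described by the comment at the top of
 * this file.  The names are made up.
 */
static DECLARE_MUTEX(example_sem);	/* semaphore initialised to 1 */

static int example_critical_section(void)
{
	if (down_interruptible(&example_sem))
		return -EINTR;
	/* ... exclusive access to the shared resource ... */
	up(&example_sem);
	return 0;
}
#endif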
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/sysreg.h>
#include <asm/arch/board.h>
#include <asm/arch/init.h>
extern int root_mountflags;
/*
* Bootloader-provided information about physical memory
*/
struct tag_mem_range *mem_phys;
struct tag_mem_range *mem_reserved;
struct tag_mem_range *mem_ramdisk;
/*
* Initialize loops_per_jiffy as 5000000 (500MIPS).
* Better make it too large than too small...
*/
struct avr32_cpuinfo boot_cpu_data = {
.loops_per_jiffy = 5000000
};
EXPORT_SYMBOL(boot_cpu_data);
static char command_line[COMMAND_LINE_SIZE];
/*
* Should be more than enough, but if you have a _really_ complex
* setup, you might need to increase the size of this...
*/
static struct tag_mem_range __initdata mem_range_cache[32];
static unsigned mem_range_next_free;
/*
* Standard memory resources
*/
static struct resource mem_res[] = {
{
.name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM
},
{
.name = "Kernel data",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM,
},
};
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
/*
* Early framebuffer allocation. Works as follows:
* - If fbmem_size is zero, nothing will be allocated or reserved.
* - If fbmem_start is zero when setup_bootmem() is called,
* fbmem_size bytes will be allocated from the bootmem allocator.
* - If fbmem_start is nonzero, an area of size fbmem_size will be
* reserved at the physical address fbmem_start if necessary. If
* the area isn't in a memory region known to the kernel, it will
* be left alone.
*
* Board-specific code may use these variables to set up platform data
* for the framebuffer driver if fbmem_size is nonzero.
*/
static unsigned long __initdata fbmem_start;
static unsigned long __initdata fbmem_size;
/*
* "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
* use as framebuffer.
*
* "fbmem=xxx[kKmM]@yyy[kKmM]" defines a memory region of size xxx and
* starting at yyy to be reserved for use as framebuffer.
*
* The kernel won't verify that the memory region starting at yyy
* actually contains usable RAM.
*/
static int __init early_parse_fbmem(char *p)
{
fbmem_size = memparse(p, &p);
if (*p == '@')
fbmem_start = memparse(p + 1, &p);	/* skip the '@' */
return 0;
}
early_param("fbmem", early_parse_fbmem);
static inline void __init resource_init(void)
{
struct tag_mem_range *region;
kernel_code.start = __pa(init_mm.start_code);
kernel_code.end = __pa(init_mm.end_code - 1);
kernel_data.start = __pa(init_mm.end_code);
kernel_data.end = __pa(init_mm.brk - 1);
for (region = mem_phys; region; region = region->next) {
struct resource *res;
unsigned long phys_start, phys_end;
if (region->size == 0)
continue;
phys_start = region->addr;
phys_end = phys_start + region->size - 1;
res = alloc_bootmem_low(sizeof(*res));
res->name = "System RAM";
res->start = phys_start;
res->end = phys_end;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
request_resource (&iomem_resource, res);
if (kernel_code.start >= res->start &&
kernel_code.end <= res->end)
request_resource (res, &kernel_code);
if (kernel_data.start >= res->start &&
kernel_data.end <= res->end)
request_resource (res, &kernel_data);
}
}
static int __init parse_tag_core(struct tag *tag)
{
if (tag->hdr.size > 2) {
if ((tag->u.core.flags & 1) == 0)
root_mountflags &= ~MS_RDONLY;
ROOT_DEV = new_decode_dev(tag->u.core.rootdev);
}
return 0;
}
__tagtable(ATAG_CORE, parse_tag_core);
static int __init parse_tag_mem_range(struct tag *tag,
struct tag_mem_range **root)
{
struct tag_mem_range *cur, **pprev;
struct tag_mem_range *new;
/*
* Ignore zero-sized entries. If we're running standalone, the
* SDRAM code may emit such entries if something goes
* wrong...
*/
if (tag->u.mem_range.size == 0)
return 0;
/*
* Copy the data so the bootmem init code doesn't need to care
* about it.
*/
if (mem_range_next_free >=
(sizeof(mem_range_cache) / sizeof(mem_range_cache[0])))
panic("Physical memory map too complex!\n");
new = &mem_range_cache[mem_range_next_free++];
*new = tag->u.mem_range;
pprev = root;
cur = *root;
while (cur) {
pprev = &cur->next;
cur = cur->next;
}
*pprev = new;
new->next = NULL;
return 0;
}
static int __init parse_tag_mem(struct tag *tag)
{
return parse_tag_mem_range(tag, &mem_phys);
}
__tagtable(ATAG_MEM, parse_tag_mem);
static int __init parse_tag_cmdline(struct tag *tag)
{
strlcpy(saved_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
return 0;
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
static int __init parse_tag_rdimg(struct tag *tag)
{
return parse_tag_mem_range(tag, &mem_ramdisk);
}
__tagtable(ATAG_RDIMG, parse_tag_rdimg);
static int __init parse_tag_clock(struct tag *tag)
{
/*
* We'll figure out the clocks by peeking at the system
* manager regs directly.
*/
return 0;
}
__tagtable(ATAG_CLOCK, parse_tag_clock);
static int __init parse_tag_rsvd_mem(struct tag *tag)
{
return parse_tag_mem_range(tag, &mem_reserved);
}
__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
static int __init parse_tag_ethernet(struct tag *tag)
{
#if 0
const struct platform_device *pdev;
/*
* We really need a bus type that supports "classes"...this
* will do for now (until we must handle other kinds of
* ethernet controllers)
*/
pdev = platform_get_device("macb", tag->u.ethernet.mac_index);
if (pdev && pdev->dev.platform_data) {
struct eth_platform_data *data = pdev->dev.platform_data;
data->valid = 1;
data->mii_phy_addr = tag->u.ethernet.mii_phy_addr;
memcpy(data->hw_addr, tag->u.ethernet.hw_address,
sizeof(data->hw_addr));
}
#endif
return 0;
}
__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
/*
* Scan the tag table for this tag, and call its parse function. The
* tag table is built by the linker from all the __tagtable
* declarations.
*/
static int __init parse_tag(struct tag *tag)
{
extern struct tagtable __tagtable_begin, __tagtable_end;
struct tagtable *t;
for (t = &__tagtable_begin; t < &__tagtable_end; t++)
if (tag->hdr.tag == t->tag) {
t->parse(tag);
break;
}
return t < &__tagtable_end;
}
/*
* Parse all tags in the list we got from the boot loader
*/
static void __init parse_tags(struct tag *t)
{
for (; t->hdr.tag != ATAG_NONE; t = tag_next(t))
if (!parse_tag(t))
printk(KERN_WARNING
"Ignoring unrecognised tag 0x%08x\n",
t->hdr.tag);
}
void __init setup_arch (char **cmdline_p)
{
struct clk *cpu_clk;
parse_tags(bootloader_tags);
setup_processor();
setup_platform();
cpu_clk = clk_get(NULL, "cpu");
if (IS_ERR(cpu_clk)) {
printk(KERN_WARNING "Warning: Unable to get CPU clock\n");
} else {
unsigned long cpu_hz = clk_get_rate(cpu_clk);
/*
* Well, duh, but it's probably a good idea to
* increment the use count.
*/
clk_enable(cpu_clk);
boot_cpu_data.clk = cpu_clk;
boot_cpu_data.loops_per_jiffy = cpu_hz * 4;
printk("CPU: Running at %lu.%03lu MHz\n",
((cpu_hz + 500) / 1000) / 1000,
((cpu_hz + 500) / 1000) % 1000);
}
init_mm.start_code = (unsigned long) &_text;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
setup_bootmem();
board_setup_fbmem(fbmem_start, fbmem_size);
#ifdef CONFIG_VT
conswitchp = &dummy_con;
#endif
paging_init();
resource_init();
}
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on linux/arch/sh/kernel/signal.c
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/suspend.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
struct pt_regs *regs)
{
return do_sigaltstack(uss, uoss, regs->sp);
}
struct rt_sigframe
{
struct siginfo info;
struct ucontext uc;
unsigned long retcode;
};
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
#define COPY(x) err |= __get_user(regs->x, &sc->x)
COPY(sr);
COPY(pc);
COPY(lr);
COPY(sp);
COPY(r12);
COPY(r11);
COPY(r10);
COPY(r9);
COPY(r8);
COPY(r7);
COPY(r6);
COPY(r5);
COPY(r4);
COPY(r3);
COPY(r2);
COPY(r1);
COPY(r0);
#undef COPY
/*
* Don't allow anyone to pretend they're running in supervisor
* mode or something...
*/
err |= !valid_user_regs(regs);
return err;
}
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
sigset_t set;
frame = (struct rt_sigframe __user *)regs->sp;
pr_debug("SIG return: frame = %p\n", frame);
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
pr_debug("Context restored: pc = %08lx, lr = %08lx, sp = %08lx\n",
regs->pc, regs->lr, regs->sp);
return regs->r12;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
int err = 0;
#define COPY(x) err |= __put_user(regs->x, &sc->x)
COPY(sr);
COPY(pc);
COPY(lr);
COPY(sp);
COPY(r12);
COPY(r11);
COPY(r10);
COPY(r9);
COPY(r8);
COPY(r7);
COPY(r6);
COPY(r5);
COPY(r4);
COPY(r3);
COPY(r2);
COPY(r1);
COPY(r0);
#undef COPY
return err;
}
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
unsigned long sp = regs->sp;
if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - framesize) & ~3);
}
static int
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
err = -EFAULT;
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto out;
/*
* Set up the return code:
*
* mov r8, __NR_rt_sigreturn
* scall
*
* Note: This will blow up since we're using a non-executable
* stack. Better use SA_RESTORER.
*/
#if __NR_rt_sigreturn > 127
# error __NR_rt_sigreturn must be <= 127 to fit in a short mov
#endif
err = __put_user(0x3008d733 | (__NR_rt_sigreturn << 20),
&frame->retcode);
err |= copy_siginfo_to_user(&frame->info, info);
/* Set up the ucontext */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(NULL, &frame->uc.uc_link);
err |= __put_user((void __user *)current->sas_ss_sp,
&frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size,
&frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto out;
regs->r12 = sig;
regs->r11 = (unsigned long) &frame->info;
regs->r10 = (unsigned long) &frame->uc;
regs->sp = (unsigned long) frame;
if (ka->sa.sa_flags & SA_RESTORER)
regs->lr = (unsigned long)ka->sa.sa_restorer;
else {
printk(KERN_NOTICE "[%s:%d] did not set SA_RESTORER\n",
current->comm, current->pid);
regs->lr = (unsigned long) &frame->retcode;
}
pr_debug("SIG deliver [%s:%d]: sig=%d sp=0x%lx pc=0x%lx->0x%p lr=0x%lx\n",
current->comm, current->pid, sig, regs->sp,
regs->pc, ka->sa.sa_handler, regs->lr);
regs->pc = (unsigned long) ka->sa.sa_handler;
out:
return err;
}
static inline void restart_syscall(struct pt_regs *regs)
{
if (regs->r12 == -ERESTART_RESTARTBLOCK)
regs->r8 = __NR_restart_syscall;
else
regs->r12 = regs->r12_orig;
regs->pc -= 2;
}
static inline void
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs, int syscall)
{
int ret;
/*
* Set up the stack frame
*/
ret = setup_rt_frame(sig, ka, info, oldset, regs);
/*
* Check that the resulting registers are sane
*/
ret |= !valid_user_regs(regs);
/*
* Block the signal if we were unsuccessful.
*/
if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked,
&ka->sa.sa_mask);
sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
if (ret == 0)
return;
force_sigsegv(sig, current);
}
/*
* Note that 'init' is a special process: it doesn't get signals it
 * doesn't want to handle. Thus you cannot kill init, not even with a
 * SIGKILL sent by mistake.
*/
int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
/*
* We want the common case to go fast, which is why we may in
* certain cases get here from kernel mode. Just return
* without doing anything if so.
*/
if (!user_mode(regs))
return 0;
if (try_to_freeze()) {
signr = 0;
if (!signal_pending(current))
goto no_signal;
}
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else if (!oldset)
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
no_signal:
if (syscall) {
switch (regs->r12) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
if (signr > 0) {
regs->r12 = -EINTR;
break;
}
/* fall through */
case -ERESTARTSYS:
if (signr > 0 && !(ka.sa.sa_flags & SA_RESTART)) {
regs->r12 = -EINTR;
break;
}
/* fall through */
case -ERESTARTNOINTR:
restart_syscall(regs);
}
}
if (signr == 0) {
/* No signal to deliver -- put the saved sigmask back */
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
return 0;
}
handle_signal(signr, &ka, &info, oldset, regs, syscall);
return 1;
}
asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
{
int syscall = 0;
if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
syscall = 1;
if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs, &current->blocked, syscall);
}
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/sysreg.h>
.text
.global __switch_to
.type __switch_to, @function
/* Switch thread context from "prev" to "next", returning "last"
* r12 : prev
* r11 : &prev->thread + 1
* r10 : &next->thread
*/
__switch_to:
stm --r11, r0,r1,r2,r3,r4,r5,r6,r7,sp,lr
mfsr r9, SYSREG_SR
st.w --r11, r9
ld.w r8, r10++
/*
* schedule() may have been called from a mode with a different
* set of registers. Make sure we don't lose anything here.
*/
pushm r10,r12
mtsr SYSREG_SR, r8
frs /* flush the return stack */
sub pc, -2 /* flush the pipeline */
popm r10,r12
ldm r10++, r0,r1,r2,r3,r4,r5,r6,r7,sp,pc
.size __switch_to, . - __switch_to
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <asm/mman.h>
#include <asm/uaccess.h>
asmlinkage int sys_pipe(unsigned long __user *filedes)
{
int fd[2];
int error;
error = do_pipe(fd);
if (!error) {
if (copy_to_user(filedes, fd, sizeof(fd)))
error = -EFAULT;
}
return error;
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, off_t offset)
{
int error = -EBADF;
struct file *file = NULL;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
file = fget(fd);
if (!file)
return error;
}
down_write(&current->mm->mmap_sem);
error = do_mmap_pgoff(file, addr, len, prot, flags, offset);
up_write(&current->mm->mmap_sem);
if (file)
fput(file);
return error;
}
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Stubs for syscalls that require access to pt_regs or that take more
* than five parameters.
*/
#define ARG6 r3
.text
.global __sys_rt_sigsuspend
.type __sys_rt_sigsuspend,@function
__sys_rt_sigsuspend:
mov r10, sp
rjmp sys_rt_sigsuspend
.global __sys_sigaltstack
.type __sys_sigaltstack,@function
__sys_sigaltstack:
mov r10, sp
rjmp sys_sigaltstack
.global __sys_rt_sigreturn
.type __sys_rt_sigreturn,@function
__sys_rt_sigreturn:
mov r12, sp
rjmp sys_rt_sigreturn
.global __sys_fork
.type __sys_fork,@function
__sys_fork:
mov r12, sp
rjmp sys_fork
.global __sys_clone
.type __sys_clone,@function
__sys_clone:
mov r8, sp
rjmp sys_clone
.global __sys_vfork
.type __sys_vfork,@function
__sys_vfork:
mov r12, sp
rjmp sys_vfork
.global __sys_execve
.type __sys_execve,@function
__sys_execve:
mov r9, sp
rjmp sys_execve
.global __sys_mmap2
.type __sys_mmap2,@function
__sys_mmap2:
pushm lr
st.w --sp, ARG6
rcall sys_mmap2
sub sp, -4
popm pc
.global __sys_sendto
.type __sys_sendto,@function
__sys_sendto:
pushm lr
st.w --sp, ARG6
rcall sys_sendto
sub sp, -4
popm pc
.global __sys_recvfrom
.type __sys_recvfrom,@function
__sys_recvfrom:
pushm lr
st.w --sp, ARG6
rcall sys_recvfrom
sub sp, -4
popm pc
.global __sys_pselect6
.type __sys_pselect6,@function
__sys_pselect6:
pushm lr
st.w --sp, ARG6
rcall sys_pselect6
sub sp, -4
popm pc
.global __sys_splice
.type __sys_splice,@function
__sys_splice:
pushm lr
st.w --sp, ARG6
rcall sys_splice
sub sp, -4
popm pc
/*
* AVR32 system call table
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#if !defined(CONFIG_NFSD) && !defined(CONFIG_NFSD_MODULE)
#define sys_nfsservctl sys_ni_syscall
#endif
#if !defined(CONFIG_SYSV_IPC)
# define sys_ipc sys_ni_syscall
#endif
.section .rodata,"a",@progbits
.type sys_call_table,@object
.global sys_call_table
.align 2
sys_call_table:
.long sys_restart_syscall
.long sys_exit
.long __sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_umask
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long __sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_chown
.long sys_lchown
.long sys_lseek
.long sys_llseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_umount
.long sys_setuid
.long sys_getuid
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_pause
.long sys_utime
.long sys_newstat /* 30 */
.long sys_newfstat
.long sys_newlstat
.long sys_access
.long sys_chroot
.long sys_sync /* 35 */
.long sys_fsync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long __sys_clone
.long sys_brk /* 45 */
.long sys_setgid
.long sys_getgid
.long sys_getcwd
.long sys_geteuid
.long sys_getegid /* 50 */
.long sys_acct
.long sys_setfsuid
.long sys_setfsgid
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_setpgid
.long sys_mremap
.long sys_setresuid
.long sys_getresuid
.long sys_setreuid /* 60 */
.long sys_setregid
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_rt_sigaction
.long __sys_rt_sigreturn
.long sys_rt_sigprocmask
.long sys_rt_sigpending /* 70 */
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long __sys_rt_sigsuspend
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups /* 80 */
.long sys_setgroups
.long sys_select
.long sys_symlink
.long sys_fchdir
.long sys_readlink /* 85 */
.long sys_pread64
.long sys_pwrite64
.long sys_swapon
.long sys_reboot
.long __sys_mmap2 /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_wait4
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_vhangup
.long __sys_sigaltstack
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_swapoff
.long sys_sysinfo
.long sys_ipc
.long sys_sendfile
.long sys_setdomainname /* 110 */
.long sys_newuname
.long sys_adjtimex
.long sys_mprotect
.long __sys_vfork
.long sys_init_module /* 115 */
.long sys_delete_module
.long sys_quotactl
.long sys_getpgid
.long sys_bdflush
.long sys_sysfs /* 120 */
.long sys_personality
.long sys_ni_syscall /* reserved for afs_syscall */
.long sys_getdents
.long sys_flock
.long sys_msync /* 125 */
.long sys_readv
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl /* 130 */
.long sys_mlock
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam /* 135 */
.long sys_sched_getparam
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max /* 140 */
.long sys_sched_get_priority_min
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_poll
.long sys_nfsservctl /* 145 */
.long sys_setresgid
.long sys_getresgid
.long sys_prctl
.long sys_socket
.long sys_bind /* 150 */
.long sys_connect
.long sys_listen
.long sys_accept
.long sys_getsockname
.long sys_getpeername /* 155 */
.long sys_socketpair
.long sys_send
.long sys_recv
.long __sys_sendto
.long __sys_recvfrom /* 160 */
.long sys_shutdown
.long sys_setsockopt
.long sys_getsockopt
.long sys_sendmsg
.long sys_recvmsg /* 165 */
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64
.long sys_lstat64
.long sys_fstat64 /* 170 */
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
.long sys_getdents64
.long sys_fcntl64 /* 175 */
.long sys_gettid
.long sys_readahead
.long sys_setxattr
.long sys_lsetxattr
.long sys_fsetxattr /* 180 */
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr
.long sys_llistxattr /* 185 */
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr
.long sys_tkill /* 190 */
.long sys_sendfile64
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_capget /* 195 */
.long sys_capset
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents
.long sys_io_submit /* 200 */
.long sys_io_cancel
.long sys_fadvise64
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create /* 205 */
.long sys_epoll_ctl
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_set_tid_address
.long sys_timer_create /* 210 */
.long sys_timer_settime
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime /* 215 */
.long sys_clock_gettime
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_statfs64
.long sys_fstatfs64 /* 220 */
.long sys_tgkill
.long sys_ni_syscall /* reserved for TUX */
.long sys_utimes
.long sys_fadvise64_64
.long sys_cacheflush /* 225 */
.long sys_ni_syscall /* sys_vserver */
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive /* 230 */
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_kexec_load
.long sys_waitid
.long sys_add_key /* 235 */
.long sys_request_key
.long sys_keyctl
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init /* 240 */
.long sys_inotify_add_watch
.long sys_inotify_rm_watch
.long sys_openat
.long sys_mkdirat
.long sys_mknodat /* 245 */
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat /* 250 */
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat /* 255 */
.long __sys_pselect6
.long sys_ppoll
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list /* 260 */
.long __sys_splice
.long sys_sync_file_range
.long sys_tee
.long sys_vmsplice
.long sys_ni_syscall /* r8 is saturated at nr_syscalls */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on MIPS implementation arch/mips/kernel/time.c
* Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/sysdev.h>
#include <asm/div64.h>
#include <asm/sysreg.h>
#include <asm/io.h>
#include <asm/sections.h>
static cycle_t read_cycle_count(void)
{
return (cycle_t)sysreg_read(COUNT);
}
static struct clocksource clocksource_avr32 = {
.name = "avr32",
.rating = 350,
.read = read_cycle_count,
.mask = CLOCKSOURCE_MASK(32),
.shift = 16,
.is_continuous = 1,
};
/*
* By default we provide the null RTC ops
*/
static unsigned long null_rtc_get_time(void)
{
return mktime(2004, 1, 1, 0, 0, 0);
}
static int null_rtc_set_time(unsigned long sec)
{
return 0;
}
static unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
static int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
/* how many counter cycles in a jiffy? */
static unsigned long cycles_per_jiffy;
/* cycle counter value at the previous timer interrupt */
static unsigned int timerhi, timerlo;
/* the count value for the next timer interrupt */
static unsigned int expirelo;
static void avr32_timer_ack(void)
{
unsigned int count;
/* Ack this timer interrupt and set the next one */
expirelo += cycles_per_jiffy;
if (expirelo == 0) {
printk(KERN_DEBUG "expirelo == 0\n");
sysreg_write(COMPARE, expirelo + 1);
} else {
sysreg_write(COMPARE, expirelo);
}
/* Check to see if we have missed any timer interrupts */
count = sysreg_read(COUNT);
if ((count - expirelo) < 0x7fffffff) {
expirelo = count + cycles_per_jiffy;
sysreg_write(COMPARE, expirelo);
}
}
static unsigned int avr32_hpt_read(void)
{
return sysreg_read(COUNT);
}
/*
* Taken from MIPS c0_hpt_timer_init().
*
* Why is it so complicated, and what is "count"? My assumption is
* that `count' specifies the "reference cycle", i.e. the cycle since
* reset that should mean "zero". The reason COUNT is written twice is
* probably to make sure we don't get any timer interrupts while we
* are messing with the counter.
*/
static void avr32_hpt_init(unsigned int count)
{
count = sysreg_read(COUNT) - count;
expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
sysreg_write(COUNT, expirelo - cycles_per_jiffy);
sysreg_write(COMPARE, expirelo);
sysreg_write(COUNT, count);
}
/*
* Scheduler clock - returns current time in nanosec units.
*/
unsigned long long sched_clock(void)
{
/* There must be better ways...? */
return (unsigned long long)jiffies * (1000000000 / HZ);
}
/*
* local_timer_interrupt() does profiling and process accounting on a
* per-CPU basis.
*
* In UP mode, it is invoked from the (global) timer_interrupt.
*/
static void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
if (current->pid)
profile_tick(CPU_PROFILING, regs);
update_process_times(user_mode(regs));
}
static irqreturn_t
timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned int count;
/* ack timer interrupt and try to set next interrupt */
count = avr32_hpt_read();
avr32_timer_ack();
/* Update timerhi/timerlo for intra-jiffy calibration */
timerhi += count < timerlo; /* Wrap around */
timerlo = count;
/*
* Call the generic timer interrupt handler
*/
write_seqlock(&xtime_lock);
do_timer(regs);
write_sequnlock(&xtime_lock);
/*
* In UP mode, we call local_timer_interrupt() to do profiling
* and process accounting.
*
* SMP is not supported yet.
*/
local_timer_interrupt(irq, dev_id, regs);
return IRQ_HANDLED;
}
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
.name = "timer",
};
void __init time_init(void)
{
unsigned long mult, shift, count_hz;
int ret;
xtime.tv_sec = rtc_get_time();
xtime.tv_nsec = 0;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
printk("Before time_init: count=%08lx, compare=%08lx\n",
(unsigned long)sysreg_read(COUNT),
(unsigned long)sysreg_read(COMPARE));
count_hz = clk_get_rate(boot_cpu_data.clk);
shift = clocksource_avr32.shift;
mult = clocksource_hz2mult(count_hz, shift);
clocksource_avr32.mult = mult;
printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift);
{
u64 tmp;
tmp = TICK_NSEC;
tmp <<= shift;
tmp += mult / 2;
do_div(tmp, mult);
cycles_per_jiffy = tmp;
}
/* This sets up the high precision timer for the first interrupt. */
avr32_hpt_init(avr32_hpt_read());
printk("After time_init: count=%08lx, compare=%08lx\n",
(unsigned long)sysreg_read(COUNT),
(unsigned long)sysreg_read(COMPARE));
ret = clocksource_register(&clocksource_avr32);
if (ret)
printk(KERN_ERR
"timer: could not register clocksource: %d\n", ret);
ret = setup_irq(0, &timer_irqaction);
if (ret)
printk("timer: could not request IRQ 0: %d\n", ret);
}
static struct sysdev_class timer_class = {
set_kset_name("timer"),
};
static struct sys_device timer_device = {
.id = 0,
.cls = &timer_class,
};
static int __init init_timer_sysfs(void)
{
int err = sysdev_class_register(&timer_class);
if (!err)
err = sysdev_register(&timer_device);
return err;
}
device_initcall(init_timer_sysfs);
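For reference, the mult/shift pair computed in time_init() converts cycle counts to nanoseconds as ns = (cycles * mult) >> shift, and cycles_per_jiffy is the same relation solved for cycles with TICK_NSEC as input. A small user-space sketch of the arithmetic (the 200 MHz COUNT rate and HZ = 100 are assumed example values, not chip data):

#include <stdint.h>
#include <stdio.h>

/* Same rounding as clocksource_hz2mult(): mult = (10^9 << shift) / hz */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
        uint64_t tmp = (uint64_t)1000000000 << shift;
        tmp += hz / 2;                          /* round to nearest */
        return (uint32_t)(tmp / hz);
}

int main(void)
{
        uint32_t hz = 200000000;                /* assumed COUNT rate */
        uint32_t shift = 16;
        uint32_t mult = hz2mult(hz, shift);
        uint64_t tick_nsec = 1000000000 / 100;  /* HZ = 100 assumed */

        /* cycles -> ns, as the clocksource core does it */
        uint64_t ns_per_second = ((uint64_t)hz * mult) >> shift;
        /* ns -> cycles, as time_init() does for cycles_per_jiffy */
        uint64_t cycles_per_jiffy = ((tick_nsec << shift) + mult / 2) / mult;

        printf("mult=%u ns/s=%llu cycles/jiffy=%llu\n", mult,
               (unsigned long long)ns_per_second,
               (unsigned long long)cycles_per_jiffy);
        return 0;
}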
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <asm/traps.h>
#include <asm/sysreg.h>
#include <asm/addrspace.h>
#include <asm/ocd.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
for (p = bottom & ~31; p < top; ) {
printk("%04lx: ", p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
printk(" ");
else {
if (__get_user(val, (unsigned int __user *)p)) {
printk("\n");
goto out;
}
printk("%08x ", val);
}
}
printk("\n");
}
out:
return;
}
#ifdef CONFIG_FRAME_POINTER
static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs)
{
unsigned long __user *fp;
unsigned long __user *last_fp = NULL;
if (regs) {
fp = (unsigned long __user *)regs->r7;
} else if (tsk == current) {
register unsigned long __user *real_fp __asm__("r7");
fp = real_fp;
} else {
fp = (unsigned long __user *)tsk->thread.cpu_context.r7;
}
/*
* Walk the stack until (a) we get an exception, (b) the frame
* pointer becomes zero, or (c) the frame pointer gets stuck
* at the same value.
*/
while (fp && fp != last_fp) {
unsigned long lr, new_fp = 0;
last_fp = fp;
if (__get_user(lr, fp))
break;
if (fp && __get_user(new_fp, fp + 1))
break;
fp = (unsigned long __user *)new_fp;
printk(" [<%08lx>] ", lr);
print_symbol("%s\n", lr);
}
printk("\n");
}
#else
static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs)
{
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (kernel_text_address(addr)) {
printk(" [<%08lx>] ", addr);
print_symbol("%s\n", addr);
}
}
}
#endif
void show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs)
{
if (regs &&
(((regs->sr & MODE_MASK) == MODE_EXCEPTION) ||
((regs->sr & MODE_MASK) == MODE_USER)))
return;
printk ("Call trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
__show_trace(tsk, sp, regs);
printk("\n");
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
unsigned long stack;
if (!tsk)
tsk = current;
if (sp == 0) {
if (tsk == current) {
register unsigned long *real_sp __asm__("sp");
sp = real_sp;
} else {
sp = (unsigned long *)tsk->thread.cpu_context.ksp;
}
}
stack = (unsigned long)sp;
dump_mem("Stack: ", stack,
THREAD_SIZE + (unsigned long)tsk->thread_info);
show_trace(tsk, sp, NULL);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
ATOMIC_NOTIFIER_HEAD(avr32_die_chain);
int register_die_notifier(struct notifier_block *nb)
{
pr_debug("register_die_notifier: %p\n", nb);
return atomic_notifier_chain_register(&avr32_die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);
int unregister_die_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&avr32_die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);
static DEFINE_SPINLOCK(die_lock);
void __die(const char *str, struct pt_regs *regs, unsigned long err,
const char *file, const char *func, unsigned long line)
{
struct task_struct *tsk = current;
static int die_counter;
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk(KERN_ALERT "%s", str);
if (file && func)
printk(" in %s:%s, line %ld", file, func, line);
printk("[#%d]:\n", ++die_counter);
print_modules();
show_regs(regs);
printk("Process %s (pid: %d, stack limit = 0x%p)\n",
tsk->comm, tsk->pid, tsk->thread_info + 1);
if (!user_mode(regs) || in_interrupt()) {
dump_mem("Stack: ", regs->sp,
THREAD_SIZE + (unsigned long)tsk->thread_info);
}
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
}
void __die_if_kernel(const char *str, struct pt_regs *regs, unsigned long err,
const char *file, const char *func, unsigned long line)
{
if (!user_mode(regs))
__die(str, regs, err, file, func, line);
}
asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
{
#ifdef CONFIG_SUBARCH_AVR32B
/*
* The exception entry always saves RSR_EX. For NMI, this is
* wrong; it should be RSR_NMI
*/
regs->sr = sysreg_read(RSR_NMI);
#endif
printk("NMI taken!!!!\n");
die("NMI", regs, ecr);
BUG();
}
asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs)
{
printk("Unable to handle critical exception %lu at pc = %08lx!\n",
ecr, regs->pc);
die("Oops", regs, ecr);
BUG();
}
asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("Oops: Address exception in kernel mode", regs, ecr);
#ifdef DEBUG
if (ecr == ECR_ADDR_ALIGN_X)
pr_debug("Instruction Address Exception at pc = %08lx\n",
regs->pc);
else if (ecr == ECR_ADDR_ALIGN_R)
pr_debug("Data Address Exception (Read) at pc = %08lx\n",
regs->pc);
else if (ecr == ECR_ADDR_ALIGN_W)
pr_debug("Data Address Exception (Write) at pc = %08lx\n",
regs->pc);
else
BUG();
show_regs(regs);
#endif
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)regs->pc;
force_sig_info(SIGBUS, &info, current);
}
/* This way of handling undefined instructions is stolen from ARM */
static LIST_HEAD(undef_hook);
static spinlock_t undef_lock = SPIN_LOCK_UNLOCKED;
void register_undef_hook(struct undef_hook *hook)
{
spin_lock_irq(&undef_lock);
list_add(&hook->node, &undef_hook);
spin_unlock_irq(&undef_lock);
}
void unregister_undef_hook(struct undef_hook *hook)
{
spin_lock_irq(&undef_lock);
list_del(&hook->node);
spin_unlock_irq(&undef_lock);
}
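The hooks registered above are consulted by do_illegal_opcode() below: a hook claims an instruction when (insn & insn_mask) == insn_val and its fn() returns 0. A hypothetical user of this interface might look as follows (the mask, value and PC step are made-up illustrations, not real AVR32 encodings; the field names are taken from the uses visible in this file):

/* Hypothetical undef hook; the opcode mask/value and the 4-byte PC step are
 * illustrative only. */
static int my_emulate(struct pt_regs *regs, u32 insn)
{
        /* emulate the instruction here, then skip past it */
        regs->pc += 4;          /* assumes an extended (4-byte) encoding */
        return 0;               /* 0 = handled, non-zero = keep looking */
}

static struct undef_hook my_hook = {
        .insn_mask = 0xffff0000,        /* illustrative */
        .insn_val  = 0xf2600000,        /* illustrative */
        .fn        = my_emulate,
};

static int __init my_hook_init(void)
{
        register_undef_hook(&my_hook);
        return 0;
}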
static int do_cop_absent(u32 insn)
{
int cop_nr;
u32 cpucr;
if ( (insn & 0xfdf00000) == 0xf1900000 )
/* LDC0 */
cop_nr = 0;
else
cop_nr = (insn >> 13) & 0x7;
/* Try enabling the coprocessor */
cpucr = sysreg_read(CPUCR);
cpucr |= (1 << (24 + cop_nr));
sysreg_write(CPUCR, cpucr);
cpucr = sysreg_read(CPUCR);
if ( !(cpucr & (1 << (24 + cop_nr))) ){
printk("Coprocessor #%i not found!\n", cop_nr);
return -1;
}
return 0;
}
#ifdef CONFIG_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
{
char *file;
u16 line;
char c;
if (__get_user(line, (u16 __user *)(regs->pc + 2)))
return;
if (__get_user(file, (char * __user *)(regs->pc + 4))
|| (unsigned long)file < PAGE_OFFSET
|| __get_user(c, file))
file = "<bad filename>";
printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
}
#else
static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
{
}
#endif
#endif
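do_bug_verbose() above assumes a fixed record at the BUG() site: the 16-bit BUG opcode at pc, a 16-bit line number at pc + 2 and a 32-bit pointer to the file name at pc + 4. As a conceptual C layout (a sketch of what the reads expect, not the actual BUG() macro):

/* Conceptual layout read by do_bug_verbose(); sketch only. */
struct bug_site {
        unsigned short opcode;  /* AVR32_BUG_OPCODE, at pc */
        unsigned short line;    /* read via __get_user() from pc + 2 */
        const char *file;       /* read via __get_user() from pc + 4 */
};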
asmlinkage void do_illegal_opcode(unsigned long ecr, struct pt_regs *regs)
{
u32 insn;
struct undef_hook *hook;
siginfo_t info;
void __user *pc;
if (!user_mode(regs))
goto kernel_trap;
local_irq_enable();
pc = (void __user *)instruction_pointer(regs);
if (__get_user(insn, (u32 __user *)pc))
goto invalid_area;
if (ecr == ECR_COPROC_ABSENT) {
if (do_cop_absent(insn) == 0)
return;
}
spin_lock_irq(&undef_lock);
list_for_each_entry(hook, &undef_hook, node) {
if ((insn & hook->insn_mask) == hook->insn_val) {
if (hook->fn(regs, insn) == 0) {
spin_unlock_irq(&undef_lock);
return;
}
}
}
spin_unlock_irq(&undef_lock);
invalid_area:
#ifdef DEBUG
printk("Illegal instruction at pc = %08lx\n", regs->pc);
if (regs->pc < TASK_SIZE) {
unsigned long ptbr, pgd, pte, *p;
ptbr = sysreg_read(PTBR);
p = (unsigned long *)ptbr;
pgd = p[regs->pc >> 22];
p = (unsigned long *)((pgd & 0x1ffff000) | 0x80000000);
pte = p[(regs->pc >> 12) & 0x3ff];
printk("page table: 0x%08lx -> 0x%08lx -> 0x%08lx\n", ptbr, pgd, pte);
}
#endif
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_addr = (void __user *)regs->pc;
switch (ecr) {
case ECR_ILLEGAL_OPCODE:
case ECR_UNIMPL_INSTRUCTION:
info.si_code = ILL_ILLOPC;
break;
case ECR_PRIVILEGE_VIOLATION:
info.si_code = ILL_PRVOPC;
break;
case ECR_COPROC_ABSENT:
info.si_code = ILL_COPROC;
break;
default:
BUG();
}
force_sig_info(SIGILL, &info, current);
return;
kernel_trap:
#ifdef CONFIG_BUG
if (__kernel_text_address(instruction_pointer(regs))) {
insn = *(u16 *)instruction_pointer(regs);
if (insn == AVR32_BUG_OPCODE) {
do_bug_verbose(regs, insn);
die("Kernel BUG", regs, 0);
return;
}
}
#endif
die("Oops: Illegal instruction in kernel code", regs, ecr);
}
asmlinkage void do_fpe(unsigned long ecr, struct pt_regs *regs)
{
siginfo_t info;
printk("Floating-point exception at pc = %08lx\n", regs->pc);
/* We have no FPU... */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_addr = (void __user *)regs->pc;
info.si_code = ILL_COPROC;
force_sig_info(SIGILL, &info, current);
}
void __init trap_init(void)
{
}
/*
* AVR32 linker script for the Linux kernel
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define LOAD_OFFSET 0x00000000
#include <asm-generic/vmlinux.lds.h>
OUTPUT_FORMAT("elf32-avr32", "elf32-avr32", "elf32-avr32")
OUTPUT_ARCH(avr32)
ENTRY(_start)
/* Big endian */
jiffies = jiffies_64 + 4;
SECTIONS
{
. = CONFIG_ENTRY_ADDRESS;
.init : AT(ADDR(.init) - LOAD_OFFSET) {
_stext = .;
__init_begin = .;
_sinittext = .;
*(.text.reset)
*(.init.text)
_einittext = .;
. = ALIGN(4);
__tagtable_begin = .;
*(.taglist)
__tagtable_end = .;
*(.init.data)
. = ALIGN(16);
__setup_start = .;
*(.init.setup)
__setup_end = .;
. = ALIGN(4);
__initcall_start = .;
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
__initcall_end = .;
__con_initcall_start = .;
*(.con_initcall.init)
__con_initcall_end = .;
__security_initcall_start = .;
*(.security_initcall.init)
__security_initcall_end = .;
. = ALIGN(32);
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
. = ALIGN(4096);
__init_end = .;
}
. = ALIGN(8192);
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_evba = .;
_text = .;
*(.ex.text)
. = 0x50;
*(.tlbx.ex.text)
. = 0x60;
*(.tlbr.ex.text)
. = 0x70;
*(.tlbw.ex.text)
. = 0x100;
*(.scall.text)
*(.irq.text)
*(.text)
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
_etext = .;
} = 0xd703d703
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
RODATA
. = ALIGN(8192);
.data : AT(ADDR(.data) - LOAD_OFFSET) {
_data = .;
_sdata = .;
/*
* First, the init task union, aligned to an 8K boundary.
*/
*(.data.init_task)
/* Then, the cacheline aligned data */
. = ALIGN(32);
*(.data.cacheline_aligned)
/* And the rest... */
*(.data.rel*)
*(.data)
CONSTRUCTORS
_edata = .;
}
. = ALIGN(8);
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss)
*(COMMON)
. = ALIGN(8);
__bss_stop = .;
_end = .;
}
/* When something in the kernel is NOT compiled as a module, the module
* cleanup code and data are put into these segments. Both can then be
* thrown away, as cleanup code is never called unless it's a module.
*/
/DISCARD/ : {
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
}
DWARF_DEBUG
}
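The "jiffies = jiffies_64 + 4" alias near the top of the script works because AVR32 is big-endian: the low 32 bits of the 64-bit counter live at the higher address. A small host-side illustration (it prints the low word only when built for a big-endian target, which is exactly the property the alias relies on):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t jiffies_64 = 0x0000000112345678ULL;

        /* On a big-endian 32-bit target the low word sits at offset 4. */
        uint32_t *word_at_offset_4 = (uint32_t *)&jiffies_64 + 1;

        printf("word at offset 4: 0x%08x\n", *word_at_offset_4);
        return 0;
}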
#
# Makefile for AVR32-specific library files
#
lib-y := copy_user.o clear_user.o
lib-y += strncpy_from_user.o strnlen_user.o
lib-y += delay.o memset.o memcpy.o findbit.o
lib-y += csum_partial.o csum_partial_copy_generic.o
lib-y += io-readsw.o io-readsl.o io-writesw.o io-writesl.o
lib-y += __avr32_lsl64.o __avr32_lsr64.o __avr32_asr64.o
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* DWtype __avr32_asr64(DWtype u, word_type b)
*/
.text
.global __avr32_asr64
.type __avr32_asr64,@function
__avr32_asr64:
cp.w r12, 0
reteq r12
rsub r9, r12, 32
brle 1f
lsl r8, r11, r9
lsr r10, r10, r12
asr r11, r11, r12
or r10, r8
retal r12
1: neg r9
asr r10, r11, r9
asr r11, 31
retal r12
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* DWtype __avr32_lsl64(DWtype u, word_type b)
*/
.text
.global __avr32_lsl64
.type __avr32_lsl64,@function
__avr32_lsl64:
cp.w r12, 0
reteq r12
rsub r9, r12, 32
brle 1f
lsr r8, r10, r9
lsl r10, r10, r12
lsl r11, r11, r12
or r11, r8
retal r12
1: neg r9
lsl r11, r10, r9
mov r10, 0
retal r12
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* DWtype __avr32_lsr64(DWtype u, word_type b)
*/
.text
.global __avr32_lsr64
.type __avr32_lsr64,@function
__avr32_lsr64:
cp.w r12, 0
reteq r12
rsub r9, r12, 32
brle 1f
lsl r8, r11, r9
lsr r11, r11, r12
lsr r10, r10, r12
or r10, r8
retal r12
1: neg r9
lsr r10, r11, r9
mov r11, 0
retal r12
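All three helpers above use the same convention: the 64-bit operand is passed and returned in r11 (high word) and r10 (low word), with the shift count in r12. A C rendering of __avr32_asr64 for readability (a sketch that assumes 1 <= b <= 63 and GCC's arithmetic right shift of signed values; the logical variants differ only in using unsigned shifts and zero fill):

#include <stdint.h>

/* C sketch of __avr32_asr64: arithmetic right shift of a 64-bit value. */
static int64_t asr64_sketch(int64_t u, unsigned int b)
{
        uint32_t lo = (uint32_t)u;
        int32_t hi = (int32_t)(u >> 32);

        if (b == 0)
                return u;
        if (b < 32) {
                lo = (lo >> b) | ((uint32_t)hi << (32 - b));
                hi >>= b;
        } else {
                lo = (uint32_t)(hi >> (b - 32));
                hi >>= 31;              /* sign fill, like "asr r11, 31" */
        }
        return (int64_t)(((uint64_t)(uint32_t)hi << 32) | lo);
}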
/*
* Copyright 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
.text
.align 1
.global clear_user
.type clear_user, "function"
clear_user:
branch_if_kernel r8, __clear_user
ret_if_privileged r8, r12, r11, r11
.global __clear_user
.type __clear_user, "function"
__clear_user:
mov r9, r12
mov r8, 0
andl r9, 3, COH
brne 5f
1: sub r11, 4
brlt 2f
10: st.w r12++, r8
sub r11, 4
brge 10b
2: sub r11, -4
reteq 0
/* Unaligned count or address */
bld r11, 1
brcc 12f
11: st.h r12++, r8
sub r11, 2
reteq 0
12: st.b r12++, r8
retal 0
/* Unaligned address */
5: cp.w r11, 4
brlt 2b
lsl r9, 2
add pc, pc, r9
13: st.b r12++, r8
sub r11, 1
14: st.b r12++, r8
sub r11, 1
15: st.b r12++, r8
sub r11, 1
rjmp 1b
.size clear_user, . - clear_user
.size __clear_user, . - __clear_user
.section .fixup, "ax"
.align 1
18: sub r11, -4
19: retal r11
.section __ex_table, "a"
.align 2
.long 10b, 18b
.long 11b, 19b
.long 12b, 19b
.long 13b, 19b
.long 14b, 19b
.long 15b, 19b
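Each .long pair in the __ex_table section above associates a possibly-faulting store with a fixup: if the access faults, the page-fault handler looks the faulting PC up in the table and resumes at the fixup, which here returns the number of bytes left to clear. The generic mechanism looks roughly like this (a sketch; the real kernel keeps the table sorted and binary-searches it):

/* Conceptual exception-table entry and lookup; sketch only. */
struct exception_table_entry {
        unsigned long insn;     /* address of the faulting instruction */
        unsigned long fixup;    /* address to resume at */
};

static unsigned long find_fixup(const struct exception_table_entry *table,
                                unsigned long entries, unsigned long fault_pc)
{
        unsigned long i;

        for (i = 0; i < entries; i++)
                if (table[i].insn == fault_pc)
                        return table[i].fixup;
        return 0;               /* no fixup: a genuine kernel fault */
}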
/*
* Copy to/from userspace with optional address space checking.
*
* Copyright 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
/*
* __kernel_size_t
* __copy_user(void *to, const void *from, __kernel_size_t n)
*
* Returns the number of bytes not copied. Might be off by
* max 3 bytes if we get a fault in the main loop.
*
* The address-space checking functions simply fall through to
* the non-checking version.
*/
.text
.align 1
.global copy_from_user
.type copy_from_user, @function
copy_from_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r11, r10, r10
rjmp __copy_user
.size copy_from_user, . - copy_from_user
.global copy_to_user
.type copy_to_user, @function
copy_to_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r12, r10, r10
.size copy_to_user, . - copy_to_user
.global __copy_user
.type __copy_user, @function
__copy_user:
mov r9, r11
andl r9, 3, COH
brne 6f
/* At this point, from is word-aligned */
1: sub r10, 4
brlt 3f
2:
10: ld.w r8, r11++
11: st.w r12++, r8
sub r10, 4
brge 2b
3: sub r10, -4
reteq 0
/*
* Handle unaligned count. Need to be careful with r10 here so
* that we return the correct value even if we get a fault
*/
4:
20: ld.ub r8, r11++
21: st.b r12++, r8
sub r10, 1
reteq 0
22: ld.ub r8, r11++
23: st.b r12++, r8
sub r10, 1
reteq 0
24: ld.ub r8, r11++
25: st.b r12++, r8
retal 0
/* Handle unaligned from-pointer */
6: cp.w r10, 4
brlt 4b
rsub r9, r9, 4
30: ld.ub r8, r11++
31: st.b r12++, r8
sub r10, 1
sub r9, 1
breq 1b
32: ld.ub r8, r11++
33: st.b r12++, r8
sub r10, 1
sub r9, 1
breq 1b
34: ld.ub r8, r11++
35: st.b r12++, r8
sub r10, 1
rjmp 1b
.size __copy_user, . - __copy_user
.section .fixup,"ax"
.align 1
19: sub r10, -4
29: retal r10
.section __ex_table,"a"
.align 2
.long 10b, 19b
.long 11b, 19b
.long 20b, 29b
.long 21b, 29b
.long 22b, 29b
.long 23b, 29b
.long 24b, 29b
.long 25b, 29b
.long 30b, 29b
.long 31b, 29b
.long 32b, 29b
.long 33b, 29b
.long 34b, 29b
.long 35b, 29b
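The "bytes not copied" return value documented above is the contract callers depend on: zero means complete success, anything else is usually turned into -EFAULT. A hypothetical caller, for illustration:

/* Hypothetical caller showing the copy_from_user() return convention. */
static int example_read_request(void __user *arg)
{
        struct { int offset; int length; } req;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT; /* non-zero = some bytes were left uncopied */

        /* ... use req.offset / req.length ... */
        return 0;
}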
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* unsigned int csum_partial(const unsigned char *buff,
* int len, unsigned int sum)
*/
.text
.global csum_partial
.type csum_partial,"function"
.align 1
csum_partial:
/* checksum complete words, aligned or not */
3: sub r11, 4
brlt 5f
4: ld.w r9, r12++
add r10, r9
acr r10
sub r11, 4
brge 4b
/* return if we had a whole number of words */
5: sub r11, -4
reteq r10
/* checksum any remaining bytes at the end */
mov r9, 0
mov r8, 0
cp r11, 2
brlt 6f
ld.uh r9, r12++
sub r11, 2
breq 7f
lsl r9, 16
6: ld.ub r8, r12++
lsl r8, 8
7: or r9, r8
add r10, r9
acr r10
retal r10
.size csum_partial, . - csum_partial
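csum_partial above keeps a running 32-bit ones'-complement sum, folding each carry back in with acr. The word loop in portable C, for reference (a sketch covering whole words only; the byte tail is handled as in the assembly):

#include <stdint.h>
#include <stddef.h>

/* Sketch: ones'-complement accumulation over whole 32-bit words. */
static uint32_t csum_words(const uint32_t *buf, size_t nwords, uint32_t sum)
{
        while (nwords--) {
                uint64_t t = (uint64_t)sum + *buf++;
                sum = (uint32_t)t + (uint32_t)(t >> 32);  /* "acr": add carry */
        }
        return sum;
}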
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/errno.h>
#include <asm/asm.h>
/*
* unsigned int csum_partial_copy_generic(const char *src, char *dst, int len,
* int sum, int *src_err_ptr,
* int *dst_err_ptr)
*
* Copy src to dst while checksumming, otherwise like csum_partial.
*/
.macro ld_src size, reg, ptr
9999: ld.\size \reg, \ptr
.section __ex_table, "a"
.long 9999b, fixup_ld_src
.previous
.endm
.macro st_dst size, ptr, reg
9999: st.\size \ptr, \reg
.section __ex_table, "a"
.long 9999b, fixup_st_dst
.previous
.endm
.text
.global csum_partial_copy_generic
.type csum_partial_copy_generic,"function"
.align 1
csum_partial_copy_generic:
pushm r4-r7,lr
/* The inner loop */
1: sub r10, 4
brlt 5f
2: ld_src w, r5, r12++
st_dst w, r11++, r5
add r9, r5
acr r9
sub r10, 4
brge 2b
/* return if we had a whole number of words */
5: sub r10, -4
brne 7f
6: mov r12, r9
popm r4-r7,pc
/* handle additional bytes at the tail */
7: mov r5, 0
mov r4, 32
8: ld_src ub, r6, r12++
st_dst b, r11++, r6
lsl r5, 8
sub r4, 8
bfins r5, r6, 0, 8
sub r10, 1
brne 8b
lsl r5, r5, r4
add r9, r5
acr r9
rjmp 6b
/* Exception handler */
.section .fixup,"ax"
.align 1
fixup_ld_src:
mov r9, -EFAULT
cp.w r8, 0
breq 1f
st.w r8[0], r9
1: /*
* TODO: zero the complete destination - computing the rest
* is too much work
*/
mov r9, 0
rjmp 6b
fixup_st_dst:
mov r9, -EFAULT
lddsp r8, sp[20]
cp.w r8, 0
breq 1f
st.w r8[0], r9
1: mov r9, 0
rjmp 6b
.previous
/*
* Precise Delay Loops for avr32
*
* Copyright (C) 1993 Linus Torvalds
* Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/delay.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
int read_current_timer(unsigned long *timer_value)
{
*timer_value = sysreg_read(COUNT);
return 0;
}
void __delay(unsigned long loops)
{
unsigned bclock, now;
bclock = sysreg_read(COUNT);
do {
now = sysreg_read(COUNT);
} while ((now - bclock) < loops);
}
inline void __const_udelay(unsigned long xloops)
{
unsigned long long loops;
asm("mulu.d %0, %1, %2"
: "=r"(loops)
: "r"(current_cpu_data.loops_per_jiffy * HZ), "r"(xloops));
__delay(loops >> 32);
}
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
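The magic constant 0x10c7 is ceil(2^32 / 10^6), so __udelay() effectively computes loops = loops_per_jiffy * HZ * usecs / 10^6 using only a multiply and a shift. A user-space check of the arithmetic (loops_per_jiffy and HZ are assumed example values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lpj = 1000000, hz = 100;       /* assumed example values */
        uint32_t usecs = 250;
        uint32_t xloops = usecs * 0x10c7;       /* usecs * ceil(2^32 / 10^6) */
        uint64_t loops = (uint64_t)lpj * hz * xloops;

        /* Same as lpj * hz * usecs / 10^6, but without a 64-bit divide. */
        printf("delay loops: %llu\n", (unsigned long long)(loops >> 32));
        return 0;
}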
/*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
.text
/*
* unsigned long find_first_zero_bit(const unsigned long *addr,
* unsigned long size)
*/
ENTRY(find_first_zero_bit)
cp.w r11, 0
reteq r11
mov r9, r11
1: ld.w r8, r12[0]
com r8
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
/*
* unsigned long find_next_zero_bit(const unsigned long *addr,
* unsigned long size,
* unsigned long offset)
*/
ENTRY(find_next_zero_bit)
lsr r8, r10, 5
sub r9, r11, r10
retle r11
lsl r8, 2
add r12, r8
andl r10, 31, COH
breq 1f
/* offset is not word-aligned. Handle the first (32 - r10) bits */
ld.w r8, r12[0]
com r8
sub r12, -4
lsr r8, r8, r10
brne .L_found
/* r9 = r9 - (32 - r10) = r9 + r10 - 32 */
add r9, r10
sub r9, 32
retle r11
/* Main loop. offset must be word-aligned */
1: ld.w r8, r12[0]
com r8
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
/* Common return path for when a bit is actually found. */
.L_found:
brev r8
clz r10, r8
rsub r9, r11
add r10, r9
/* XXX: If we don't have to return exactly "size" when the bit
is not found, we may drop this "min" thing */
min r12, r11, r10
retal r12
/*
* unsigned long find_first_bit(const unsigned long *addr,
* unsigned long size)
*/
ENTRY(find_first_bit)
cp.w r11, 0
reteq r11
mov r9, r11
1: ld.w r8, r12[0]
cp.w r8, 0
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
/*
* unsigned long find_next_bit(const unsigned long *addr,
* unsigned long size,
* unsigned long offset)
*/
ENTRY(find_next_bit)
lsr r8, r10, 5
sub r9, r11, r10
retle r11
lsl r8, 2
add r12, r8
andl r10, 31, COH
breq 1f
/* offset is not word-aligned. Handle the first (32 - r10) bits */
ld.w r8, r12[0]
sub r12, -4
lsr r8, r8, r10
brne .L_found
/* r9 = r9 - (32 - r10) = r9 + r10 - 32 */
add r9, r10
sub r9, 32
retle r11
/* Main loop. offset must be word-aligned */
1: ld.w r8, r12[0]
cp.w r8, 0
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
ENTRY(generic_find_next_zero_le_bit)
lsr r8, r10, 5
sub r9, r11, r10
retle r11
lsl r8, 2
add r12, r8
andl r10, 31, COH
breq 1f
/* offset is not word-aligned. Handle the first (32 - r10) bits */
ldswp.w r8, r12[0]
sub r12, -4
lsr r8, r8, r10
brne .L_found
/* r9 = r9 - (32 - r10) = r9 + r10 - 32 */
add r9, r10
sub r9, 32
retle r11
/* Main loop. offset must be word-aligned */
1: ldswp.w r8, r12[0]
cp.w r8, 0
brne .L_found
sub r12, -4
sub r9, 32
brgt 1b
retal r11
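The .L_found path above turns "index of the lowest set bit" into brev followed by clz, and clamps the result to size. The same logic for find_first_zero_bit() in C, for reference (a sketch; __builtin_ctz stands in for the brev + clz pair):

#include <stdint.h>

/* C sketch of find_first_zero_bit(): bit number, or size if none found. */
static unsigned long first_zero_bit(const uint32_t *addr, unsigned long size)
{
        unsigned long base;

        for (base = 0; base < size; base += 32, addr++) {
                uint32_t inverted = ~*addr;             /* "com r8" */
                if (inverted) {
                        /* ctz(w) == clz(bit-reverse(w)), i.e. brev + clz */
                        unsigned long bit = base + __builtin_ctz(inverted);
                        return bit < size ? bit : size; /* the final "min" */
                }
        }
        return size;
}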
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.global __raw_readsl
.type __raw_readsl,@function
__raw_readsl:
cp.w r10, 0
reteq r12
/*
* If r11 isn't properly aligned, we might get an exception on
* some implementations. But there's not much we can do about it.
*/
1: ld.w r8, r12[0]
sub r10, 1
st.w r11++, r8
brne 1b
retal r12
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.Lnot_word_aligned:
/*
* Bad alignment will cause a hardware exception, which is as
* good as anything. No need for us to check for proper alignment.
*/
ld.uh r8, r12[0]
sub r10, 1
st.h r11++, r8
/* fall through */
.global __raw_readsw
.type __raw_readsw,@function
__raw_readsw:
cp.w r10, 0
reteq r12
mov r9, 3
tst r11, r9
brne .Lnot_word_aligned
sub r10, 2
brlt 2f
1: ldins.h r8:t, r12[0]
ldins.h r8:b, r12[0]
st.w r11++, r8
sub r10, 2
brge 1b
2: sub r10, -2
reteq r12
ld.uh r8, r12[0]
st.h r11++, r8
retal r12
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.global __raw_writesl
.type __raw_writesl,@function
__raw_writesl:
cp.w r10, 0
reteq r12
1: ld.w r8, r11++
sub r10, 1
st.w r12[0], r8
brne 1b
retal r12
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
.Lnot_word_aligned:
ld.uh r8, r11++
sub r10, 1
st.h r12[0], r8
.global __raw_writesw
.type __raw_writesw,@function
__raw_writesw:
cp.w r10, 0
mov r9, 3
reteq r12
tst r11, r9
brne .Lnot_word_aligned
sub r10, 2
brlt 2f
1: ld.w r8, r11++
bfextu r9, r8, 16, 16
st.h r12[0], r9
st.h r12[0], r8
sub r10, 2
brge 1b
2: sub r10, -2
reteq r12
ld.uh r8, r11++
st.h r12[0], r8
retal r12
/* Definitions for various functions 'borrowed' from gcc-3.4.3 */
#define BITS_PER_UNIT 8
typedef int QItype __attribute__ ((mode (QI)));
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef int HItype __attribute__ ((mode (HI)));
typedef unsigned int UHItype __attribute__ ((mode (HI)));
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
typedef float SFtype __attribute__ ((mode (SF)));
typedef float DFtype __attribute__ ((mode (DF)));
typedef int word_type __attribute__ ((mode (__word__)));
#define W_TYPE_SIZE (4 * BITS_PER_UNIT)
#define Wtype SItype
#define UWtype USItype
#define HWtype SItype
#define UHWtype USItype
#define DWtype DItype
#define UDWtype UDItype
#define __NW(a,b) __ ## a ## si ## b
#define __NDW(a,b) __ ## a ## di ## b
struct DWstruct {Wtype high, low;};
typedef union
{
struct DWstruct s;
DWtype ll;
} DWunion;
/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000
Free Software Foundation, Inc.
This definition file is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 2, or (at your option) any later version.
This definition file is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* Borrowed from gcc-3.4.3 */
#define __BITS4 (W_TYPE_SIZE / 4)
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
#define count_leading_zeros(count, x) ((count) = __builtin_clz(x))
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
do { \
UWtype __d1, __d0, __q1, __q0; \
UWtype __r1, __r0, __m; \
__d1 = __ll_highpart (d); \
__d0 = __ll_lowpart (d); \
\
__r1 = (n1) % __d1; \
__q1 = (n1) / __d1; \
__m = (UWtype) __q1 * __d0; \
__r1 = __r1 * __ll_B | __ll_highpart (n0); \
if (__r1 < __m) \
{ \
__q1--, __r1 += (d); \
if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
if (__r1 < __m) \
__q1--, __r1 += (d); \
} \
__r1 -= __m; \
\
__r0 = __r1 % __d1; \
__q0 = __r1 / __d1; \
__m = (UWtype) __q0 * __d0; \
__r0 = __r0 * __ll_B | __ll_lowpart (n0); \
if (__r0 < __m) \
{ \
__q0--, __r0 += (d); \
if (__r0 >= (d)) \
if (__r0 < __m) \
__q0--, __r0 += (d); \
} \
__r0 -= __m; \
\
(q) = (UWtype) __q1 * __ll_B | __q0; \
(r) = __r0; \
} while (0)
#define udiv_qrnnd __udiv_qrnnd_c
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
do { \
UWtype __x; \
__x = (al) - (bl); \
(sh) = (ah) - (bh) - (__x > (al)); \
(sl) = __x; \
} while (0)
#define umul_ppmm(w1, w0, u, v) \
do { \
UWtype __x0, __x1, __x2, __x3; \
UHWtype __ul, __vl, __uh, __vh; \
\
__ul = __ll_lowpart (u); \
__uh = __ll_highpart (u); \
__vl = __ll_lowpart (v); \
__vh = __ll_highpart (v); \
\
__x0 = (UWtype) __ul * __vl; \
__x1 = (UWtype) __ul * __vh; \
__x2 = (UWtype) __uh * __vl; \
__x3 = (UWtype) __uh * __vh; \
\
__x1 += __ll_highpart (__x0);/* this can't give carry */ \
__x1 += __x2; /* but this indeed can */ \
if (__x1 < __x2) /* did we get it? */ \
__x3 += __ll_B; /* yes, add it in the proper pos. */ \
\
(w1) = __x3 + __ll_highpart (__x1); \
(w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
} while (0)
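umul_ppmm() above builds a 64-bit product from 16-bit halves, propagating the one possible carry by hand. A quick user-space self-check of that reasoning against a native 64-bit multiply:

#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint32_t u = 0x89abcdef, v = 0x12345678;
        uint32_t ul = u & 0xffff, uh = u >> 16;
        uint32_t vl = v & 0xffff, vh = v >> 16;
        uint32_t x0 = ul * vl, x1 = ul * vh, x2 = uh * vl, x3 = uh * vh;

        x1 += x0 >> 16;                 /* cannot carry */
        x1 += x2;                       /* can carry ... */
        if (x1 < x2)
                x3 += 0x10000;          /* ... so add it back in the right place */

        uint32_t w1 = x3 + (x1 >> 16);
        uint32_t w0 = (x1 << 16) + (x0 & 0xffff);

        assert((((uint64_t)w1 << 32) | w0) == (uint64_t)u * v);
        return 0;
}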
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* void *memcpy(void *to, const void *from, unsigned long n)
*
* This implementation does word-aligned loads in the main loop,
* possibly sacrificing alignment of stores.
*
* Hopefully, in most cases, both "to" and "from" will be
* word-aligned to begin with.
*/
.text
.global memcpy
.type memcpy, @function
memcpy:
mov r9, r11
andl r9, 3, COH
brne 1f
/* At this point, "from" is word-aligned */
2: sub r10, 4
mov r9, r12
brlt 4f
3: ld.w r8, r11++
sub r10, 4
st.w r12++, r8
brge 3b
4: neg r10
reteq r9
/* Handle unaligned count */
lsl r10, 2
add pc, pc, r10
ld.ub r8, r11++
st.b r12++, r8
ld.ub r8, r11++
st.b r12++, r8
ld.ub r8, r11++
st.b r12++, r8
retal r9
/* Handle unaligned "from" pointer */
1: sub r10, 4
brlt 4b
add r10, r9
lsl r9, 2
add pc, pc, r9
ld.ub r8, r11++
st.b r12++, r8
ld.ub r8, r11++
st.b r12++, r8
ld.ub r8, r11++
st.b r12++, r8
rjmp 2b
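The "add pc, pc, rX" sequences above are computed jumps into the run of byte copies, i.e. a small Duff's-device-style dispatch for the 0-3 trailing (or leading) bytes. The tail handling is equivalent to a fall-through switch in C (sketch):

/* Sketch of the tail handling: copy the last (n & 3) bytes. */
static void copy_tail(unsigned char *to, const unsigned char *from,
                      unsigned long n)
{
        switch (n & 3) {        /* deliberate fall-through */
        case 3: *to++ = *from++;
        case 2: *to++ = *from++;
        case 1: *to++ = *from++;
        case 0: break;
        }
}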
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on linux/arch/arm/lib/memset.S
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised string functions
*/
#include <asm/asm.h>
/*
* r12: void *b
* r11: int c
* r10: size_t len
*
* Returns b in r12
*/
.text
.global memset
.type memset, @function
.align 5
memset:
mov r9, r12
mov r8, r12
or r11, r11, r11 << 8
andl r9, 3, COH
brne 1f
2: or r11, r11, r11 << 16
sub r10, 4
brlt 5f
/* Let's do some real work */
4: st.w r8++, r11
sub r10, 4
brge 4b
/*
* When we get here, we've got less than 4 bytes to set. r10
* might be negative.
*/
5: sub r10, -4
reteq r12
/* Fastpath ends here, exactly 32 bytes from memset */
/* Handle unaligned count or pointer */
bld r10, 1
brcc 6f
st.b r8++, r11
st.b r8++, r11
bld r10, 0
retcc r12
6: st.b r8++, r11
retal r12
/* Handle unaligned pointer */
1: sub r10, 4
brlt 5b
add r10, r9
lsl r9, 1
add pc, r9
st.b r8++, r11
st.b r8++, r11
st.b r8++, r11
rjmp 2b
.size memset, . - memset
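The pair of or-with-shift instructions at the top of memset replicates the fill byte into all four byte lanes before the word-store loop; in C terms:

#include <stdint.h>

/* Splat the low byte of c into every byte of a 32-bit word. */
static uint32_t splat_byte(uint32_t c)
{
        c |= c << 8;            /* 0x000000ab -> 0x0000abab */
        c |= c << 16;           /* 0x0000abab -> 0xabababab */
        return c;
}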
/*
* Copy to/from userspace with optional address space checking.
*
* Copyright 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
/*
* long strncpy_from_user(char *dst, const char *src, long count)
*
* On success, returns the length of the string, not including
* the terminating NUL.
*
* If the string is longer than count, returns count
*
* If userspace access fails, returns -EFAULT
*/
.text
.align 1
.global strncpy_from_user
.type strncpy_from_user, "function"
strncpy_from_user:
mov r9, -EFAULT
branch_if_kernel r8, __strncpy_from_user
ret_if_privileged r8, r11, r10, r9
.global __strncpy_from_user
.type __strncpy_from_user, "function"
__strncpy_from_user:
cp.w r10, 0
reteq 0
mov r9, r10
1: ld.ub r8, r11++
st.b r12++, r8
cp.w r8, 0
breq 2f
sub r9, 1
brne 1b
2: sub r10, r9
retal r10
.section .fixup, "ax"
.align 1
3: mov r12, -EFAULT
retal r12
.section __ex_table, "a"
.align 2
.long 1b, 3b
/*
* Copy to/from userspace with optional address space checking.
*
* Copyright 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/asm.h>
.text
.align 1
.global strnlen_user
.type strnlen_user, "function"
strnlen_user:
branch_if_kernel r8, __strnlen_user
sub r8, r11, 1
add r8, r12
retcs 0
brmi adjust_length /* do a closer inspection */
.global __strnlen_user
.type __strnlen_user, "function"
__strnlen_user:
mov r10, r12
10: ld.ub r8, r12++
cp.w r8, 0
breq 2f
sub r11, 1
brne 10b
sub r12, -1
2: sub r12, r10
retal r12
.type adjust_length, "function"
adjust_length:
cp.w r12, 0 /* addr must always be < TASK_SIZE */
retmi 0
pushm lr
lddpc lr, _task_size
sub r11, lr, r12
mov r9, r11
rcall __strnlen_user
cp.w r12, r9
brgt 1f
popm pc
1: popm pc, r12=0
.align 2
_task_size:
.long TASK_SIZE
.section .fixup, "ax"
.align 1
19: retal 0
.section __ex_table, "a"
.align 2
.long 10b, 19b
obj-y += at32ap.o clock.o pio.o intc.o extint.o
obj-$(CONFIG_CPU_AT32AP7000) += at32ap7000.o
/*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/arch/init.h>
#include <asm/arch/sm.h>
struct at32_sm system_manager;
static int __init at32_sm_init(void)
{
struct resource *regs;
struct at32_sm *sm = &system_manager;
int ret = -ENXIO;
regs = platform_get_resource(&at32_sm_device, IORESOURCE_MEM, 0);
if (!regs)
goto fail;
spin_lock_init(&sm->lock);
sm->pdev = &at32_sm_device;
ret = -ENOMEM;
sm->regs = ioremap(regs->start, regs->end - regs->start + 1);
if (!sm->regs)
goto fail;
return 0;
fail:
printk(KERN_ERR "Failed to initialize System Manager: %d\n", ret);
return ret;
}
void __init setup_platform(void)
{
at32_sm_init();
at32_clock_init();
at32_portmux_init();
/* FIXME: This doesn't belong here */
at32_setup_serial_console(1);
}
static int __init pdc_probe(struct platform_device *pdev)
{
struct clk *pclk, *hclk;
pclk = clk_get(&pdev->dev, "pclk");
if (IS_ERR(pclk)) {
dev_err(&pdev->dev, "no pclk defined\n");
return PTR_ERR(pclk);
}
hclk = clk_get(&pdev->dev, "hclk");
if (IS_ERR(hclk)) {
dev_err(&pdev->dev, "no hclk defined\n");
clk_put(pclk);
return PTR_ERR(hclk);
}
clk_enable(pclk);
clk_enable(hclk);
dev_info(&pdev->dev, "Atmel Peripheral DMA Controller enabled\n");
return 0;
}
static struct platform_driver pdc_driver = {
.probe = pdc_probe,
.driver = {
.name = "pdc",
},
};
static int __init pdc_init(void)
{
return platform_driver_register(&pdc_driver);
}
arch_initcall(pdc_init);
/*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/arch/board.h>
#include <asm/arch/portmux.h>
#include <asm/arch/sm.h>
#include "clock.h"
#include "pio.h"
#include "sm.h"
#define PBMEM(base) \
{ \
.start = base, \
.end = base + 0x3ff, \
.flags = IORESOURCE_MEM, \
}
#define IRQ(num) \
{ \
.start = num, \
.end = num, \
.flags = IORESOURCE_IRQ, \
}
#define NAMED_IRQ(num, _name) \
{ \
.start = num, \
.end = num, \
.name = _name, \
.flags = IORESOURCE_IRQ, \
}
#define DEFINE_DEV(_name, _id) \
static struct platform_device _name##_id##_device = { \
.name = #_name, \
.id = _id, \
.resource = _name##_id##_resource, \
.num_resources = ARRAY_SIZE(_name##_id##_resource), \
}
#define DEFINE_DEV_DATA(_name, _id) \
static struct platform_device _name##_id##_device = { \
.name = #_name, \
.id = _id, \
.dev = { \
.platform_data = &_name##_id##_data, \
}, \
.resource = _name##_id##_resource, \
.num_resources = ARRAY_SIZE(_name##_id##_resource), \
}
#define DEV_CLK(_name, devname, bus, _index) \
static struct clk devname##_##_name = { \
.name = #_name, \
.dev = &devname##_device.dev, \
.parent = &bus##_clk, \
.mode = bus##_clk_mode, \
.get_rate = bus##_clk_get_rate, \
.index = _index, \
}
enum {
PIOA,
PIOB,
PIOC,
PIOD,
};
enum {
FUNC_A,
FUNC_B,
};
unsigned long at32ap7000_osc_rates[3] = {
[0] = 32768,
/* FIXME: these are ATSTK1002-specific */
[1] = 20000000,
[2] = 12000000,
};
static unsigned long osc_get_rate(struct clk *clk)
{
return at32ap7000_osc_rates[clk->index];
}
static unsigned long pll_get_rate(struct clk *clk, unsigned long control)
{
unsigned long div, mul, rate;
if (!(control & SM_BIT(PLLEN)))
return 0;
div = SM_BFEXT(PLLDIV, control) + 1;
mul = SM_BFEXT(PLLMUL, control) + 1;
rate = clk->parent->get_rate(clk->parent);
rate = (rate + div / 2) / div;
rate *= mul;
return rate;
}
static unsigned long pll0_get_rate(struct clk *clk)
{
u32 control;
control = sm_readl(&system_manager, PM_PLL0);
return pll_get_rate(clk, control);
}
static unsigned long pll1_get_rate(struct clk *clk)
{
u32 control;
control = sm_readl(&system_manager, PM_PLL1);
return pll_get_rate(clk, control);
}
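pll_get_rate() above implements fPLL = fparent * (PLLMUL + 1) / (PLLDIV + 1), with the division rounded to nearest. A worked example with made-up field values (not board defaults):

#include <stdio.h>

/* Worked example of the PLL formula; all values are illustrative. */
int main(void)
{
        unsigned long parent = 20000000;        /* assumed oscillator rate */
        unsigned long div = 0 + 1;              /* PLLDIV field + 1 */
        unsigned long mul = 6 + 1;              /* PLLMUL field + 1 */
        unsigned long rate = (parent + div / 2) / div * mul;

        printf("PLL rate: %lu Hz\n", rate);     /* 140000000 */
        return 0;
}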
/*
* The AT32AP7000 has five primary clock sources: One 32kHz
* oscillator, two crystal oscillators and two PLLs.
*/
static struct clk osc32k = {
.name = "osc32k",
.get_rate = osc_get_rate,
.users = 1,
.index = 0,
};
static struct clk osc0 = {
.name = "osc0",
.get_rate = osc_get_rate,
.users = 1,
.index = 1,
};
static struct clk osc1 = {
.name = "osc1",
.get_rate = osc_get_rate,
.index = 2,
};
static struct clk pll0 = {
.name = "pll0",
.get_rate = pll0_get_rate,
.parent = &osc0,
};
static struct clk pll1 = {
.name = "pll1",
.get_rate = pll1_get_rate,
.parent = &osc0,
};
/*
* The main clock can be either osc0 or pll0. The boot loader may
* have chosen one for us, so we don't really know which one until we
* have a look at the SM.
*/
static struct clk *main_clock;
/*
* Synchronous clocks are generated from the main clock. The clocks
* must satisfy the constraint
* fCPU >= fHSB >= fPB
* i.e. each clock must not be faster than its parent.
*/
static unsigned long bus_clk_get_rate(struct clk *clk, unsigned int shift)
{
return main_clock->get_rate(main_clock) >> shift;
};
static void cpu_clk_mode(struct clk *clk, int enabled)
{
struct at32_sm *sm = &system_manager;
unsigned long flags;
u32 mask;
spin_lock_irqsave(&sm->lock, flags);
mask = sm_readl(sm, PM_CPU_MASK);
if (enabled)
mask |= 1 << clk->index;
else
mask &= ~(1 << clk->index);
sm_writel(sm, PM_CPU_MASK, mask);
spin_unlock_irqrestore(&sm->lock, flags);
}
static unsigned long cpu_clk_get_rate(struct clk *clk)
{
unsigned long cksel, shift = 0;
cksel = sm_readl(&system_manager, PM_CKSEL);
if (cksel & SM_BIT(CPUDIV))
shift = SM_BFEXT(CPUSEL, cksel) + 1;
return bus_clk_get_rate(clk, shift);
}
static void hsb_clk_mode(struct clk *clk, int enabled)
{
struct at32_sm *sm = &system_manager;
unsigned long flags;
u32 mask;
spin_lock_irqsave(&sm->lock, flags);
mask = sm_readl(sm, PM_HSB_MASK);
if (enabled)
mask |= 1 << clk->index;
else
mask &= ~(1 << clk->index);
sm_writel(sm, PM_HSB_MASK, mask);
spin_unlock_irqrestore(&sm->lock, flags);
}
static unsigned long hsb_clk_get_rate(struct clk *clk)
{
unsigned long cksel, shift = 0;
cksel = sm_readl(&system_manager, PM_CKSEL);
if (cksel & SM_BIT(HSBDIV))
shift = SM_BFEXT(HSBSEL, cksel) + 1;
return bus_clk_get_rate(clk, shift);
}
static void pba_clk_mode(struct clk *clk, int enabled)
{
struct at32_sm *sm = &system_manager;
unsigned long flags;
u32 mask;
spin_lock_irqsave(&sm->lock, flags);
mask = sm_readl(sm, PM_PBA_MASK);
if (enabled)
mask |= 1 << clk->index;
else
mask &= ~(1 << clk->index);
sm_writel(sm, PM_PBA_MASK, mask);
spin_unlock_irqrestore(&sm->lock, flags);
}
static unsigned long pba_clk_get_rate(struct clk *clk)
{
unsigned long cksel, shift = 0;
cksel = sm_readl(&system_manager, PM_CKSEL);
if (cksel & SM_BIT(PBADIV))
shift = SM_BFEXT(PBASEL, cksel) + 1;
return bus_clk_get_rate(clk, shift);
}
static void pbb_clk_mode(struct clk *clk, int enabled)
{
struct at32_sm *sm = &system_manager;
unsigned long flags;
u32 mask;
spin_lock_irqsave(&sm->lock, flags);
mask = sm_readl(sm, PM_PBB_MASK);
if (enabled)
mask |= 1 << clk->index;
else
mask &= ~(1 << clk->index);
sm_writel(sm, PM_PBB_MASK, mask);
spin_unlock_irqrestore(&sm->lock, flags);
}
static unsigned long pbb_clk_get_rate(struct clk *clk)
{
unsigned long cksel, shift = 0;
cksel = sm_readl(&system_manager, PM_CKSEL);
if (cksel & SM_BIT(PBBDIV))
shift = SM_BFEXT(PBBSEL, cksel) + 1;
return bus_clk_get_rate(clk, shift);
}
static struct clk cpu_clk = {
.name = "cpu",
.get_rate = cpu_clk_get_rate,
.users = 1,
};
static struct clk hsb_clk = {
.name = "hsb",
.parent = &cpu_clk,
.get_rate = hsb_clk_get_rate,
};
static struct clk pba_clk = {
.name = "pba",
.parent = &hsb_clk,
.mode = hsb_clk_mode,
.get_rate = pba_clk_get_rate,
.index = 1,
};
static struct clk pbb_clk = {
.name = "pbb",
.parent = &hsb_clk,
.mode = hsb_clk_mode,
.get_rate = pbb_clk_get_rate,
.users = 1,
.index = 2,
};
/* --------------------------------------------------------------------
* Generic Clock operations
* -------------------------------------------------------------------- */
static void genclk_mode(struct clk *clk, int enabled)
{
u32 control;
BUG_ON(clk->index > 7);
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
if (enabled)
control |= SM_BIT(CEN);
else
control &= ~SM_BIT(CEN);
sm_writel(&system_manager, PM_GCCTRL + 4 * clk->index, control);
}
static unsigned long genclk_get_rate(struct clk *clk)
{
u32 control;
unsigned long div = 1;
BUG_ON(clk->index > 7);
if (!clk->parent)
return 0;
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
if (control & SM_BIT(DIVEN))
div = 2 * (SM_BFEXT(DIV, control) + 1);
return clk->parent->get_rate(clk->parent) / div;
}
static long genclk_set_rate(struct clk *clk, unsigned long rate, int apply)
{
u32 control;
unsigned long parent_rate, actual_rate, div;
BUG_ON(clk->index > 7);
if (!clk->parent)
return 0;
parent_rate = clk->parent->get_rate(clk->parent);
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
if (rate > 3 * parent_rate / 4) {
actual_rate = parent_rate;
control &= ~SM_BIT(DIVEN);
} else {
div = (parent_rate + rate) / (2 * rate) - 1;
control = SM_BFINS(DIV, div, control) | SM_BIT(DIVEN);
actual_rate = parent_rate / (2 * (div + 1));
}
printk("clk %s: new rate %lu (actual rate %lu)\n",
clk->name, rate, actual_rate);
if (apply)
sm_writel(&system_manager, PM_GCCTRL + 4 * clk->index,
control);
return actual_rate;
}
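genclk_set_rate() above picks div = round(parent / (2 * rate)) - 1 and reports the rate actually achievable, parent / (2 * (div + 1)). A worked example with illustrative numbers:

#include <stdio.h>

/* Worked example of the generic-clock divider choice; values illustrative. */
int main(void)
{
        unsigned long parent = 140000000, wanted = 25000000;
        unsigned long div = (parent + wanted) / (2 * wanted) - 1;
        unsigned long actual = parent / (2 * (div + 1));

        printf("div=%lu actual=%lu Hz\n", div, actual); /* div=2, ~23.3 MHz */
        return 0;
}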
int genclk_set_parent(struct clk *clk, struct clk *parent)
{
u32 control;
BUG_ON(clk->index > 7);
printk("clk %s: new parent %s (was %s)\n",
clk->name, parent->name,
clk->parent ? clk->parent->name : "(null)");
control = sm_readl(&system_manager, PM_GCCTRL + 4 * clk->index);
if (parent == &osc1 || parent == &pll1)
control |= SM_BIT(OSCSEL);
else if (parent == &osc0 || parent == &pll0)
control &= ~SM_BIT(OSCSEL);
else
return -EINVAL;
if (parent == &pll0 || parent == &pll1)
control |= SM_BIT(PLLSEL);
else
control &= ~SM_BIT(PLLSEL);
sm_writel(&system_manager, PM_GCCTRL + 4 * clk->index, control);
clk->parent = parent;
return 0;
}
/* --------------------------------------------------------------------
* System peripherals
* -------------------------------------------------------------------- */
static struct resource sm_resource[] = {
PBMEM(0xfff00000),
NAMED_IRQ(19, "eim"),
NAMED_IRQ(20, "pm"),
NAMED_IRQ(21, "rtc"),
};
struct platform_device at32_sm_device = {
.name = "sm",
.id = 0,
.resource = sm_resource,
.num_resources = ARRAY_SIZE(sm_resource),
};
DEV_CLK(pclk, at32_sm, pbb, 0);
static struct resource intc0_resource[] = {
PBMEM(0xfff00400),
};
struct platform_device at32_intc0_device = {
.name = "intc",
.id = 0,
.resource = intc0_resource,
.num_resources = ARRAY_SIZE(intc0_resource),
};
DEV_CLK(pclk, at32_intc0, pbb, 1);
static struct clk ebi_clk = {
.name = "ebi",
.parent = &hsb_clk,
.mode = hsb_clk_mode,
.get_rate = hsb_clk_get_rate,
.users = 1,
};
static struct clk hramc_clk = {
.name = "hramc",
.parent = &hsb_clk,
.mode = hsb_clk_mode,
.get_rate = hsb_clk_get_rate,
.users = 1,
};
static struct platform_device pdc_device = {
.name = "pdc",
.id = 0,
};
DEV_CLK(hclk, pdc, hsb, 4);
DEV_CLK(pclk, pdc, pba, 16);
static struct clk pico_clk = {
.name = "pico",
.parent = &cpu_clk,
.mode = cpu_clk_mode,
.get_rate = cpu_clk_get_rate,
.users = 1,
};
/* --------------------------------------------------------------------
* PIO
* -------------------------------------------------------------------- */
static struct resource pio0_resource[] = {
PBMEM(0xffe02800),
IRQ(13),
};
DEFINE_DEV(pio, 0);
DEV_CLK(mck, pio0, pba, 10);
static struct resource pio1_resource[] = {
PBMEM(0xffe02c00),
IRQ(14),
};
DEFINE_DEV(pio, 1);
DEV_CLK(mck, pio1, pba, 11);
static struct resource pio2_resource[] = {
PBMEM(0xffe03000),
IRQ(15),
};
DEFINE_DEV(pio, 2);
DEV_CLK(mck, pio2, pba, 12);
static struct resource pio3_resource[] = {
PBMEM(0xffe03400),
IRQ(16),
};
DEFINE_DEV(pio, 3);
DEV_CLK(mck, pio3, pba, 13);
void __init at32_add_system_devices(void)
{
system_manager.eim_first_irq = NR_INTERNAL_IRQS;
platform_device_register(&at32_sm_device);
platform_device_register(&at32_intc0_device);
platform_device_register(&pdc_device);
platform_device_register(&pio0_device);
platform_device_register(&pio1_device);
platform_device_register(&pio2_device);
platform_device_register(&pio3_device);
}
/* --------------------------------------------------------------------
* USART
* -------------------------------------------------------------------- */
static struct resource usart0_resource[] = {
PBMEM(0xffe00c00),
IRQ(7),
};
DEFINE_DEV(usart, 0);
DEV_CLK(usart, usart0, pba, 4);
static struct resource usart1_resource[] = {
PBMEM(0xffe01000),
IRQ(7),
};
DEFINE_DEV(usart, 1);
DEV_CLK(usart, usart1, pba, 4);
static struct resource usart2_resource[] = {
PBMEM(0xffe01400),
IRQ(8),
};
DEFINE_DEV(usart, 2);
DEV_CLK(usart, usart2, pba, 5);
static struct resource usart3_resource[] = {
PBMEM(0xffe01800),
IRQ(9),
};
DEFINE_DEV(usart, 3);
DEV_CLK(usart, usart3, pba, 6);
static inline void configure_usart0_pins(void)
{
portmux_set_func(PIOA, 8, FUNC_B); /* RXD */
portmux_set_func(PIOA, 9, FUNC_B); /* TXD */
}
static inline void configure_usart1_pins(void)
{
portmux_set_func(PIOA, 17, FUNC_A); /* RXD */
portmux_set_func(PIOA, 18, FUNC_A); /* TXD */
}
static inline void configure_usart2_pins(void)
{
portmux_set_func(PIOB, 26, FUNC_B); /* RXD */
portmux_set_func(PIOB, 27, FUNC_B); /* TXD */
}
static inline void configure_usart3_pins(void)
{
portmux_set_func(PIOB, 18, FUNC_B); /* RXD */
portmux_set_func(PIOB, 17, FUNC_B); /* TXD */
}
static struct platform_device *setup_usart(unsigned int id)
{
struct platform_device *pdev;
switch (id) {
case 0:
pdev = &usart0_device;
configure_usart0_pins();
break;
case 1:
pdev = &usart1_device;
configure_usart1_pins();
break;
case 2:
pdev = &usart2_device;
configure_usart2_pins();
break;
case 3:
pdev = &usart3_device;
configure_usart3_pins();
break;
default:
pdev = NULL;
break;
}
return pdev;
}
struct platform_device *__init at32_add_device_usart(unsigned int id)
{
struct platform_device *pdev;
pdev = setup_usart(id);
if (pdev)
platform_device_register(pdev);
return pdev;
}
struct platform_device *at91_default_console_device;
void __init at32_setup_serial_console(unsigned int usart_id)
{
at91_default_console_device = setup_usart(usart_id);
}
/* --------------------------------------------------------------------
* Ethernet
* -------------------------------------------------------------------- */
static struct eth_platform_data macb0_data;
static struct resource macb0_resource[] = {
PBMEM(0xfff01800),
IRQ(25),
};
DEFINE_DEV_DATA(macb, 0);
DEV_CLK(hclk, macb0, hsb, 8);
DEV_CLK(pclk, macb0, pbb, 6);
struct platform_device *__init
at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
{
struct platform_device *pdev;
switch (id) {
case 0:
pdev = &macb0_device;
portmux_set_func(PIOC, 3, FUNC_A); /* TXD0 */
portmux_set_func(PIOC, 4, FUNC_A); /* TXD1 */
portmux_set_func(PIOC, 7, FUNC_A); /* TXEN */
portmux_set_func(PIOC, 8, FUNC_A); /* TXCK */
portmux_set_func(PIOC, 9, FUNC_A); /* RXD0 */
portmux_set_func(PIOC, 10, FUNC_A); /* RXD1 */
portmux_set_func(PIOC, 13, FUNC_A); /* RXER */
portmux_set_func(PIOC, 15, FUNC_A); /* RXDV */
portmux_set_func(PIOC, 16, FUNC_A); /* MDC */
portmux_set_func(PIOC, 17, FUNC_A); /* MDIO */
if (!data->is_rmii) {
portmux_set_func(PIOC, 0, FUNC_A); /* COL */
portmux_set_func(PIOC, 1, FUNC_A); /* CRS */
portmux_set_func(PIOC, 2, FUNC_A); /* TXER */
portmux_set_func(PIOC, 5, FUNC_A); /* TXD2 */
portmux_set_func(PIOC, 6, FUNC_A); /* TXD3 */
portmux_set_func(PIOC, 11, FUNC_A); /* RXD2 */
portmux_set_func(PIOC, 12, FUNC_A); /* RXD3 */
portmux_set_func(PIOC, 14, FUNC_A); /* RXCK */
portmux_set_func(PIOC, 18, FUNC_A); /* SPD */
}
break;
default:
return NULL;
}
memcpy(pdev->dev.platform_data, data, sizeof(struct eth_platform_data));
platform_device_register(pdev);
return pdev;
}
/* --------------------------------------------------------------------
* SPI
* -------------------------------------------------------------------- */
static struct resource spi0_resource[] = {
PBMEM(0xffe00000),
IRQ(3),
};
DEFINE_DEV(spi, 0);
DEV_CLK(mck, spi0, pba, 0);
struct platform_device *__init at32_add_device_spi(unsigned int id)
{
struct platform_device *pdev;
switch (id) {
case 0:
pdev = &spi0_device;
portmux_set_func(PIOA, 0, FUNC_A); /* MISO */
portmux_set_func(PIOA, 1, FUNC_A); /* MOSI */
portmux_set_func(PIOA, 2, FUNC_A); /* SCK */
portmux_set_func(PIOA, 3, FUNC_A); /* NPCS0 */
portmux_set_func(PIOA, 4, FUNC_A); /* NPCS1 */
portmux_set_func(PIOA, 5, FUNC_A); /* NPCS2 */
break;
default:
return NULL;
}
platform_device_register(pdev);
return pdev;
}
/* --------------------------------------------------------------------
* LCDC
* -------------------------------------------------------------------- */
static struct lcdc_platform_data lcdc0_data;
static struct resource lcdc0_resource[] = {
{
.start = 0xff000000,
.end = 0xff000fff,
.flags = IORESOURCE_MEM,
},
IRQ(1),
};
DEFINE_DEV_DATA(lcdc, 0);
DEV_CLK(hclk, lcdc0, hsb, 7);
static struct clk lcdc0_pixclk = {
.name = "pixclk",
.dev = &lcdc0_device.dev,
.mode = genclk_mode,
.get_rate = genclk_get_rate,
.set_rate = genclk_set_rate,
.set_parent = genclk_set_parent,
.index = 7,
};
struct platform_device *__init
at32_add_device_lcdc(unsigned int id, struct lcdc_platform_data *data)
{
struct platform_device *pdev;
switch (id) {
case 0:
pdev = &lcdc0_device;
portmux_set_func(PIOC, 19, FUNC_A); /* CC */
portmux_set_func(PIOC, 20, FUNC_A); /* HSYNC */
portmux_set_func(PIOC, 21, FUNC_A); /* PCLK */
portmux_set_func(PIOC, 22, FUNC_A); /* VSYNC */
portmux_set_func(PIOC, 23, FUNC_A); /* DVAL */
portmux_set_func(PIOC, 24, FUNC_A); /* MODE */
portmux_set_func(PIOC, 25, FUNC_A); /* PWR */
portmux_set_func(PIOC, 26, FUNC_A); /* DATA0 */
portmux_set_func(PIOC, 27, FUNC_A); /* DATA1 */
portmux_set_func(PIOC, 28, FUNC_A); /* DATA2 */
portmux_set_func(PIOC, 29, FUNC_A); /* DATA3 */
portmux_set_func(PIOC, 30, FUNC_A); /* DATA4 */
portmux_set_func(PIOC, 31, FUNC_A); /* DATA5 */
portmux_set_func(PIOD, 0, FUNC_A); /* DATA6 */
portmux_set_func(PIOD, 1, FUNC_A); /* DATA7 */
portmux_set_func(PIOD, 2, FUNC_A); /* DATA8 */
portmux_set_func(PIOD, 3, FUNC_A); /* DATA9 */
portmux_set_func(PIOD, 4, FUNC_A); /* DATA10 */
portmux_set_func(PIOD, 5, FUNC_A); /* DATA11 */
portmux_set_func(PIOD, 6, FUNC_A); /* DATA12 */
portmux_set_func(PIOD, 7, FUNC_A); /* DATA13 */
portmux_set_func(PIOD, 8, FUNC_A); /* DATA14 */
portmux_set_func(PIOD, 9, FUNC_A); /* DATA15 */
portmux_set_func(PIOD, 10, FUNC_A); /* DATA16 */
portmux_set_func(PIOD, 11, FUNC_A); /* DATA17 */
portmux_set_func(PIOD, 12, FUNC_A); /* DATA18 */
portmux_set_func(PIOD, 13, FUNC_A); /* DATA19 */
portmux_set_func(PIOD, 14, FUNC_A); /* DATA20 */
portmux_set_func(PIOD, 15, FUNC_A); /* DATA21 */
portmux_set_func(PIOD, 16, FUNC_A); /* DATA22 */
portmux_set_func(PIOD, 17, FUNC_A); /* DATA23 */
clk_set_parent(&lcdc0_pixclk, &pll0);
clk_set_rate(&lcdc0_pixclk, clk_get_rate(&pll0));
break;
default:
return NULL;
}
memcpy(pdev->dev.platform_data, data,
sizeof(struct lcdc_platform_data));
platform_device_register(pdev);
return pdev;
}
struct clk *at32_clock_list[] = {
&osc32k,
&osc0,
&osc1,
&pll0,
&pll1,
&cpu_clk,
&hsb_clk,
&pba_clk,
&pbb_clk,
&at32_sm_pclk,
&at32_intc0_pclk,
&ebi_clk,
&hramc_clk,
&pdc_hclk,
&pdc_pclk,
&pico_clk,
&pio0_mck,
&pio1_mck,
&pio2_mck,
&pio3_mck,
&usart0_usart,
&usart1_usart,
&usart2_usart,
&usart3_usart,
&macb0_hclk,
&macb0_pclk,
&spi0_mck,
&lcdc0_hclk,
&lcdc0_pixclk,
};
unsigned int at32_nr_clocks = ARRAY_SIZE(at32_clock_list);
void __init at32_portmux_init(void)
{
at32_init_pio(&pio0_device);
at32_init_pio(&pio1_device);
at32_init_pio(&pio2_device);
at32_init_pio(&pio3_device);
}
void __init at32_clock_init(void)
{
struct at32_sm *sm = &system_manager;
u32 cpu_mask = 0, hsb_mask = 0, pba_mask = 0, pbb_mask = 0;
int i;
if (sm_readl(sm, PM_MCCTRL) & SM_BIT(PLLSEL))
main_clock = &pll0;
else
main_clock = &osc0;
if (sm_readl(sm, PM_PLL0) & SM_BIT(PLLOSC))
pll0.parent = &osc1;
if (sm_readl(sm, PM_PLL1) & SM_BIT(PLLOSC))
pll1.parent = &osc1;
/*
* Turn on all clocks that have at least one user already, and
* turn off everything else. We only do this for module
* clocks, and even though it isn't particularly pretty to
* check the address of the mode function, it should do the
* trick...
*/
for (i = 0; i < ARRAY_SIZE(at32_clock_list); i++) {
struct clk *clk = at32_clock_list[i];
if (clk->mode == &cpu_clk_mode)
cpu_mask |= 1 << clk->index;
else if (clk->mode == &hsb_clk_mode)
hsb_mask |= 1 << clk->index;
else if (clk->mode == &pba_clk_mode)
pba_mask |= 1 << clk->index;
else if (clk->mode == &pbb_clk_mode)
pbb_mask |= 1 << clk->index;
}
sm_writel(sm, PM_CPU_MASK, cpu_mask);
sm_writel(sm, PM_HSB_MASK, hsb_mask);
sm_writel(sm, PM_PBA_MASK, pba_mask);
sm_writel(sm, PM_PBB_MASK, pbb_mask);
}
/*
* Clock management for AT32AP CPUs
*
* Copyright (C) 2006 Atmel Corporation
*
* Based on arch/arm/mach-at91rm9200/clock.c
* Copyright (C) 2005 David Brownell
* Copyright (C) 2005 Ivan Kokshaysky
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/string.h>
#include "clock.h"
static spinlock_t clk_lock = SPIN_LOCK_UNLOCKED;
struct clk *clk_get(struct device *dev, const char *id)
{
int i;
for (i = 0; i < at32_nr_clocks; i++) {
struct clk *clk = at32_clock_list[i];
if (clk->dev == dev && strcmp(id, clk->name) == 0)
return clk;
}
return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(clk_get);
void clk_put(struct clk *clk)
{
/* clocks are static for now, we can't free them */
}
EXPORT_SYMBOL(clk_put);
static void __clk_enable(struct clk *clk)
{
if (clk->parent)
__clk_enable(clk->parent);
if (clk->users++ == 0 && clk->mode)
clk->mode(clk, 1);
}
int clk_enable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clk_lock, flags);
__clk_enable(clk);
spin_unlock_irqrestore(&clk_lock, flags);
return 0;
}
EXPORT_SYMBOL(clk_enable);
static void __clk_disable(struct clk *clk)
{
BUG_ON(clk->users == 0);
if (--clk->users == 0 && clk->mode)
clk->mode(clk, 0);
if (clk->parent)
__clk_disable(clk->parent);
}
void clk_disable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clk_lock, flags);
__clk_disable(clk);
spin_unlock_irqrestore(&clk_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
unsigned long clk_get_rate(struct clk *clk)
{
unsigned long flags;
unsigned long rate;
spin_lock_irqsave(&clk_lock, flags);
rate = clk->get_rate(clk);
spin_unlock_irqrestore(&clk_lock, flags);
return rate;
}
EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long flags, actual_rate;
if (!clk->set_rate)
return -ENOSYS;
spin_lock_irqsave(&clk_lock, flags);
actual_rate = clk->set_rate(clk, rate, 0);
spin_unlock_irqrestore(&clk_lock, flags);
return actual_rate;
}
EXPORT_SYMBOL(clk_round_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long flags;
long ret;
if (!clk->set_rate)
return -ENOSYS;
spin_lock_irqsave(&clk_lock, flags);
ret = clk->set_rate(clk, rate, 1);
spin_unlock_irqrestore(&clk_lock, flags);
return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL(clk_set_rate);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
unsigned long flags;
int ret;
if (!clk->set_parent)
return -ENOSYS;
spin_lock_irqsave(&clk_lock, flags);
ret = clk->set_parent(clk, parent);
spin_unlock_irqrestore(&clk_lock, flags);
return ret;
}
EXPORT_SYMBOL(clk_set_parent);
struct clk *clk_get_parent(struct clk *clk)
{
return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);
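/*
 * Illustrative sketch, not part of the patch: a driver consumes the clk
 * interface above roughly like this.  Clocks are matched on the
 * (dev, name) pair in clk_get(); the "pclk" name and the platform device
 * are assumptions made for the example.
 */
#if 0	/* example only */
static int example_probe(struct platform_device *pdev)
{
	struct clk *pclk;

	pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	clk_enable(pclk);
	dev_info(&pdev->dev, "bus clock runs at %lu Hz\n",
		 clk_get_rate(pclk));

	/* ... use the peripheral ... */

	clk_disable(pclk);
	clk_put(pclk);	/* a no-op here, but keeps the API balanced */
	return 0;
}
#endif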
/*
* Clock management for AT32AP CPUs
*
* Copyright (C) 2006 Atmel Corporation
*
* Based on arch/arm/mach-at91rm9200/clock.c
* Copyright (C) 2005 David Brownell
* Copyright (C) 2005 Ivan Kokshaysky
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
struct clk {
const char *name; /* Clock name/function */
struct device *dev; /* Device the clock is used by */
struct clk *parent; /* Parent clock, if any */
void (*mode)(struct clk *clk, int enabled);
unsigned long (*get_rate)(struct clk *clk);
long (*set_rate)(struct clk *clk, unsigned long rate,
int apply);
int (*set_parent)(struct clk *clk, struct clk *parent);
u16 users; /* Enabled if non-zero */
u16 index; /* Sibling index */
};
extern struct clk *at32_clock_list[];
extern unsigned int at32_nr_clocks;
/*
* External interrupt handling for AT32AP CPUs
*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <asm/io.h>
#include <asm/arch/sm.h>
#include "sm.h"
static void eim_ack_irq(unsigned int irq)
{
struct at32_sm *sm = get_irq_chip_data(irq);
sm_writel(sm, EIM_ICR, 1 << (irq - sm->eim_first_irq));
}
static void eim_mask_irq(unsigned int irq)
{
struct at32_sm *sm = get_irq_chip_data(irq);
sm_writel(sm, EIM_IDR, 1 << (irq - sm->eim_first_irq));
}
static void eim_mask_ack_irq(unsigned int irq)
{
struct at32_sm *sm = get_irq_chip_data(irq);
sm_writel(sm, EIM_ICR, 1 << (irq - sm->eim_first_irq));
sm_writel(sm, EIM_IDR, 1 << (irq - sm->eim_first_irq));
}
static void eim_unmask_irq(unsigned int irq)
{
struct at32_sm *sm = get_irq_chip_data(irq);
sm_writel(sm, EIM_IER, 1 << (irq - sm->eim_first_irq));
}
static int eim_set_irq_type(unsigned int irq, unsigned int flow_type)
{
struct at32_sm *sm = get_irq_chip_data(irq);
unsigned int i = irq - sm->eim_first_irq;
u32 mode, edge, level;
unsigned long flags;
int ret = 0;
flow_type &= IRQ_TYPE_SENSE_MASK;
spin_lock_irqsave(&sm->lock, flags);
mode = sm_readl(sm, EIM_MODE);
edge = sm_readl(sm, EIM_EDGE);
level = sm_readl(sm, EIM_LEVEL);
switch (flow_type) {
case IRQ_TYPE_LEVEL_LOW:
mode |= 1 << i;
level &= ~(1 << i);
break;
case IRQ_TYPE_LEVEL_HIGH:
mode |= 1 << i;
level |= 1 << i;
break;
case IRQ_TYPE_EDGE_RISING:
mode &= ~(1 << i);
edge |= 1 << i;
break;
case IRQ_TYPE_EDGE_FALLING:
mode &= ~(1 << i);
edge &= ~(1 << i);
break;
default:
ret = -EINVAL;
break;
}
sm_writel(sm, EIM_MODE, mode);
sm_writel(sm, EIM_EDGE, edge);
sm_writel(sm, EIM_LEVEL, level);
spin_unlock_irqrestore(&sm->lock, flags);
return ret;
}
struct irq_chip eim_chip = {
.name = "eim",
.ack = eim_ack_irq,
.mask = eim_mask_irq,
.mask_ack = eim_mask_ack_irq,
.unmask = eim_unmask_irq,
.set_type = eim_set_irq_type,
};
static void demux_eim_irq(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
struct at32_sm *sm = desc->handler_data;
struct irq_desc *ext_desc;
unsigned long status, pending;
unsigned int i, ext_irq;
spin_lock(&sm->lock);
status = sm_readl(sm, EIM_ISR);
pending = status & sm_readl(sm, EIM_IMR);
while (pending) {
i = fls(pending) - 1;
pending &= ~(1 << i);
ext_irq = i + sm->eim_first_irq;
ext_desc = irq_desc + ext_irq;
ext_desc->handle_irq(ext_irq, ext_desc, regs);
}
spin_unlock(&sm->lock);
}
static int __init eim_init(void)
{
struct at32_sm *sm = &system_manager;
unsigned int i;
unsigned int nr_irqs;
unsigned int int_irq;
u32 pattern;
/*
* The EIM is really the same module as SM, so register
* mapping, etc. has been taken care of already.
*/
/*
* Find out how many interrupt lines are actually
* implemented in hardware.
*/
sm_writel(sm, EIM_IDR, ~0UL);
sm_writel(sm, EIM_MODE, ~0UL);
pattern = sm_readl(sm, EIM_MODE);
nr_irqs = fls(pattern);
sm->eim_chip = &eim_chip;
for (i = 0; i < nr_irqs; i++) {
set_irq_chip(sm->eim_first_irq + i, &eim_chip);
set_irq_chip_data(sm->eim_first_irq + i, sm);
}
int_irq = platform_get_irq_byname(sm->pdev, "eim");
set_irq_chained_handler(int_irq, demux_eim_irq);
set_irq_data(int_irq, sm);
printk("EIM: External Interrupt Module at 0x%p, IRQ %u\n",
sm->regs, int_irq);
printk("EIM: Handling %u external IRQs, starting with IRQ %u\n",
nr_irqs, sm->eim_first_irq);
return 0;
}
arch_initcall(eim_init);
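/*
 * Illustrative sketch, not part of the patch: once eim_init() has hooked
 * the "eim" irq_chip into the generic IRQ layer, an external interrupt is
 * used like any other IRQ.  The interrupt number below is hypothetical;
 * the real number depends on sm->eim_first_irq for the board in question.
 */
#if 0	/* example only */
#define BOARD_EXTINT_IRQ	64	/* hypothetical EIM interrupt number */

static irqreturn_t board_extint_handler(int irq, void *dev_id,
					 struct pt_regs *regs)
{
	/* ... handle the external event ... */
	return IRQ_HANDLED;
}

static int __init board_extint_init(void)
{
	/* ends up in eim_set_irq_type(), which programs EIM_MODE/EIM_LEVEL */
	set_irq_type(BOARD_EXTINT_IRQ, IRQ_TYPE_LEVEL_LOW);
	return request_irq(BOARD_EXTINT_IRQ, board_extint_handler, 0,
			   "board-extint", NULL);
}
#endif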
/*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include "intc.h"
struct intc {
void __iomem *regs;
struct irq_chip chip;
};
extern struct platform_device at32_intc0_device;
/*
* TODO: We may be able to implement mask/unmask by setting IxM flags
* in the status register.
*/
static void intc_mask_irq(unsigned int irq)
{
}
static void intc_unmask_irq(unsigned int irq)
{
}
static struct intc intc0 = {
.chip = {
.name = "intc",
.mask = intc_mask_irq,
.unmask = intc_unmask_irq,
},
};
/*
* All interrupts go via intc at some point.
*/
asmlinkage void do_IRQ(int level, struct pt_regs *regs)
{
struct irq_desc *desc;
unsigned int irq;
unsigned long status_reg;
local_irq_disable();
irq_enter();
irq = intc_readl(&intc0, INTCAUSE0 - 4 * level);
desc = irq_desc + irq;
desc->handle_irq(irq, desc, regs);
/*
* Clear all interrupt level masks so that we may handle
* interrupts during softirq processing. If this is a nested
* interrupt, interrupts must stay globally disabled until we
* return.
*/
status_reg = sysreg_read(SR);
status_reg &= ~(SYSREG_BIT(I0M) | SYSREG_BIT(I1M)
| SYSREG_BIT(I2M) | SYSREG_BIT(I3M));
sysreg_write(SR, status_reg);
irq_exit();
}
void __init init_IRQ(void)
{
extern void _evba(void);
extern void irq_level0(void);
struct resource *regs;
struct clk *pclk;
unsigned int i;
u32 offset, readback;
regs = platform_get_resource(&at32_intc0_device, IORESOURCE_MEM, 0);
if (!regs) {
printk(KERN_EMERG "intc: no mmio resource defined\n");
goto fail;
}
pclk = clk_get(&at32_intc0_device.dev, "pclk");
if (IS_ERR(pclk)) {
printk(KERN_EMERG "intc: no clock defined\n");
goto fail;
}
clk_enable(pclk);
intc0.regs = ioremap(regs->start, regs->end - regs->start + 1);
if (!intc0.regs) {
printk(KERN_EMERG "intc: failed to map registers (0x%08lx)\n",
(unsigned long)regs->start);
goto fail;
}
/*
* Initialize all interrupts to level 0 (lowest priority). The
* priority level may be changed by calling
* irq_set_priority().
*/
offset = (unsigned long)&irq_level0 - (unsigned long)&_evba;
for (i = 0; i < NR_INTERNAL_IRQS; i++) {
intc_writel(&intc0, INTPR0 + 4 * i, offset);
readback = intc_readl(&intc0, INTPR0 + 4 * i);
if (readback == offset)
set_irq_chip_and_handler(i, &intc0.chip,
handle_simple_irq);
}
/* Unmask all interrupt levels */
sysreg_write(SR, (sysreg_read(SR)
& ~(SR_I3M | SR_I2M | SR_I1M | SR_I0M)));
return;
fail:
panic("Interrupt controller initialization failed!\n");
}
/*
* Automatically generated by gen-header.xsl
*/
#ifndef __ASM_AVR32_PERIHP_INTC_H__
#define __ASM_AVR32_PERIHP_INTC_H__
#define INTC_NUM_INT_GRPS 33
#define INTC_INTPR0 0x0
# define INTC_INTPR0_INTLEV_OFFSET 30
# define INTC_INTPR0_INTLEV_SIZE 2
# define INTC_INTPR0_OFFSET_OFFSET 0
# define INTC_INTPR0_OFFSET_SIZE 24
#define INTC_INTREQ0 0x100
# define INTC_INTREQ0_IREQUEST0_OFFSET 0
# define INTC_INTREQ0_IREQUEST0_SIZE 1
# define INTC_INTREQ0_IREQUEST1_OFFSET 1
# define INTC_INTREQ0_IREQUEST1_SIZE 1
#define INTC_INTPR1 0x4
# define INTC_INTPR1_INTLEV_OFFSET 30
# define INTC_INTPR1_INTLEV_SIZE 2
# define INTC_INTPR1_OFFSET_OFFSET 0
# define INTC_INTPR1_OFFSET_SIZE 24
#define INTC_INTREQ1 0x104
# define INTC_INTREQ1_IREQUEST32_OFFSET 0
# define INTC_INTREQ1_IREQUEST32_SIZE 1
# define INTC_INTREQ1_IREQUEST33_OFFSET 1
# define INTC_INTREQ1_IREQUEST33_SIZE 1
# define INTC_INTREQ1_IREQUEST34_OFFSET 2
# define INTC_INTREQ1_IREQUEST34_SIZE 1
# define INTC_INTREQ1_IREQUEST35_OFFSET 3
# define INTC_INTREQ1_IREQUEST35_SIZE 1
# define INTC_INTREQ1_IREQUEST36_OFFSET 4
# define INTC_INTREQ1_IREQUEST36_SIZE 1
# define INTC_INTREQ1_IREQUEST37_OFFSET 5
# define INTC_INTREQ1_IREQUEST37_SIZE 1
#define INTC_INTPR2 0x8
# define INTC_INTPR2_INTLEV_OFFSET 30
# define INTC_INTPR2_INTLEV_SIZE 2
# define INTC_INTPR2_OFFSET_OFFSET 0
# define INTC_INTPR2_OFFSET_SIZE 24
#define INTC_INTREQ2 0x108
# define INTC_INTREQ2_IREQUEST64_OFFSET 0
# define INTC_INTREQ2_IREQUEST64_SIZE 1
# define INTC_INTREQ2_IREQUEST65_OFFSET 1
# define INTC_INTREQ2_IREQUEST65_SIZE 1
# define INTC_INTREQ2_IREQUEST66_OFFSET 2
# define INTC_INTREQ2_IREQUEST66_SIZE 1
# define INTC_INTREQ2_IREQUEST67_OFFSET 3
# define INTC_INTREQ2_IREQUEST67_SIZE 1
# define INTC_INTREQ2_IREQUEST68_OFFSET 4
# define INTC_INTREQ2_IREQUEST68_SIZE 1
#define INTC_INTPR3 0xc
# define INTC_INTPR3_INTLEV_OFFSET 30
# define INTC_INTPR3_INTLEV_SIZE 2
# define INTC_INTPR3_OFFSET_OFFSET 0
# define INTC_INTPR3_OFFSET_SIZE 24
#define INTC_INTREQ3 0x10c
# define INTC_INTREQ3_IREQUEST96_OFFSET 0
# define INTC_INTREQ3_IREQUEST96_SIZE 1
#define INTC_INTPR4 0x10
# define INTC_INTPR4_INTLEV_OFFSET 30
# define INTC_INTPR4_INTLEV_SIZE 2
# define INTC_INTPR4_OFFSET_OFFSET 0
# define INTC_INTPR4_OFFSET_SIZE 24
#define INTC_INTREQ4 0x110
# define INTC_INTREQ4_IREQUEST128_OFFSET 0
# define INTC_INTREQ4_IREQUEST128_SIZE 1
#define INTC_INTPR5 0x14
# define INTC_INTPR5_INTLEV_OFFSET 30
# define INTC_INTPR5_INTLEV_SIZE 2
# define INTC_INTPR5_OFFSET_OFFSET 0
# define INTC_INTPR5_OFFSET_SIZE 24
#define INTC_INTREQ5 0x114
# define INTC_INTREQ5_IREQUEST160_OFFSET 0
# define INTC_INTREQ5_IREQUEST160_SIZE 1
#define INTC_INTPR6 0x18
# define INTC_INTPR6_INTLEV_OFFSET 30
# define INTC_INTPR6_INTLEV_SIZE 2
# define INTC_INTPR6_OFFSET_OFFSET 0
# define INTC_INTPR6_OFFSET_SIZE 24
#define INTC_INTREQ6 0x118
# define INTC_INTREQ6_IREQUEST192_OFFSET 0
# define INTC_INTREQ6_IREQUEST192_SIZE 1
#define INTC_INTPR7 0x1c
# define INTC_INTPR7_INTLEV_OFFSET 30
# define INTC_INTPR7_INTLEV_SIZE 2
# define INTC_INTPR7_OFFSET_OFFSET 0
# define INTC_INTPR7_OFFSET_SIZE 24
#define INTC_INTREQ7 0x11c
# define INTC_INTREQ7_IREQUEST224_OFFSET 0
# define INTC_INTREQ7_IREQUEST224_SIZE 1
#define INTC_INTPR8 0x20
# define INTC_INTPR8_INTLEV_OFFSET 30
# define INTC_INTPR8_INTLEV_SIZE 2
# define INTC_INTPR8_OFFSET_OFFSET 0
# define INTC_INTPR8_OFFSET_SIZE 24
#define INTC_INTREQ8 0x120
# define INTC_INTREQ8_IREQUEST256_OFFSET 0
# define INTC_INTREQ8_IREQUEST256_SIZE 1
#define INTC_INTPR9 0x24
# define INTC_INTPR9_INTLEV_OFFSET 30
# define INTC_INTPR9_INTLEV_SIZE 2
# define INTC_INTPR9_OFFSET_OFFSET 0
# define INTC_INTPR9_OFFSET_SIZE 24
#define INTC_INTREQ9 0x124
# define INTC_INTREQ9_IREQUEST288_OFFSET 0
# define INTC_INTREQ9_IREQUEST288_SIZE 1
#define INTC_INTPR10 0x28
# define INTC_INTPR10_INTLEV_OFFSET 30
# define INTC_INTPR10_INTLEV_SIZE 2
# define INTC_INTPR10_OFFSET_OFFSET 0
# define INTC_INTPR10_OFFSET_SIZE 24
#define INTC_INTREQ10 0x128
# define INTC_INTREQ10_IREQUEST320_OFFSET 0
# define INTC_INTREQ10_IREQUEST320_SIZE 1
#define INTC_INTPR11 0x2c
# define INTC_INTPR11_INTLEV_OFFSET 30
# define INTC_INTPR11_INTLEV_SIZE 2
# define INTC_INTPR11_OFFSET_OFFSET 0
# define INTC_INTPR11_OFFSET_SIZE 24
#define INTC_INTREQ11 0x12c
# define INTC_INTREQ11_IREQUEST352_OFFSET 0
# define INTC_INTREQ11_IREQUEST352_SIZE 1
#define INTC_INTPR12 0x30
# define INTC_INTPR12_INTLEV_OFFSET 30
# define INTC_INTPR12_INTLEV_SIZE 2
# define INTC_INTPR12_OFFSET_OFFSET 0
# define INTC_INTPR12_OFFSET_SIZE 24
#define INTC_INTREQ12 0x130
# define INTC_INTREQ12_IREQUEST384_OFFSET 0
# define INTC_INTREQ12_IREQUEST384_SIZE 1
#define INTC_INTPR13 0x34
# define INTC_INTPR13_INTLEV_OFFSET 30
# define INTC_INTPR13_INTLEV_SIZE 2
# define INTC_INTPR13_OFFSET_OFFSET 0
# define INTC_INTPR13_OFFSET_SIZE 24
#define INTC_INTREQ13 0x134
# define INTC_INTREQ13_IREQUEST416_OFFSET 0
# define INTC_INTREQ13_IREQUEST416_SIZE 1
#define INTC_INTPR14 0x38
# define INTC_INTPR14_INTLEV_OFFSET 30
# define INTC_INTPR14_INTLEV_SIZE 2
# define INTC_INTPR14_OFFSET_OFFSET 0
# define INTC_INTPR14_OFFSET_SIZE 24
#define INTC_INTREQ14 0x138
# define INTC_INTREQ14_IREQUEST448_OFFSET 0
# define INTC_INTREQ14_IREQUEST448_SIZE 1
#define INTC_INTPR15 0x3c
# define INTC_INTPR15_INTLEV_OFFSET 30
# define INTC_INTPR15_INTLEV_SIZE 2
# define INTC_INTPR15_OFFSET_OFFSET 0
# define INTC_INTPR15_OFFSET_SIZE 24
#define INTC_INTREQ15 0x13c
# define INTC_INTREQ15_IREQUEST480_OFFSET 0
# define INTC_INTREQ15_IREQUEST480_SIZE 1
#define INTC_INTPR16 0x40
# define INTC_INTPR16_INTLEV_OFFSET 30
# define INTC_INTPR16_INTLEV_SIZE 2
# define INTC_INTPR16_OFFSET_OFFSET 0
# define INTC_INTPR16_OFFSET_SIZE 24
#define INTC_INTREQ16 0x140
# define INTC_INTREQ16_IREQUEST512_OFFSET 0
# define INTC_INTREQ16_IREQUEST512_SIZE 1
#define INTC_INTPR17 0x44
# define INTC_INTPR17_INTLEV_OFFSET 30
# define INTC_INTPR17_INTLEV_SIZE 2
# define INTC_INTPR17_OFFSET_OFFSET 0
# define INTC_INTPR17_OFFSET_SIZE 24
#define INTC_INTREQ17 0x144
# define INTC_INTREQ17_IREQUEST544_OFFSET 0
# define INTC_INTREQ17_IREQUEST544_SIZE 1
#define INTC_INTPR18 0x48
# define INTC_INTPR18_INTLEV_OFFSET 30
# define INTC_INTPR18_INTLEV_SIZE 2
# define INTC_INTPR18_OFFSET_OFFSET 0
# define INTC_INTPR18_OFFSET_SIZE 24
#define INTC_INTREQ18 0x148
# define INTC_INTREQ18_IREQUEST576_OFFSET 0
# define INTC_INTREQ18_IREQUEST576_SIZE 1
#define INTC_INTPR19 0x4c
# define INTC_INTPR19_INTLEV_OFFSET 30
# define INTC_INTPR19_INTLEV_SIZE 2
# define INTC_INTPR19_OFFSET_OFFSET 0
# define INTC_INTPR19_OFFSET_SIZE 24
#define INTC_INTREQ19 0x14c
# define INTC_INTREQ19_IREQUEST608_OFFSET 0
# define INTC_INTREQ19_IREQUEST608_SIZE 1
# define INTC_INTREQ19_IREQUEST609_OFFSET 1
# define INTC_INTREQ19_IREQUEST609_SIZE 1
# define INTC_INTREQ19_IREQUEST610_OFFSET 2
# define INTC_INTREQ19_IREQUEST610_SIZE 1
# define INTC_INTREQ19_IREQUEST611_OFFSET 3
# define INTC_INTREQ19_IREQUEST611_SIZE 1
#define INTC_INTPR20 0x50
# define INTC_INTPR20_INTLEV_OFFSET 30
# define INTC_INTPR20_INTLEV_SIZE 2
# define INTC_INTPR20_OFFSET_OFFSET 0
# define INTC_INTPR20_OFFSET_SIZE 24
#define INTC_INTREQ20 0x150
# define INTC_INTREQ20_IREQUEST640_OFFSET 0
# define INTC_INTREQ20_IREQUEST640_SIZE 1
#define INTC_INTPR21 0x54
# define INTC_INTPR21_INTLEV_OFFSET 30
# define INTC_INTPR21_INTLEV_SIZE 2
# define INTC_INTPR21_OFFSET_OFFSET 0
# define INTC_INTPR21_OFFSET_SIZE 24
#define INTC_INTREQ21 0x154
# define INTC_INTREQ21_IREQUEST672_OFFSET 0
# define INTC_INTREQ21_IREQUEST672_SIZE 1
#define INTC_INTPR22 0x58
# define INTC_INTPR22_INTLEV_OFFSET 30
# define INTC_INTPR22_INTLEV_SIZE 2
# define INTC_INTPR22_OFFSET_OFFSET 0
# define INTC_INTPR22_OFFSET_SIZE 24
#define INTC_INTREQ22 0x158
# define INTC_INTREQ22_IREQUEST704_OFFSET 0
# define INTC_INTREQ22_IREQUEST704_SIZE 1
# define INTC_INTREQ22_IREQUEST705_OFFSET 1
# define INTC_INTREQ22_IREQUEST705_SIZE 1
# define INTC_INTREQ22_IREQUEST706_OFFSET 2
# define INTC_INTREQ22_IREQUEST706_SIZE 1
#define INTC_INTPR23 0x5c
# define INTC_INTPR23_INTLEV_OFFSET 30
# define INTC_INTPR23_INTLEV_SIZE 2
# define INTC_INTPR23_OFFSET_OFFSET 0
# define INTC_INTPR23_OFFSET_SIZE 24
#define INTC_INTREQ23 0x15c
# define INTC_INTREQ23_IREQUEST736_OFFSET 0
# define INTC_INTREQ23_IREQUEST736_SIZE 1
# define INTC_INTREQ23_IREQUEST737_OFFSET 1
# define INTC_INTREQ23_IREQUEST737_SIZE 1
# define INTC_INTREQ23_IREQUEST738_OFFSET 2
# define INTC_INTREQ23_IREQUEST738_SIZE 1
#define INTC_INTPR24 0x60
# define INTC_INTPR24_INTLEV_OFFSET 30
# define INTC_INTPR24_INTLEV_SIZE 2
# define INTC_INTPR24_OFFSET_OFFSET 0
# define INTC_INTPR24_OFFSET_SIZE 24
#define INTC_INTREQ24 0x160
# define INTC_INTREQ24_IREQUEST768_OFFSET 0
# define INTC_INTREQ24_IREQUEST768_SIZE 1
#define INTC_INTPR25 0x64
# define INTC_INTPR25_INTLEV_OFFSET 30
# define INTC_INTPR25_INTLEV_SIZE 2
# define INTC_INTPR25_OFFSET_OFFSET 0
# define INTC_INTPR25_OFFSET_SIZE 24
#define INTC_INTREQ25 0x164
# define INTC_INTREQ25_IREQUEST800_OFFSET 0
# define INTC_INTREQ25_IREQUEST800_SIZE 1
#define INTC_INTPR26 0x68
# define INTC_INTPR26_INTLEV_OFFSET 30
# define INTC_INTPR26_INTLEV_SIZE 2
# define INTC_INTPR26_OFFSET_OFFSET 0
# define INTC_INTPR26_OFFSET_SIZE 24
#define INTC_INTREQ26 0x168
# define INTC_INTREQ26_IREQUEST832_OFFSET 0
# define INTC_INTREQ26_IREQUEST832_SIZE 1
#define INTC_INTPR27 0x6c
# define INTC_INTPR27_INTLEV_OFFSET 30
# define INTC_INTPR27_INTLEV_SIZE 2
# define INTC_INTPR27_OFFSET_OFFSET 0
# define INTC_INTPR27_OFFSET_SIZE 24
#define INTC_INTREQ27 0x16c
# define INTC_INTREQ27_IREQUEST864_OFFSET 0
# define INTC_INTREQ27_IREQUEST864_SIZE 1
#define INTC_INTPR28 0x70
# define INTC_INTPR28_INTLEV_OFFSET 30
# define INTC_INTPR28_INTLEV_SIZE 2
# define INTC_INTPR28_OFFSET_OFFSET 0
# define INTC_INTPR28_OFFSET_SIZE 24
#define INTC_INTREQ28 0x170
# define INTC_INTREQ28_IREQUEST896_OFFSET 0
# define INTC_INTREQ28_IREQUEST896_SIZE 1
#define INTC_INTPR29 0x74
# define INTC_INTPR29_INTLEV_OFFSET 30
# define INTC_INTPR29_INTLEV_SIZE 2
# define INTC_INTPR29_OFFSET_OFFSET 0
# define INTC_INTPR29_OFFSET_SIZE 24
#define INTC_INTREQ29 0x174
# define INTC_INTREQ29_IREQUEST928_OFFSET 0
# define INTC_INTREQ29_IREQUEST928_SIZE 1
#define INTC_INTPR30 0x78
# define INTC_INTPR30_INTLEV_OFFSET 30
# define INTC_INTPR30_INTLEV_SIZE 2
# define INTC_INTPR30_OFFSET_OFFSET 0
# define INTC_INTPR30_OFFSET_SIZE 24
#define INTC_INTREQ30 0x178
# define INTC_INTREQ30_IREQUEST960_OFFSET 0
# define INTC_INTREQ30_IREQUEST960_SIZE 1
#define INTC_INTPR31 0x7c
# define INTC_INTPR31_INTLEV_OFFSET 30
# define INTC_INTPR31_INTLEV_SIZE 2
# define INTC_INTPR31_OFFSET_OFFSET 0
# define INTC_INTPR31_OFFSET_SIZE 24
#define INTC_INTREQ31 0x17c
# define INTC_INTREQ31_IREQUEST992_OFFSET 0
# define INTC_INTREQ31_IREQUEST992_SIZE 1
#define INTC_INTPR32 0x80
# define INTC_INTPR32_INTLEV_OFFSET 30
# define INTC_INTPR32_INTLEV_SIZE 2
# define INTC_INTPR32_OFFSET_OFFSET 0
# define INTC_INTPR32_OFFSET_SIZE 24
#define INTC_INTREQ32 0x180
# define INTC_INTREQ32_IREQUEST1024_OFFSET 0
# define INTC_INTREQ32_IREQUEST1024_SIZE 1
#define INTC_INTCAUSE0 0x20c
# define INTC_INTCAUSE0_CAUSEGRP_OFFSET 0
# define INTC_INTCAUSE0_CAUSEGRP_SIZE 6
#define INTC_INTCAUSE1 0x208
# define INTC_INTCAUSE1_CAUSEGRP_OFFSET 0
# define INTC_INTCAUSE1_CAUSEGRP_SIZE 6
#define INTC_INTCAUSE2 0x204
# define INTC_INTCAUSE2_CAUSEGRP_OFFSET 0
# define INTC_INTCAUSE2_CAUSEGRP_SIZE 6
#define INTC_INTCAUSE3 0x200
# define INTC_INTCAUSE3_CAUSEGRP_OFFSET 0
# define INTC_INTCAUSE3_CAUSEGRP_SIZE 6
#define INTC_BIT(name) (1 << INTC_##name##_OFFSET)
#define INTC_MKBF(name, value) (((value) & ((1 << INTC_##name##_SIZE) - 1)) << INTC_##name##_OFFSET)
#define INTC_GETBF(name, value) (((value) >> INTC_##name##_OFFSET) & ((1 << INTC_##name##_SIZE) - 1))
#define intc_readl(port,reg) readl((port)->regs + INTC_##reg)
#define intc_writel(port,reg,value) writel((value), (port)->regs + INTC_##reg)
#endif /* __ASM_AVR32_PERIHP_INTC_H__ */
/*
* Atmel PIO2 Port Multiplexer support
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/arch/portmux.h>
#include "pio.h"
#define MAX_NR_PIO_DEVICES 8
struct pio_device {
void __iomem *regs;
const struct platform_device *pdev;
struct clk *clk;
u32 alloc_mask;
char name[32];
};
static struct pio_device pio_dev[MAX_NR_PIO_DEVICES];
void portmux_set_func(unsigned int portmux_id, unsigned int pin_id,
unsigned int function_id)
{
struct pio_device *pio;
u32 mask = 1 << pin_id;
BUG_ON(portmux_id >= MAX_NR_PIO_DEVICES);
pio = &pio_dev[portmux_id];
if (function_id)
pio_writel(pio, BSR, mask);
else
pio_writel(pio, ASR, mask);
pio_writel(pio, PDR, mask);
}
static int __init pio_probe(struct platform_device *pdev)
{
struct pio_device *pio = NULL;
BUG_ON(pdev->id >= MAX_NR_PIO_DEVICES);
pio = &pio_dev[pdev->id];
BUG_ON(!pio->regs);
/* TODO: Interrupts */
platform_set_drvdata(pdev, pio);
printk(KERN_INFO "%s: Atmel Port Multiplexer at 0x%p (irq %d)\n",
pio->name, pio->regs, platform_get_irq(pdev, 0));
return 0;
}
static struct platform_driver pio_driver = {
.probe = pio_probe,
.driver = {
.name = "pio",
},
};
static int __init pio_init(void)
{
return platform_driver_register(&pio_driver);
}
subsys_initcall(pio_init);
void __init at32_init_pio(struct platform_device *pdev)
{
struct resource *regs;
struct pio_device *pio;
if (pdev->id >= MAX_NR_PIO_DEVICES) {
dev_err(&pdev->dev, "only %d PIO devices supported\n",
MAX_NR_PIO_DEVICES);
return;
}
pio = &pio_dev[pdev->id];
snprintf(pio->name, sizeof(pio->name), "pio%d", pdev->id);
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_err(&pdev->dev, "no mmio resource defined\n");
return;
}
pio->clk = clk_get(&pdev->dev, "mck");
if (IS_ERR(pio->clk))
/*
* This is a fatal error, but if we continue we might
* be so lucky that we manage to initialize the
* console and display this message...
*/
dev_err(&pdev->dev, "no mck clock defined\n");
else
clk_enable(pio->clk);
pio->pdev = pdev;
pio->regs = ioremap(regs->start, regs->end - regs->start + 1);
pio_writel(pio, ODR, ~0UL);
pio_writel(pio, PER, ~0UL);
}
/*
* Atmel PIO2 Port Multiplexer support
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARCH_AVR32_AT32AP_PIO_H__
#define __ARCH_AVR32_AT32AP_PIO_H__
/* PIO register offsets */
#define PIO_PER 0x0000
#define PIO_PDR 0x0004
#define PIO_PSR 0x0008
#define PIO_OER 0x0010
#define PIO_ODR 0x0014
#define PIO_OSR 0x0018
#define PIO_IFER 0x0020
#define PIO_IFDR 0x0024
#define PIO_ISFR 0x0028
#define PIO_SODR 0x0030
#define PIO_CODR 0x0034
#define PIO_ODSR 0x0038
#define PIO_PDSR 0x003c
#define PIO_IER 0x0040
#define PIO_IDR 0x0044
#define PIO_IMR 0x0048
#define PIO_ISR 0x004c
#define PIO_MDER 0x0050
#define PIO_MDDR 0x0054
#define PIO_MDSR 0x0058
#define PIO_PUDR 0x0060
#define PIO_PUER 0x0064
#define PIO_PUSR 0x0068
#define PIO_ASR 0x0070
#define PIO_BSR 0x0074
#define PIO_ABSR 0x0078
#define PIO_OWER 0x00a0
#define PIO_OWDR 0x00a4
#define PIO_OWSR 0x00a8
/* Bitfields in PER */
/* Bitfields in PDR */
/* Bitfields in PSR */
/* Bitfields in OER */
/* Bitfields in ODR */
/* Bitfields in OSR */
/* Bitfields in IFER */
/* Bitfields in IFDR */
/* Bitfields in ISFR */
/* Bitfields in SODR */
/* Bitfields in CODR */
/* Bitfields in ODSR */
/* Bitfields in PDSR */
/* Bitfields in IER */
/* Bitfields in IDR */
/* Bitfields in IMR */
/* Bitfields in ISR */
/* Bitfields in MDER */
/* Bitfields in MDDR */
/* Bitfields in MDSR */
/* Bitfields in PUDR */
/* Bitfields in PUER */
/* Bitfields in PUSR */
/* Bitfields in ASR */
/* Bitfields in BSR */
/* Bitfields in ABSR */
#define PIO_P0_OFFSET 0
#define PIO_P0_SIZE 1
#define PIO_P1_OFFSET 1
#define PIO_P1_SIZE 1
#define PIO_P2_OFFSET 2
#define PIO_P2_SIZE 1
#define PIO_P3_OFFSET 3
#define PIO_P3_SIZE 1
#define PIO_P4_OFFSET 4
#define PIO_P4_SIZE 1
#define PIO_P5_OFFSET 5
#define PIO_P5_SIZE 1
#define PIO_P6_OFFSET 6
#define PIO_P6_SIZE 1
#define PIO_P7_OFFSET 7
#define PIO_P7_SIZE 1
#define PIO_P8_OFFSET 8
#define PIO_P8_SIZE 1
#define PIO_P9_OFFSET 9
#define PIO_P9_SIZE 1
#define PIO_P10_OFFSET 10
#define PIO_P10_SIZE 1
#define PIO_P11_OFFSET 11
#define PIO_P11_SIZE 1
#define PIO_P12_OFFSET 12
#define PIO_P12_SIZE 1
#define PIO_P13_OFFSET 13
#define PIO_P13_SIZE 1
#define PIO_P14_OFFSET 14
#define PIO_P14_SIZE 1
#define PIO_P15_OFFSET 15
#define PIO_P15_SIZE 1
#define PIO_P16_OFFSET 16
#define PIO_P16_SIZE 1
#define PIO_P17_OFFSET 17
#define PIO_P17_SIZE 1
#define PIO_P18_OFFSET 18
#define PIO_P18_SIZE 1
#define PIO_P19_OFFSET 19
#define PIO_P19_SIZE 1
#define PIO_P20_OFFSET 20
#define PIO_P20_SIZE 1
#define PIO_P21_OFFSET 21
#define PIO_P21_SIZE 1
#define PIO_P22_OFFSET 22
#define PIO_P22_SIZE 1
#define PIO_P23_OFFSET 23
#define PIO_P23_SIZE 1
#define PIO_P24_OFFSET 24
#define PIO_P24_SIZE 1
#define PIO_P25_OFFSET 25
#define PIO_P25_SIZE 1
#define PIO_P26_OFFSET 26
#define PIO_P26_SIZE 1
#define PIO_P27_OFFSET 27
#define PIO_P27_SIZE 1
#define PIO_P28_OFFSET 28
#define PIO_P28_SIZE 1
#define PIO_P29_OFFSET 29
#define PIO_P29_SIZE 1
#define PIO_P30_OFFSET 30
#define PIO_P30_SIZE 1
#define PIO_P31_OFFSET 31
#define PIO_P31_SIZE 1
/* Bitfields in OWER */
/* Bitfields in OWDR */
/* Bitfields in OWSR */
/* Bit manipulation macros */
#define PIO_BIT(name) (1 << PIO_##name##_OFFSET)
#define PIO_BF(name,value) (((value) & ((1 << PIO_##name##_SIZE) - 1)) << PIO_##name##_OFFSET)
#define PIO_BFEXT(name,value) (((value) >> PIO_##name##_OFFSET) & ((1 << PIO_##name##_SIZE) - 1))
#define PIO_BFINS(name,value,old) (((old) & ~(((1 << PIO_##name##_SIZE) - 1) << PIO_##name##_OFFSET)) | PIO_BF(name,value))
/* Register access macros */
#define pio_readl(port,reg) readl((port)->regs + PIO_##reg)
#define pio_writel(port,reg,value) writel((value), (port)->regs + PIO_##reg)
void at32_init_pio(struct platform_device *pdev);
#endif /* __ARCH_AVR32_AT32AP_PIO_H__ */
/*
* System Manager driver for AT32AP CPUs
*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <asm/intc.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/arch/sm.h>
#include "sm.h"
#define SM_EIM_IRQ_RESOURCE 1
#define SM_PM_IRQ_RESOURCE 2
#define SM_RTC_IRQ_RESOURCE 3
#define to_eim(irqc) container_of(irqc, struct at32_sm, irqc)
struct at32_sm system_manager;
int __init at32_sm_init(void)
{
struct resource *regs;
struct at32_sm *sm = &system_manager;
int ret = -ENXIO;
regs = platform_get_resource(&at32_sm_device, IORESOURCE_MEM, 0);
if (!regs)
goto fail;
spin_lock_init(&sm->lock);
sm->pdev = &at32_sm_device;
ret = -ENOMEM;
sm->regs = ioremap(regs->start, regs->end - regs->start + 1);
if (!sm->regs)
goto fail;
return 0;
fail:
printk(KERN_ERR "Failed to initialize System Manager: %d\n", ret);
return ret;
}
/*
* External Interrupt Module (EIM).
*
* EIM gets level- or edge-triggered interrupts of either polarity
* from the outside and converts them to active-high level-triggered
* interrupts that the internal interrupt controller can handle. EIM
* also provides masking/unmasking of interrupts, as well as
* acknowledging of edge-triggered interrupts.
*/
static irqreturn_t spurious_eim_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
{
printk(KERN_WARNING "Spurious EIM interrupt %d\n", irq);
disable_irq(irq);
return IRQ_NONE;
}
static struct irqaction eim_spurious_action = {
.handler = spurious_eim_interrupt,
};
static irqreturn_t eim_handle_irq(int irq, void *dev_id, struct pt_regs *regs)
{
struct irq_controller * irqc = dev_id;
struct at32_sm *sm = to_eim(irqc);
unsigned long pending;
/*
* No need to disable interrupts globally. The interrupt
* level relevant to this group must be masked all the time,
* so we know that this particular EIM instance will not be
* re-entered.
*/
spin_lock(&sm->lock);
pending = intc_get_pending(sm->irqc.irq_group);
if (unlikely(!pending)) {
printk(KERN_ERR "EIM (group %u): No interrupts pending!\n",
sm->irqc.irq_group);
goto unlock;
}
do {
struct irqaction *action;
unsigned int i;
i = fls(pending) - 1;
pending &= ~(1 << i);
action = sm->action[i];
/* Acknowledge the interrupt */
sm_writel(sm, EIM_ICR, 1 << i);
spin_unlock(&sm->lock);
if (action->flags & SA_INTERRUPT)
local_irq_disable();
action->handler(sm->irqc.first_irq + i, action->dev_id, regs);
local_irq_enable();
spin_lock(&sm->lock);
if (action->flags & SA_SAMPLE_RANDOM)
add_interrupt_randomness(sm->irqc.first_irq + i);
} while (pending);
unlock:
spin_unlock(&sm->lock);
return IRQ_HANDLED;
}
static void eim_mask(struct irq_controller *irqc, unsigned int irq)
{
struct at32_sm *sm = to_eim(irqc);
unsigned int i;
i = irq - sm->irqc.first_irq;
sm_writel(sm, EIM_IDR, 1 << i);
}
static void eim_unmask(struct irq_controller *irqc, unsigned int irq)
{
struct at32_sm *sm = to_eim(irqc);
unsigned int i;
i = irq - sm->irqc.first_irq;
sm_writel(sm, EIM_IER, 1 << i);
}
static int eim_setup(struct irq_controller *irqc, unsigned int irq,
struct irqaction *action)
{
struct at32_sm *sm = to_eim(irqc);
sm->action[irq - sm->irqc.first_irq] = action;
/* Acknowledge earlier interrupts */
sm_writel(sm, EIM_ICR, (1<<(irq - sm->irqc.first_irq)));
eim_unmask(irqc, irq);
return 0;
}
static void eim_free(struct irq_controller *irqc, unsigned int irq,
void *dev)
{
struct at32_sm *sm = to_eim(irqc);
eim_mask(irqc, irq);
sm->action[irq - sm->irqc.first_irq] = &eim_spurious_action;
}
static int eim_set_type(struct irq_controller *irqc, unsigned int irq,
unsigned int type)
{
struct at32_sm *sm = to_eim(irqc);
unsigned long flags;
u32 value, pattern;
spin_lock_irqsave(&sm->lock, flags);
pattern = 1 << (irq - sm->irqc.first_irq);
value = sm_readl(sm, EIM_MODE);
if (type & IRQ_TYPE_LEVEL)
value |= pattern;
else
value &= ~pattern;
sm_writel(sm, EIM_MODE, value);
value = sm_readl(sm, EIM_EDGE);
if (type & IRQ_EDGE_RISING)
value |= pattern;
else
value &= ~pattern;
sm_writel(sm, EIM_EDGE, value);
value = sm_readl(sm, EIM_LEVEL);
if (type & IRQ_LEVEL_HIGH)
value |= pattern;
else
value &= ~pattern;
sm_writel(sm, EIM_LEVEL, value);
spin_unlock_irqrestore(&sm->lock, flags);
return 0;
}
static unsigned int eim_get_type(struct irq_controller *irqc,
unsigned int irq)
{
struct at32_sm *sm = to_eim(irqc);
unsigned long flags;
unsigned int type = 0;
u32 mode, edge, level, pattern;
pattern = 1 << (irq - sm->irqc.first_irq);
spin_lock_irqsave(&sm->lock, flags);
mode = sm_readl(sm, EIM_MODE);
edge = sm_readl(sm, EIM_EDGE);
level = sm_readl(sm, EIM_LEVEL);
spin_unlock_irqrestore(&sm->lock, flags);
if (mode & pattern)
type |= IRQ_TYPE_LEVEL;
if (edge & pattern)
type |= IRQ_EDGE_RISING;
if (level & pattern)
type |= IRQ_LEVEL_HIGH;
return type;
}
static struct irq_controller_class eim_irq_class = {
.typename = "EIM",
.handle = eim_handle_irq,
.setup = eim_setup,
.free = eim_free,
.mask = eim_mask,
.unmask = eim_unmask,
.set_type = eim_set_type,
.get_type = eim_get_type,
};
static int __init eim_init(void)
{
struct at32_sm *sm = &system_manager;
unsigned int i;
u32 pattern;
int ret;
/*
* The EIM is really the same module as SM, so register
* mapping, etc. has been taken care of already.
*/
/*
* Find out how many interrupt lines are actually
* implemented in hardware.
*/
sm_writel(sm, EIM_IDR, ~0UL);
sm_writel(sm, EIM_MODE, ~0UL);
pattern = sm_readl(sm, EIM_MODE);
sm->irqc.nr_irqs = fls(pattern);
ret = -ENOMEM;
sm->action = kmalloc(sizeof(*sm->action) * sm->irqc.nr_irqs,
GFP_KERNEL);
if (!sm->action)
goto out;
for (i = 0; i < sm->irqc.nr_irqs; i++)
sm->action[i] = &eim_spurious_action;
spin_lock_init(&sm->lock);
sm->irqc.irq_group = sm->pdev->resource[SM_EIM_IRQ_RESOURCE].start;
sm->irqc.class = &eim_irq_class;
ret = intc_register_controller(&sm->irqc);
if (ret < 0)
goto out_free_actions;
printk("EIM: External Interrupt Module at 0x%p, IRQ group %u\n",
sm->regs, sm->irqc.irq_group);
printk("EIM: Handling %u external IRQs, starting with IRQ%u\n",
sm->irqc.nr_irqs, sm->irqc.first_irq);
return 0;
out_free_actions:
kfree(sm->action);
out:
return ret;
}
arch_initcall(eim_init);
/*
* Register definitions for SM
*
* System Manager
*/
#ifndef __ASM_AVR32_SM_H__
#define __ASM_AVR32_SM_H__
/* SM register offsets */
#define SM_PM_MCCTRL 0x0000
#define SM_PM_CKSEL 0x0004
#define SM_PM_CPU_MASK 0x0008
#define SM_PM_HSB_MASK 0x000c
#define SM_PM_PBA_MASK 0x0010
#define SM_PM_PBB_MASK 0x0014
#define SM_PM_PLL0 0x0020
#define SM_PM_PLL1 0x0024
#define SM_PM_VCTRL 0x0030
#define SM_PM_VMREF 0x0034
#define SM_PM_VMV 0x0038
#define SM_PM_IER 0x0040
#define SM_PM_IDR 0x0044
#define SM_PM_IMR 0x0048
#define SM_PM_ISR 0x004c
#define SM_PM_ICR 0x0050
#define SM_PM_GCCTRL 0x0060
#define SM_RTC_CTRL 0x0080
#define SM_RTC_VAL 0x0084
#define SM_RTC_TOP 0x0088
#define SM_RTC_IER 0x0090
#define SM_RTC_IDR 0x0094
#define SM_RTC_IMR 0x0098
#define SM_RTC_ISR 0x009c
#define SM_RTC_ICR 0x00a0
#define SM_WDT_CTRL 0x00b0
#define SM_WDT_CLR 0x00b4
#define SM_WDT_EXT 0x00b8
#define SM_RC_RCAUSE 0x00c0
#define SM_EIM_IER 0x0100
#define SM_EIM_IDR 0x0104
#define SM_EIM_IMR 0x0108
#define SM_EIM_ISR 0x010c
#define SM_EIM_ICR 0x0110
#define SM_EIM_MODE 0x0114
#define SM_EIM_EDGE 0x0118
#define SM_EIM_LEVEL 0x011c
#define SM_EIM_TEST 0x0120
#define SM_EIM_NMIC 0x0124
/* Bitfields in PM_MCCTRL */
/* Bitfields in PM_CKSEL */
#define SM_CPUSEL_OFFSET 0
#define SM_CPUSEL_SIZE 3
#define SM_CPUDIV_OFFSET 7
#define SM_CPUDIV_SIZE 1
#define SM_HSBSEL_OFFSET 8
#define SM_HSBSEL_SIZE 3
#define SM_HSBDIV_OFFSET 15
#define SM_HSBDIV_SIZE 1
#define SM_PBASEL_OFFSET 16
#define SM_PBASEL_SIZE 3
#define SM_PBADIV_OFFSET 23
#define SM_PBADIV_SIZE 1
#define SM_PBBSEL_OFFSET 24
#define SM_PBBSEL_SIZE 3
#define SM_PBBDIV_OFFSET 31
#define SM_PBBDIV_SIZE 1
/* Bitfields in PM_CPU_MASK */
/* Bitfields in PM_HSB_MASK */
/* Bitfields in PM_PBA_MASK */
/* Bitfields in PM_PBB_MASK */
/* Bitfields in PM_PLL0 */
#define SM_PLLEN_OFFSET 0
#define SM_PLLEN_SIZE 1
#define SM_PLLOSC_OFFSET 1
#define SM_PLLOSC_SIZE 1
#define SM_PLLOPT_OFFSET 2
#define SM_PLLOPT_SIZE 3
#define SM_PLLDIV_OFFSET 8
#define SM_PLLDIV_SIZE 8
#define SM_PLLMUL_OFFSET 16
#define SM_PLLMUL_SIZE 8
#define SM_PLLCOUNT_OFFSET 24
#define SM_PLLCOUNT_SIZE 6
#define SM_PLLTEST_OFFSET 31
#define SM_PLLTEST_SIZE 1
/* Bitfields in PM_PLL1 */
/* Bitfields in PM_VCTRL */
#define SM_VAUTO_OFFSET 0
#define SM_VAUTO_SIZE 1
#define SM_PM_VCTRL_VAL_OFFSET 8
#define SM_PM_VCTRL_VAL_SIZE 7
/* Bitfields in PM_VMREF */
#define SM_REFSEL_OFFSET 0
#define SM_REFSEL_SIZE 4
/* Bitfields in PM_VMV */
#define SM_PM_VMV_VAL_OFFSET 0
#define SM_PM_VMV_VAL_SIZE 8
/* Bitfields in PM_IER */
/* Bitfields in PM_IDR */
/* Bitfields in PM_IMR */
/* Bitfields in PM_ISR */
/* Bitfields in PM_ICR */
#define SM_LOCK0_OFFSET 0
#define SM_LOCK0_SIZE 1
#define SM_LOCK1_OFFSET 1
#define SM_LOCK1_SIZE 1
#define SM_WAKE_OFFSET 2
#define SM_WAKE_SIZE 1
#define SM_VOK_OFFSET 3
#define SM_VOK_SIZE 1
#define SM_VMRDY_OFFSET 4
#define SM_VMRDY_SIZE 1
#define SM_CKRDY_OFFSET 5
#define SM_CKRDY_SIZE 1
/* Bitfields in PM_GCCTRL */
#define SM_OSCSEL_OFFSET 0
#define SM_OSCSEL_SIZE 1
#define SM_PLLSEL_OFFSET 1
#define SM_PLLSEL_SIZE 1
#define SM_CEN_OFFSET 2
#define SM_CEN_SIZE 1
#define SM_CPC_OFFSET 3
#define SM_CPC_SIZE 1
#define SM_DIVEN_OFFSET 4
#define SM_DIVEN_SIZE 1
#define SM_DIV_OFFSET 8
#define SM_DIV_SIZE 8
/* Bitfields in RTC_CTRL */
#define SM_PCLR_OFFSET 1
#define SM_PCLR_SIZE 1
#define SM_TOPEN_OFFSET 2
#define SM_TOPEN_SIZE 1
#define SM_CLKEN_OFFSET 3
#define SM_CLKEN_SIZE 1
#define SM_PSEL_OFFSET 8
#define SM_PSEL_SIZE 16
/* Bitfields in RTC_VAL */
#define SM_RTC_VAL_VAL_OFFSET 0
#define SM_RTC_VAL_VAL_SIZE 31
/* Bitfields in RTC_TOP */
#define SM_RTC_TOP_VAL_OFFSET 0
#define SM_RTC_TOP_VAL_SIZE 32
/* Bitfields in RTC_IER */
/* Bitfields in RTC_IDR */
/* Bitfields in RTC_IMR */
/* Bitfields in RTC_ISR */
/* Bitfields in RTC_ICR */
#define SM_TOPI_OFFSET 0
#define SM_TOPI_SIZE 1
/* Bitfields in WDT_CTRL */
#define SM_KEY_OFFSET 24
#define SM_KEY_SIZE 8
/* Bitfields in WDT_CLR */
/* Bitfields in WDT_EXT */
/* Bitfields in RC_RCAUSE */
#define SM_POR_OFFSET 0
#define SM_POR_SIZE 1
#define SM_BOD_OFFSET 1
#define SM_BOD_SIZE 1
#define SM_EXT_OFFSET 2
#define SM_EXT_SIZE 1
#define SM_WDT_OFFSET 3
#define SM_WDT_SIZE 1
#define SM_NTAE_OFFSET 4
#define SM_NTAE_SIZE 1
#define SM_SERP_OFFSET 5
#define SM_SERP_SIZE 1
/* Bitfields in EIM_IER */
/* Bitfields in EIM_IDR */
/* Bitfields in EIM_IMR */
/* Bitfields in EIM_ISR */
/* Bitfields in EIM_ICR */
/* Bitfields in EIM_MODE */
/* Bitfields in EIM_EDGE */
#define SM_INT0_OFFSET 0
#define SM_INT0_SIZE 1
#define SM_INT1_OFFSET 1
#define SM_INT1_SIZE 1
#define SM_INT2_OFFSET 2
#define SM_INT2_SIZE 1
#define SM_INT3_OFFSET 3
#define SM_INT3_SIZE 1
/* Bitfields in EIM_LEVEL */
/* Bitfields in EIM_TEST */
#define SM_TESTEN_OFFSET 31
#define SM_TESTEN_SIZE 1
/* Bitfields in EIM_NMIC */
#define SM_EN_OFFSET 0
#define SM_EN_SIZE 1
/* Bit manipulation macros */
#define SM_BIT(name) (1 << SM_##name##_OFFSET)
#define SM_BF(name,value) (((value) & ((1 << SM_##name##_SIZE) - 1)) << SM_##name##_OFFSET)
#define SM_BFEXT(name,value) (((value) >> SM_##name##_OFFSET) & ((1 << SM_##name##_SIZE) - 1))
#define SM_BFINS(name,value,old) (((old) & ~(((1 << SM_##name##_SIZE) - 1) << SM_##name##_OFFSET)) | SM_BF(name,value))
/* Register access macros */
#define sm_readl(port,reg) readl((port)->regs + SM_##reg)
#define sm_writel(port,reg,value) writel((value), (port)->regs + SM_##reg)
#endif /* __ASM_AVR32_SM_H__ */
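/*
 * Illustrative sketch, not part of the patch: a standalone program showing
 * how the OFFSET/SIZE bitfield macros above compose and extract register
 * fields.  The PLLDIV/PLLMUL values are made up for the example; no claim
 * is made about what a real PM_PLL0 setting should be.
 */
#if 0	/* example only; compile on the host with any C compiler */
#include <stdio.h>

#define SM_PLLDIV_OFFSET	8
#define SM_PLLDIV_SIZE		8
#define SM_PLLMUL_OFFSET	16
#define SM_PLLMUL_SIZE		8

#define SM_BF(name,value)						\
	(((value) & ((1 << SM_##name##_SIZE) - 1)) << SM_##name##_OFFSET)
#define SM_BFEXT(name,value)						\
	(((value) >> SM_##name##_OFFSET) & ((1 << SM_##name##_SIZE) - 1))

int main(void)
{
	unsigned long pll0 = SM_BF(PLLDIV, 1) | SM_BF(PLLMUL, 15);

	/* prints: PM_PLL0 = 0x000f0100 (div=1, mul=15) */
	printf("PM_PLL0 = 0x%08lx (div=%lu, mul=%lu)\n", pll0,
	       SM_BFEXT(PLLDIV, pll0), SM_BFEXT(PLLMUL, pll0));
	return 0;
}
#endif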
#
# Makefile for the Linux/AVR32 kernel.
#
obj-y += init.o clear_page.o copy_page.o dma-coherent.o
obj-y += ioremap.o cache.o fault.o tlb.o
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/highmem.h>
#include <linux/unistd.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
/*
* If you attempt to flush anything more than this, you need superuser
* privileges. The value is completely arbitrary.
*/
#define CACHEFLUSH_MAX_LEN 1024
void invalidate_dcache_region(void *start, size_t size)
{
unsigned long v, begin, end, linesz;
linesz = boot_cpu_data.dcache.linesz;
//printk("invalidate dcache: %p + %u\n", start, size);
/* You asked for it, you got it */
begin = (unsigned long)start & ~(linesz - 1);
end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
for (v = begin; v < end; v += linesz)
invalidate_dcache_line((void *)v);
}
void clean_dcache_region(void *start, size_t size)
{
unsigned long v, begin, end, linesz;
linesz = boot_cpu_data.dcache.linesz;
begin = (unsigned long)start & ~(linesz - 1);
end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
for (v = begin; v < end; v += linesz)
clean_dcache_line((void *)v);
flush_write_buffer();
}
void flush_dcache_region(void *start, size_t size)
{
unsigned long v, begin, end, linesz;
linesz = boot_cpu_data.dcache.linesz;
begin = (unsigned long)start & ~(linesz - 1);
end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
for (v = begin; v < end; v += linesz)
flush_dcache_line((void *)v);
flush_write_buffer();
}
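/*
 * Illustrative sketch, not part of the patch: the begin/end arithmetic
 * used by the region operations above, worked through on the host.  With
 * linesz = 32, a 10-byte request starting at 0x101c straddles a line
 * boundary, so the loop must cover the two lines at 0x1000 and 0x1020.
 */
#if 0	/* example only; compile on the host with any C compiler */
#include <stdio.h>

int main(void)
{
	unsigned long start = 0x101c, size = 10, linesz = 32;
	unsigned long begin = start & ~(linesz - 1);
	unsigned long end = (start + size + linesz - 1) & ~(linesz - 1);

	/* prints: region 0x1000..0x1040 -> 2 cache lines */
	printf("region 0x%lx..0x%lx -> %lu cache lines\n",
	       begin, end, (end - begin) / linesz);
	return 0;
}
#endif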
void invalidate_icache_region(void *start, size_t size)
{
unsigned long v, begin, end, linesz;
linesz = boot_cpu_data.icache.linesz;
begin = (unsigned long)start & ~(linesz - 1);
end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
for (v = begin; v < end; v += linesz)
invalidate_icache_line((void *)v);
}
static inline void __flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long v, linesz;
linesz = boot_cpu_data.dcache.linesz;
for (v = start; v < end; v += linesz) {
clean_dcache_line((void *)v);
invalidate_icache_line((void *)v);
}
flush_write_buffer();
}
/*
* This one is called after a module has been loaded.
*/
void flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long linesz;
linesz = boot_cpu_data.dcache.linesz;
__flush_icache_range(start & ~(linesz - 1),
(end + linesz - 1) & ~(linesz - 1));
}
/*
* This one is called from do_no_page(), do_swap_page() and install_page().
*/
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
if (vma->vm_flags & VM_EXEC) {
void *v = kmap(page);
__flush_icache_range((unsigned long)v, (unsigned long)v + PAGE_SIZE);
kunmap(page);
}
}
/*
* This one is used by copy_to_user_page()
*/
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (vma->vm_flags & VM_EXEC)
flush_icache_range(addr, addr + len);
}
asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
{
int ret;
if (len > CACHEFLUSH_MAX_LEN) {
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
}
ret = -EFAULT;
if (!access_ok(VERIFY_WRITE, addr, len))
goto out;
switch (operation) {
case CACHE_IFLUSH:
flush_icache_range((unsigned long)addr,
(unsigned long)addr + len);
ret = 0;
break;
default:
ret = -EINVAL;
}
out:
return ret;
}
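/*
 * Illustrative sketch, not part of the patch: user space would invoke the
 * syscall above after generating code at run time, so the new instructions
 * become visible to the instruction cache.  It is assumed here that
 * __NR_cacheflush and CACHE_IFLUSH are exported to user space through
 * asm/unistd.h and asm/cachectl.h.
 */
#if 0	/* example only, user-space code */
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>

static void sync_icache(void *code, size_t len)
{
	/* lengths above CACHEFLUSH_MAX_LEN additionally require CAP_SYS_ADMIN */
	syscall(__NR_cacheflush, CACHE_IFLUSH, code, len);
}
#endif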
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/page.h>
/*
* clear_page
* r12: P1 address (to)
*/
.text
.global clear_page
clear_page:
sub r9, r12, -PAGE_SIZE
mov r10, 0
mov r11, 0
0: st.d r12++, r10
cp r12, r9
brne 0b
mov pc, lr
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/page.h>
/*
* copy_page
*
* r12 to (P1 address)
* r11 from (P1 address)
* r8-r10 scratch
*/
.text
.global copy_page
copy_page:
sub r10, r11, -(1 << PAGE_SHIFT)
/* pref r11[0] */
1: /* pref r11[8] */
ld.d r8, r11++
st.d r12++, r8
cp r11, r10
brlo 1b
mov pc, lr
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/dma-mapping.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
void dma_cache_sync(void *vaddr, size_t size, int direction)
{
/*
* No need to sync an uncached area
*/
if (PXSEG(vaddr) == P2SEG)
return;
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
dma_cache_inv(vaddr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
dma_cache_wback(vaddr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
dma_cache_wback_inv(vaddr, size);
break;
default:
BUG();
}
}
EXPORT_SYMBOL(dma_cache_sync);
static struct page *__dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp)
{
struct page *page, *free, *end;
int order;
size = PAGE_ALIGN(size);
order = get_order(size);
page = alloc_pages(gfp, order);
if (!page)
return NULL;
split_page(page, order);
/*
* When accessing physical memory with valid cache data, we
* get a cache hit even if the virtual memory region is marked
* as uncached.
*
* Since the memory is newly allocated, there is no point in
* doing a writeback. If the previous owner cares, he should
* have flushed the cache before releasing the memory.
*/
invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);
*handle = page_to_bus(page);
free = page + (size >> PAGE_SHIFT);
end = page + (1 << order);
/*
* Free any unused pages
*/
while (free < end) {
__free_page(free);
free++;
}
return page;
}
static void __dma_free(struct device *dev, size_t size,
struct page *page, dma_addr_t handle)
{
struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
while (page < end)
__free_page(page++);
}
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp)
{
struct page *page;
void *ret = NULL;
page = __dma_alloc(dev, size, handle, gfp);
if (page)
ret = phys_to_uncached(page_to_phys(page));
return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle)
{
void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
struct page *page;
pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
cpu_addr, (unsigned long)handle, (unsigned)size);
BUG_ON(!virt_addr_valid(addr));
page = virt_to_page(addr);
__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
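/*
 * Illustrative sketch, not part of the patch: a driver uses the coherent
 * helpers above roughly like this.  The 4 KiB buffer size and the way the
 * bus address reaches the hardware are assumptions made for the example.
 */
#if 0	/* example only */
static int example_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* the CPU uses 'ring' (uncached mapping), the device uses ring_dma */
	/* ... program ring_dma into the controller ... */

	dma_free_coherent(dev, 4096, ring, ring_dma);
	return 0;
}
#endif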
#if 0
void *dma_alloc_writecombine(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp)
{
struct page *page;
page = __dma_alloc(dev, size, handle, gfp);
/* Now, map the page into P3 with write-combining turned on */
return __ioremap(page_to_phys(page), size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
void dma_free_writecombine(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle)
{
struct page *page;
iounmap(cpu_addr);
page = bus_to_page(handle);
__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);
#endif
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on linux/arch/sh/mm/fault.c:
* Copyright (C) 1999 Niibe Yutaka
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#ifdef DEBUG
static void dump_code(unsigned long pc)
{
char *p = (char *)pc;
char val;
int i;
printk(KERN_DEBUG "Code:");
for (i = 0; i < 16; i++) {
if (__get_user(val, p + i))
break;
printk(" %02x", val);
}
printk("\n");
}
#endif
#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}
int unregister_page_fault_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
int trap, int sig)
{
struct die_args args = {
.regs = regs,
.trapnr = trap,
};
return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
int trap, int sig)
{
return NOTIFY_DONE;
}
#endif
/*
* This routine handles page faults. It determines the address and the
* problem, and then passes it off to one of the appropriate routines.
*
* ecr is the Exception Cause Register. Possible values are:
* 5: Page not found (instruction access)
* 6: Protection fault (instruction access)
* 12: Page not found (read access)
* 13: Page not found (write access)
* 14: Protection fault (read access)
* 15: Protection fault (write access)
*/
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
const struct exception_table_entry *fixup;
unsigned long address;
unsigned long page;
int writeaccess = 0;
if (notify_page_fault(DIE_PAGE_FAULT, regs,
ecr, SIGSEGV) == NOTIFY_STOP)
return;
address = sysreg_read(TLBEAR);
tsk = current;
mm = tsk->mm;
/*
* If we're in an interrupt or have no user context, we must
* not take the fault...
*/
if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
goto no_context;
local_irq_enable();
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so we
* can handle it...
*/
good_area:
//pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags);
switch (ecr) {
case ECR_PROTECTION_X:
case ECR_TLB_MISS_X:
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
break;
case ECR_PROTECTION_R:
case ECR_TLB_MISS_R:
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
break;
case ECR_PROTECTION_W:
case ECR_TLB_MISS_W:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
writeaccess = 1;
break;
default:
panic("Unhandled case %lu in do_page_fault!", ecr);
}
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
survive:
switch (handle_mm_fault(mm, vma, address, writeaccess)) {
case VM_FAULT_MINOR:
tsk->min_flt++;
break;
case VM_FAULT_MAJOR:
tsk->maj_flt++;
break;
case VM_FAULT_SIGBUS:
goto do_sigbus;
case VM_FAULT_OOM:
goto out_of_memory;
default:
BUG();
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory
* map. Fix it, but check if it's kernel or user first...
*/
bad_area:
pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n",
tsk->comm, tsk->pid, address, ecr);
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
/* Hmm...we have to pass address and ecr somehow... */
/* tsk->thread.address = address;
tsk->thread.error_code = ecr; */
#ifdef DEBUG
show_regs(regs);
dump_code(regs->pc);
page = sysreg_read(PTBR);
printk("ptbr = %08lx", page);
if (page) {
page = ((unsigned long *)page)[address >> 22];
printk(" pgd = %08lx", page);
if (page & _PAGE_PRESENT) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
printk(" pte = %08lx\n", page);
}
}
#endif
pr_debug("Sending SIGSEGV to PID %d...\n",
tsk->pid);
force_sig(SIGSEGV, tsk);
return;
}
no_context:
pr_debug("No context\n");
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
pr_debug("Found fixup at %08lx\n", fixup->fixup);
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have
* to terminate things with extreme prejudice.
*/
if (address < PAGE_SIZE)
printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference");
else
printk(KERN_ALERT
"Unable to handle kernel paging request");
printk(" at virtual address %08lx\n", address);
printk(KERN_ALERT "pc = %08lx\n", regs->pc);
page = sysreg_read(PTBR);
printk(KERN_ALERT "ptbr = %08lx", page);
if (page) {
page = ((unsigned long *)page)[address >> 22];
printk(" pgd = %08lx", page);
if (page & _PAGE_PRESENT) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
printk(" pte = %08lx\n", page);
}
}
die("\nOops", regs, ecr);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us
* that made us unable to handle the page fault gracefully.
*/
out_of_memory:
printk("Out of memory\n");
up_read(&mm->mmap_sem);
if (current->pid == 1) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: Killing process %s\n", tsk->comm);
if (user_mode(regs))
do_exit(SIGKILL);
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel or
* user mode.
*/
/* address, error_code, trap_no, ... */
#ifdef DEBUG
show_regs(regs);
dump_code(regs->pc);
#endif
pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid);
force_sig(SIGBUS, tsk);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
}
asmlinkage void do_bus_error(unsigned long addr, int write_access,
struct pt_regs *regs)
{
printk(KERN_ALERT
"Bus error at physical address 0x%08lx (%s access)\n",
addr, write_access ? "write" : "read");
printk(KERN_INFO "DTLB dump:\n");
dump_dtlb();
die("Bus Error", regs, write_access);
do_exit(SIGKILL);
}
/*
* This functionality is currently not possible to implement because
* we're using segmentation to ensure a fixed mapping of the kernel
* virtual address space.
*
* It would be possible to implement this, but it would require us to
* disable segmentation at startup and load the kernel mappings into
* the TLB like any other pages. There will be lots of trickery to
* avoid recursive invocation of the TLB miss handler, though...
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
}
EXPORT_SYMBOL(kernel_map_pages);
#endif
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/nodemask.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/setup.h>
#include <asm/sections.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
struct page *empty_zero_page;
/*
* Cache of MMU context last used.
*/
unsigned long mmu_context_cache = NO_CONTEXT;
#define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn)
void show_mem(void)
{
int total = 0, reserved = 0, cached = 0;
int slab = 0, free = 0, shared = 0;
pg_data_t *pgdat;
printk("Mem-info:\n");
show_free_areas();
for_each_online_pgdat(pgdat) {
struct page *page, *end;
page = pgdat->node_mem_map;
end = page + pgdat->node_spanned_pages;
do {
total++;
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
cached++;
else if (PageSlab(page))
slab++;
else if (!page_count(page))
free++;
else
shared += page_count(page) - 1;
page++;
} while (page < end);
}
printk ("%d pages of RAM\n", total);
printk ("%d free pages\n", free);
printk ("%d reserved pages\n", reserved);
printk ("%d slab pages\n", slab);
printk ("%d pages shared\n", shared);
printk ("%d pages swap cached\n", cached);
}
static void __init print_memory_map(const char *what,
struct tag_mem_range *mem)
{
printk ("%s:\n", what);
for (; mem; mem = mem->next) {
printk (" %08lx - %08lx\n",
(unsigned long)mem->addr,
(unsigned long)(mem->addr + mem->size));
}
}
#define MAX_LOWMEM HIGHMEM_START
#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
/*
* Sort a list of memory regions in-place by ascending address.
*
* We're using bubble sort because we only have singly linked lists
* with few elements.
*/
static void __init sort_mem_list(struct tag_mem_range **pmem)
{
int done;
struct tag_mem_range **a, **b;
if (!*pmem)
return;
do {
done = 1;
a = pmem, b = &(*pmem)->next;
while (*b) {
if ((*a)->addr > (*b)->addr) {
struct tag_mem_range *tmp;
tmp = (*b)->next;
(*b)->next = *a;
*a = *b;
*b = tmp;
done = 0;
}
a = &(*a)->next;
b = &(*a)->next;
}
} while (!done);
}
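The pointer-to-pointer relinking used by sort_mem_list() above is easiest to see in isolation. The following standalone sketch is illustration only and not part of the patch; it uses a simplified node type instead of struct tag_mem_range, but applies the same bubble-sort-by-relinking idea:
/* Illustration only -- not part of the patch; simplified node type. */
#include <stdio.h>

struct range {
	unsigned long addr;
	struct range *next;
};

static void sort_ranges(struct range **head)
{
	struct range **a, **b;
	int done;

	if (!*head)
		return;
	do {
		done = 1;
		a = head;
		b = &(*head)->next;
		while (*b) {
			if ((*a)->addr > (*b)->addr) {
				/* Swap the two adjacent nodes by relinking them */
				struct range *tmp = (*b)->next;
				(*b)->next = *a;
				*a = *b;
				*b = tmp;
				done = 0;
			}
			a = &(*a)->next;
			b = &(*a)->next;
		}
	} while (!done);
}

int main(void)
{
	struct range n3 = { 0x10000000, NULL };
	struct range n2 = { 0x00000000, &n3 };
	struct range n1 = { 0x20000000, &n2 };
	struct range *head = &n1, *p;

	sort_ranges(&head);
	for (p = head; p; p = p->next)
		printf("%08lx\n", p->addr);	/* 00000000, 10000000, 20000000 */
	return 0;
}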
/*
* Find a free memory region large enough for storing the
* bootmem bitmap.
*/
static unsigned long __init
find_bootmap_pfn(const struct tag_mem_range *mem)
{
unsigned long bootmap_pages, bootmap_len;
unsigned long node_pages = PFN_UP(mem->size);
unsigned long bootmap_addr = mem->addr;
struct tag_mem_range *reserved = mem_reserved;
struct tag_mem_range *ramdisk = mem_ramdisk;
unsigned long kern_start = virt_to_phys(_stext);
unsigned long kern_end = virt_to_phys(_end);
bootmap_pages = bootmem_bootmap_pages(node_pages);
bootmap_len = bootmap_pages << PAGE_SHIFT;
/*
* Find a large enough region without reserved pages for
* storing the bootmem bitmap. We can take advantage of the
* fact that all lists have been sorted.
*
* We have to check explicitly reserved regions as well as the
* kernel image and any RAMDISK images...
*
* Oh, and we have to make sure we don't overwrite the taglist
* since we're going to use it until the bootmem allocator is
* fully up and running.
*/
while (1) {
if ((bootmap_addr < kern_end) &&
((bootmap_addr + bootmap_len) > kern_start))
bootmap_addr = kern_end;
while (reserved &&
(bootmap_addr >= (reserved->addr + reserved->size)))
reserved = reserved->next;
if (reserved &&
((bootmap_addr + bootmap_len) >= reserved->addr)) {
bootmap_addr = reserved->addr + reserved->size;
continue;
}
while (ramdisk &&
(bootmap_addr >= (ramdisk->addr + ramdisk->size)))
ramdisk = ramdisk->next;
if (!ramdisk ||
((bootmap_addr + bootmap_len) < ramdisk->addr))
break;
bootmap_addr = ramdisk->addr + ramdisk->size;
}
if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
return ~0UL;
return PFN_UP(bootmap_addr);
}
void __init setup_bootmem(void)
{
unsigned bootmap_size;
unsigned long first_pfn, bootmap_pfn, pages;
unsigned long max_pfn, max_low_pfn;
unsigned long kern_start = virt_to_phys(_stext);
unsigned long kern_end = virt_to_phys(_end);
unsigned node = 0;
struct tag_mem_range *bank, *res;
sort_mem_list(&mem_phys);
sort_mem_list(&mem_reserved);
print_memory_map("Physical memory", mem_phys);
print_memory_map("Reserved memory", mem_reserved);
nodes_clear(node_online_map);
if (mem_ramdisk) {
#ifdef CONFIG_BLK_DEV_INITRD
initrd_start = __va(mem_ramdisk->addr);
initrd_end = initrd_start + mem_ramdisk->size;
print_memory_map("RAMDISK images", mem_ramdisk);
if (mem_ramdisk->next)
printk(KERN_WARNING
"Warning: Only the first RAMDISK image "
"will be used\n");
sort_mem_list(&mem_ramdisk);
#else
printk(KERN_WARNING "RAM disk image present, but "
"no initrd support in kernel!\n");
#endif
}
if (mem_phys->next)
printk(KERN_WARNING "Only using first memory bank\n");
for (bank = mem_phys; bank; bank = NULL) {
first_pfn = PFN_UP(bank->addr);
max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
bootmap_pfn = find_bootmap_pfn(bank);
if (bootmap_pfn > max_pfn)
panic("No space for bootmem bitmap!\n");
if (max_low_pfn > MAX_LOWMEM_PFN) {
max_low_pfn = MAX_LOWMEM_PFN;
#ifndef CONFIG_HIGHMEM
/*
* Lowmem is memory that can be addressed
* directly through P1/P2
*/
printk(KERN_WARNING
"Node %u: Only %ld MiB of memory will be used.\n",
node, MAX_LOWMEM >> 20);
printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else
#error HIGHMEM is not supported by AVR32 yet
#endif
}
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
first_pfn, max_low_pfn);
printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
node, NODE_DATA(node)->bdata,
NODE_DATA(node)->bdata->node_bootmem_map);
/*
* Register fully available RAM pages with the bootmem
* allocator.
*/
pages = max_low_pfn - first_pfn;
free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
PFN_PHYS(pages));
/*
* Reserve space for the kernel image (if present in
* this node)...
*/
if ((kern_start >= PFN_PHYS(first_pfn)) &&
(kern_start < PFN_PHYS(max_pfn))) {
printk("Node %u: Kernel image %08lx - %08lx\n",
node, kern_start, kern_end);
reserve_bootmem_node(NODE_DATA(node), kern_start,
kern_end - kern_start);
}
/* ...the bootmem bitmap... */
reserve_bootmem_node(NODE_DATA(node),
PFN_PHYS(bootmap_pfn),
bootmap_size);
/* ...any RAMDISK images... */
for (res = mem_ramdisk; res; res = res->next) {
if (res->addr > PFN_PHYS(max_pfn))
break;
if (res->addr >= PFN_PHYS(first_pfn)) {
printk("Node %u: RAMDISK %08lx - %08lx\n",
node,
(unsigned long)res->addr,
(unsigned long)(res->addr + res->size));
reserve_bootmem_node(NODE_DATA(node),
res->addr, res->size);
}
}
/* ...and any other reserved regions. */
for (res = mem_reserved; res; res = res->next) {
if (res->addr > PFN_PHYS(max_pfn))
break;
if (res->addr >= PFN_PHYS(first_pfn)) {
printk("Node %u: Reserved %08lx - %08lx\n",
node,
(unsigned long)res->addr,
(unsigned long)(res->addr + res->size));
reserve_bootmem_node(NODE_DATA(node),
res->addr, res->size);
}
}
node_set_online(node);
}
}
/*
* paging_init() sets up the page tables
*
* This routine also unmaps the page at virtual kernel address 0, so
* that we can trap those pesky NULL-reference errors in the kernel.
*/
void __init paging_init(void)
{
extern unsigned long _evba;
void *zero_page;
int nid;
/*
* Make sure we can handle exceptions before enabling
* paging. Not that we should ever _get_ any exceptions this
* early, but you never know...
*/
printk("Exception vectors start at %p\n", &_evba);
sysreg_write(EVBA, (unsigned long)&_evba);
/*
* Since we are ready to handle exceptions now, we should let
* the CPU generate them...
*/
__asm__ __volatile__ ("csrf %0" : : "i"(SR_EM_BIT));
/*
* Allocate the zero page. The allocator will panic if it
* can't satisfy the request, so no need to check.
*/
zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0),
PAGE_SIZE);
{
pgd_t *pg_dir;
int i;
pg_dir = swapper_pg_dir;
sysreg_write(PTBR, (unsigned long)pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++)
pgd_val(pg_dir[i]) = 0;
enable_mmu();
printk ("CPU: Paging enabled\n");
}
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long zones_size[MAX_NR_ZONES];
unsigned long low, start_pfn;
start_pfn = pgdat->bdata->node_boot_start;
start_pfn >>= PAGE_SHIFT;
low = pgdat->bdata->node_low_pfn;
memset(zones_size, 0, sizeof(zones_size));
zones_size[ZONE_NORMAL] = low - start_pfn;
printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
nid, start_pfn, low);
free_area_init_node(nid, pgdat, zones_size, start_pfn, NULL);
printk("Node %u: mem_map starts at %p\n",
pgdat->node_id, pgdat->node_mem_map);
}
mem_map = NODE_DATA(0)->node_mem_map;
memset(zero_page, 0, PAGE_SIZE);
empty_zero_page = virt_to_page(zero_page);
flush_dcache_page(empty_zero_page);
}
void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;
int nid, i;
reservedpages = 0;
high_memory = NULL;
/* this will put all low memory onto the freelists */
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long node_pages = 0;
void *node_high_memory;
num_physpages += pgdat->node_present_pages;
if (pgdat->node_spanned_pages != 0)
node_pages = free_all_bootmem_node(pgdat);
totalram_pages += node_pages;
for (i = 0; i < node_pages; i++)
if (PageReserved(pgdat->node_mem_map + i))
reservedpages++;
node_high_memory = (void *)((pgdat->node_start_pfn
+ pgdat->node_spanned_pages)
<< PAGE_SHIFT);
if (node_high_memory > high_memory)
high_memory = node_high_memory;
}
max_mapnr = MAP_NR(high_memory);
codesize = (unsigned long)_etext - (unsigned long)_text;
datasize = (unsigned long)_edata - (unsigned long)_data;
initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
printk ("Memory: %luk/%luk available (%dk kernel code, "
"%dk reserved, %dk data, %dk init)\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
totalram_pages << (PAGE_SHIFT - 10),
codesize >> 10,
reservedpages << (PAGE_SHIFT - 10),
datasize >> 10,
initsize >> 10);
}
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
unsigned int size = (end - addr) >> 10;
for (; addr < end; addr += PAGE_SIZE) {
struct page *page = virt_to_page(addr);
ClearPageReserved(page);
init_page_count(page);
free_page(addr);
totalram_pages++;
}
if (size && s)
printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
s, size, end - (size << 10), end);
}
void free_initmem(void)
{
free_area((unsigned long)__init_begin, (unsigned long)__init_end,
"init");
}
#ifdef CONFIG_BLK_DEV_INITRD
static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
free_area(start, end, "initrd");
}
static int __init keepinitrd_setup(char *__unused)
{
keep_initrd = 1;
return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/addrspace.h>
static inline int remap_area_pte(pte_t *pte, unsigned long address,
unsigned long end, unsigned long phys_addr,
pgprot_t prot)
{
unsigned long pfn;
pfn = phys_addr >> PAGE_SHIFT;
do {
WARN_ON(!pte_none(*pte));
set_pte(pte, pfn_pte(pfn, prot));
address += PAGE_SIZE;
pfn++;
pte++;
} while (address && (address < end));
return 0;
}
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
unsigned long end, unsigned long phys_addr,
pgprot_t prot)
{
unsigned long next;
phys_addr -= address;
do {
pte_t *pte = pte_alloc_kernel(pmd, address);
if (!pte)
return -ENOMEM;
next = (address + PMD_SIZE) & PMD_MASK;
if (remap_area_pte(pte, address, next,
address + phys_addr, prot))
return -ENOMEM;
address = next;
pmd++;
} while (address && (address < end));
return 0;
}
static int remap_area_pud(pud_t *pud, unsigned long address,
unsigned long end, unsigned long phys_addr,
pgprot_t prot)
{
unsigned long next;
phys_addr -= address;
do {
pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
if (!pmd)
return -ENOMEM;
next = (address + PUD_SIZE) & PUD_MASK;
if (remap_area_pmd(pmd, address, next,
phys_addr + address, prot))
return -ENOMEM;
address = next;
pud++;
} while (address && address < end);
return 0;
}
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
size_t size, pgprot_t prot)
{
unsigned long end = address + size;
unsigned long next;
pgd_t *pgd;
int err = 0;
phys_addr -= address;
pgd = pgd_offset_k(address);
flush_cache_all();
BUG_ON(address >= end);
spin_lock(&init_mm.page_table_lock);
do {
pud_t *pud = pud_alloc(&init_mm, pgd, address);
err = -ENOMEM;
if (!pud)
break;
next = (address + PGDIR_SIZE) & PGDIR_MASK;
if (next < address || next > end)
next = end;
err = remap_area_pud(pud, address, next,
phys_addr + address, prot);
if (err)
break;
address = next;
pgd++;
} while (address && (address < end));
spin_unlock(&init_mm.page_table_lock);
flush_tlb_all();
return err;
}
/*
* Re-map an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access physical
* memory directly.
*/
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
unsigned long flags)
{
void *addr;
struct vm_struct *area;
unsigned long offset, last_addr;
pgprot_t prot;
/*
* Check if we can simply use the P4 segment. This area is
* uncacheable, so if caching/buffering is requested, we can't
* use it.
*/
if ((phys_addr >= P4SEG) && (flags == 0))
return (void __iomem *)phys_addr;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
/*
* XXX: When mapping regular RAM, we'd better make damn sure
* it's never used for anything else. But this is really the
* caller's responsibility...
*/
if (PHYSADDR(P2SEGADDR(phys_addr)) == phys_addr)
return (void __iomem *)P2SEGADDR(phys_addr);
/* Mappings have to be page-aligned */
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr + 1) - phys_addr;
prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
| _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags);
/*
* Ok, go for it..
*/
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
area->phys_addr = phys_addr;
addr = area->addr;
if (remap_area_pages((unsigned long)addr, phys_addr, size, prot)) {
vunmap(addr);
return NULL;
}
return (void __iomem *)(offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
void __iounmap(void __iomem *addr)
{
struct vm_struct *p;
if ((unsigned long)addr >= P4SEG)
return;
p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
if (unlikely(!p)) {
printk (KERN_ERR "iounmap: bad address %p\n", addr);
return;
}
kfree (p);
}
EXPORT_SYMBOL(__iounmap);
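As a usage sketch (illustration only, not part of the patch), a driver maps a peripheral register window through the usual ioremap()/iounmap() wrappers that <asm/io.h> builds on top of __ioremap()/__iounmap(); the base address, window size and register offset below are made up:
/* Illustration only -- hypothetical device base address and register offset. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>

#define MYDEV_PHYS_BASE	0xfff02000UL	/* assumed example address */
#define MYDEV_REGS_SIZE	0x100
#define MYDEV_REG_ID	0x00

static int __init mydev_probe_sketch(void)
{
	void __iomem *regs;
	u32 id;

	regs = ioremap(MYDEV_PHYS_BASE, MYDEV_REGS_SIZE);
	if (!regs)
		return -ENOMEM;

	id = readl(regs + MYDEV_REG_ID);	/* read a device register */
	printk(KERN_INFO "mydev: id = 0x%08x\n", id);

	iounmap(regs);
	return 0;
}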
/*
* AVR32 TLB operations
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/mm.h>
#include <asm/mmu_context.h>
#define _TLBEHI_I 0x100
void show_dtlb_entry(unsigned int index)
{
unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save, flags;
local_irq_save(flags);
mmucr_save = sysreg_read(MMUCR);
tlbehi_save = sysreg_read(TLBEHI);
mmucr = mmucr_save & 0x13;
mmucr |= index << 14;
sysreg_write(MMUCR, mmucr);
asm volatile("tlbr" : : : "memory");
cpu_sync_pipeline();
tlbehi = sysreg_read(TLBEHI);
tlbelo = sysreg_read(TLBELO);
printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
index,
(tlbehi & 0x200)?'1':'0',
(tlbelo & 0x100)?'1':'0',
(tlbehi & 0xff),
(tlbehi >> 12), (tlbelo >> 12),
(tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
(tlbelo & 0x200)?'1':'0',
(tlbelo & 0x080)?'1':'0',
(tlbelo & 0x001)?'1':'0',
(tlbelo & 0x002)?'1':'0');
sysreg_write(MMUCR, mmucr_save);
sysreg_write(TLBEHI, tlbehi_save);
cpu_sync_pipeline();
local_irq_restore(flags);
}
void dump_dtlb(void)
{
unsigned int i;
printk("ID V G ASID VPN PFN AP SZ C B W D\n");
for (i = 0; i < 32; i++)
show_dtlb_entry(i);
}
static unsigned long last_mmucr;
static inline void set_replacement_pointer(unsigned shift)
{
unsigned long mmucr, mmucr_save;
mmucr = mmucr_save = sysreg_read(MMUCR);
/* Does this mapping already exist? */
__asm__ __volatile__(
" tlbs\n"
" mfsr %0, %1"
: "=r"(mmucr)
: "i"(SYSREG_MMUCR));
if (mmucr & SYSREG_BIT(MMUCR_N)) {
/* Not found -- pick a not-recently-accessed entry */
unsigned long rp;
unsigned long tlbar = sysreg_read(TLBARLO);
rp = 32 - fls(tlbar);
if (rp == 32) {
rp = 0;
sysreg_write(TLBARLO, -1L);
}
mmucr &= 0x13;
mmucr |= (rp << shift);
sysreg_write(MMUCR, mmucr);
}
last_mmucr = mmucr;
}
static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
{
unsigned long vpn;
vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
sysreg_write(TLBEHI, vpn);
cpu_sync_pipeline();
set_replacement_pointer(14);
sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);
/* Let's go */
asm volatile("nop\n\ttlbw" : : : "memory");
cpu_sync_pipeline();
}
void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte)
{
unsigned long flags;
/* ptrace may call this routine */
if (vma && current->active_mm != vma->vm_mm)
return;
local_irq_save(flags);
update_dtlb(address, pte, get_asid());
local_irq_restore(flags);
}
void __flush_tlb_page(unsigned long asid, unsigned long page)
{
unsigned long mmucr, tlbehi;
page |= asid;
sysreg_write(TLBEHI, page);
cpu_sync_pipeline();
asm volatile("tlbs");
mmucr = sysreg_read(MMUCR);
if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
unsigned long tlbarlo;
unsigned long entry;
/* Clear the "valid" bit */
tlbehi = sysreg_read(TLBEHI);
tlbehi &= ~_TLBEHI_VALID;
sysreg_write(TLBEHI, tlbehi);
cpu_sync_pipeline();
/* mark the entry as "not accessed" */
entry = (mmucr >> 14) & 0x3f;
tlbarlo = sysreg_read(TLBARLO);
tlbarlo |= (0x80000000 >> entry);
sysreg_write(TLBARLO, tlbarlo);
/* update the entry with valid bit clear */
asm volatile("tlbw");
cpu_sync_pipeline();
}
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
unsigned long flags, asid;
unsigned long saved_asid = MMU_NO_ASID;
asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
page &= PAGE_MASK;
local_irq_save(flags);
if (vma->vm_mm != current->mm) {
saved_asid = get_asid();
set_asid(asid);
}
__flush_tlb_page(asid, page);
if (saved_asid != MMU_NO_ASID)
set_asid(saved_asid);
local_irq_restore(flags);
}
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
unsigned long flags;
int size;
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
mm->context = NO_CONTEXT;
if (mm == current->mm)
activate_context(mm);
} else {
unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
unsigned long saved_asid = MMU_NO_ASID;
start &= PAGE_MASK;
end += (PAGE_SIZE - 1);
end &= PAGE_MASK;
if (mm != current->mm) {
saved_asid = get_asid();
set_asid(asid);
}
while (start < end) {
__flush_tlb_page(asid, start);
start += PAGE_SIZE;
}
if (saved_asid != MMU_NO_ASID)
set_asid(saved_asid);
}
local_irq_restore(flags);
}
}
/*
* TODO: If this is only called for addresses > TASK_SIZE, we can probably
* skip the ASID stuff and just use the Global bit...
*/
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long flags;
int size;
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
flush_tlb_all();
} else {
unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
unsigned long saved_asid = get_asid();
start &= PAGE_MASK;
end += (PAGE_SIZE - 1);
end &= PAGE_MASK;
set_asid(asid);
while (start < end) {
__flush_tlb_page(asid, start);
start += PAGE_SIZE;
}
set_asid(saved_asid);
}
local_irq_restore(flags);
}
void flush_tlb_mm(struct mm_struct *mm)
{
/* Invalidate all TLB entries of this process by getting a new ASID */
if (mm->context != NO_CONTEXT) {
unsigned long flags;
local_irq_save(flags);
mm->context = NO_CONTEXT;
if (mm == current->mm)
activate_context(mm);
local_irq_restore(flags);
}
}
void flush_tlb_all(void)
{
unsigned long flags;
local_irq_save(flags);
sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
local_irq_restore(flags);
}
#ifdef CONFIG_PROC_FS
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
static void *tlb_start(struct seq_file *tlb, loff_t *pos)
{
static unsigned long tlb_index;
if (*pos >= 32)
return NULL;
tlb_index = 0;
return &tlb_index;
}
static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
{
unsigned long *index = v;
if (*index >= 31)
return NULL;
++*pos;
++*index;
return index;
}
static void tlb_stop(struct seq_file *tlb, void *v)
{
}
static int tlb_show(struct seq_file *tlb, void *v)
{
unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save, flags;
unsigned long *index = v;
if (*index == 0)
seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");
BUG_ON(*index >= 32);
local_irq_save(flags);
mmucr_save = sysreg_read(MMUCR);
tlbehi_save = sysreg_read(TLBEHI);
mmucr = mmucr_save & 0x13;
mmucr |= *index << 14;
sysreg_write(MMUCR, mmucr);
asm volatile("tlbr" : : : "memory");
cpu_sync_pipeline();
tlbehi = sysreg_read(TLBEHI);
tlbelo = sysreg_read(TLBELO);
sysreg_write(MMUCR, mmucr_save);
sysreg_write(TLBEHI, tlbehi_save);
cpu_sync_pipeline();
local_irq_restore(flags);
seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
*index,
(tlbehi & 0x200)?'1':'0',
(tlbelo & 0x100)?'1':'0',
(tlbehi & 0xff),
(tlbehi >> 12), (tlbelo >> 12),
(tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
(tlbelo & 0x200)?'1':'0',
(tlbelo & 0x080)?'1':'0',
(tlbelo & 0x001)?'1':'0',
(tlbelo & 0x002)?'1':'0');
return 0;
}
static struct seq_operations tlb_ops = {
.start = tlb_start,
.next = tlb_next,
.stop = tlb_stop,
.show = tlb_show,
};
static int tlb_open(struct inode *inode, struct file *file)
{
return seq_open(file, &tlb_ops);
}
static struct file_operations proc_tlb_operations = {
.open = tlb_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __init proctlb_init(void)
{
struct proc_dir_entry *entry;
entry = create_proc_entry("tlb", 0, NULL);
if (entry)
entry->proc_fops = &proc_tlb_operations;
return 0;
}
late_initcall(proctlb_init);
#endif /* CONFIG_PROC_FS */
include include/asm-generic/Kbuild.asm
headers-y += cachectl.h
#ifndef __ASM_AVR32_A_OUT_H
#define __ASM_AVR32_A_OUT_H
struct exec
{
unsigned long a_info; /* Use macros N_MAGIC, etc for access */
unsigned a_text; /* length of text, in bytes */
unsigned a_data; /* length of data, in bytes */
unsigned a_bss; /* length of uninitialized data area for file, in bytes */
unsigned a_syms; /* length of symbol table data in file, in bytes */
unsigned a_entry; /* start address */
unsigned a_trsize; /* length of relocation info for text, in bytes */
unsigned a_drsize; /* length of relocation info for data, in bytes */
};
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#ifdef __KERNEL__
#define STACK_TOP TASK_SIZE
#endif
#endif /* __ASM_AVR32_A_OUT_H */
/*
 * Definitions for the address spaces of the AVR32 CPUs. Heavily based on
* include/asm-sh/addrspace.h
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_ADDRSPACE_H
#define __ASM_AVR32_ADDRSPACE_H
#ifdef CONFIG_MMU
/* Memory segments when segmentation is enabled */
#define P0SEG 0x00000000
#define P1SEG 0x80000000
#define P2SEG 0xa0000000
#define P3SEG 0xc0000000
#define P4SEG 0xe0000000
/* Returns the privileged segment base of a given address */
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
/* Returns the physical address of a PnSEG (n=1,2) address */
#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
/*
* Map an address to a certain privileged segment
*/
#define P1SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
| P1SEG))
#define P2SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
| P2SEG))
#define P3SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
| P3SEG))
#define P4SEGADDR(a) ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) \
| P4SEG))
#endif /* CONFIG_MMU */
#endif /* __ASM_AVR32_ADDRSPACE_H */
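For illustration only (not part of the patch), the segment macros above translate a physical address into the fixed cached (P1) and uncached (P2) windows and back again, without touching the page tables:
/* Illustration only -- not part of the patch. */
#include <asm/addrspace.h>

static unsigned long addrspace_example(void)
{
	unsigned long phys = 0x10000000;	/* example SDRAM address */

	unsigned long cached   = P1SEGADDR(phys);  /* 0x90000000: accessed through the cache */
	unsigned long uncached = P2SEGADDR(phys);  /* 0xb0000000: bypasses the cache */

	/* PXSEG(uncached) == 0xa0000000 (P2SEG); PHYSADDR() recovers the physical address */
	(void)cached;
	return PHYSADDR(uncached);	/* 0x10000000 again */
}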
/*
* include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h
*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
*
* Peripheral Data Controller (PDC) registers.
* Based on AT91RM9200 datasheet revision E.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef AT91RM9200_PDC_H
#define AT91RM9200_PDC_H
#define AT91_PDC_RPR 0x100 /* Receive Pointer Register */
#define AT91_PDC_RCR 0x104 /* Receive Counter Register */
#define AT91_PDC_TPR 0x108 /* Transmit Pointer Register */
#define AT91_PDC_TCR 0x10c /* Transmit Counter Register */
#define AT91_PDC_RNPR 0x110 /* Receive Next Pointer Register */
#define AT91_PDC_RNCR 0x114 /* Receive Next Counter Register */
#define AT91_PDC_TNPR 0x118 /* Transmit Next Pointer Register */
#define AT91_PDC_TNCR 0x11c /* Transmit Next Counter Register */
#define AT91_PDC_PTCR 0x120 /* Transfer Control Register */
#define AT91_PDC_RXTEN (1 << 0) /* Receiver Transfer Enable */
#define AT91_PDC_RXTDIS (1 << 1) /* Receiver Transfer Disable */
#define AT91_PDC_TXTEN (1 << 8) /* Transmitter Transfer Enable */
#define AT91_PDC_TXTDIS (1 << 9) /* Transmitter Transfer Disable */
#define AT91_PDC_PTSR 0x124 /* Transfer Status Register */
#endif
/*
* include/asm-arm/arch-at91rm9200/at91rm9200_usart.h
*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
*
* USART registers.
* Based on AT91RM9200 datasheet revision E.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef AT91RM9200_USART_H
#define AT91RM9200_USART_H
#define AT91_US_CR 0x00 /* Control Register */
#define AT91_US_RSTRX (1 << 2) /* Reset Receiver */
#define AT91_US_RSTTX (1 << 3) /* Reset Transmitter */
#define AT91_US_RXEN (1 << 4) /* Receiver Enable */
#define AT91_US_RXDIS (1 << 5) /* Receiver Disable */
#define AT91_US_TXEN (1 << 6) /* Transmitter Enable */
#define AT91_US_TXDIS (1 << 7) /* Transmitter Disable */
#define AT91_US_RSTSTA (1 << 8) /* Reset Status Bits */
#define AT91_US_STTBRK (1 << 9) /* Start Break */
#define AT91_US_STPBRK (1 << 10) /* Stop Break */
#define AT91_US_STTTO (1 << 11) /* Start Time-out */
#define AT91_US_SENDA (1 << 12) /* Send Address */
#define AT91_US_RSTIT (1 << 13) /* Reset Iterations */
#define AT91_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */
#define AT91_US_RETTO (1 << 15) /* Rearm Time-out */
#define AT91_US_DTREN (1 << 16) /* Data Terminal Ready Enable */
#define AT91_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable */
#define AT91_US_RTSEN (1 << 18) /* Request To Send Enable */
#define AT91_US_RTSDIS (1 << 19) /* Request To Send Disable */
#define AT91_US_MR 0x04 /* Mode Register */
#define AT91_US_USMODE (0xf << 0) /* Mode of the USART */
#define AT91_US_USMODE_NORMAL 0
#define AT91_US_USMODE_RS485 1
#define AT91_US_USMODE_HWHS 2
#define AT91_US_USMODE_MODEM 3
#define AT91_US_USMODE_ISO7816_T0 4
#define AT91_US_USMODE_ISO7816_T1 6
#define AT91_US_USMODE_IRDA 8
#define AT91_US_USCLKS (3 << 4) /* Clock Selection */
#define AT91_US_CHRL (3 << 6) /* Character Length */
#define AT91_US_CHRL_5 (0 << 6)
#define AT91_US_CHRL_6 (1 << 6)
#define AT91_US_CHRL_7 (2 << 6)
#define AT91_US_CHRL_8 (3 << 6)
#define AT91_US_SYNC (1 << 8) /* Synchronous Mode Select */
#define AT91_US_PAR (7 << 9) /* Parity Type */
#define AT91_US_PAR_EVEN (0 << 9)
#define AT91_US_PAR_ODD (1 << 9)
#define AT91_US_PAR_SPACE (2 << 9)
#define AT91_US_PAR_MARK (3 << 9)
#define AT91_US_PAR_NONE (4 << 9)
#define AT91_US_PAR_MULTI_DROP (6 << 9)
#define AT91_US_NBSTOP (3 << 12) /* Number of Stop Bits */
#define AT91_US_NBSTOP_1 (0 << 12)
#define AT91_US_NBSTOP_1_5 (1 << 12)
#define AT91_US_NBSTOP_2 (2 << 12)
#define AT91_US_CHMODE (3 << 14) /* Channel Mode */
#define AT91_US_CHMODE_NORMAL (0 << 14)
#define AT91_US_CHMODE_ECHO (1 << 14)
#define AT91_US_CHMODE_LOC_LOOP (2 << 14)
#define AT91_US_CHMODE_REM_LOOP (3 << 14)
#define AT91_US_MSBF (1 << 16) /* Bit Order */
#define AT91_US_MODE9 (1 << 17) /* 9-bit Character Length */
#define AT91_US_CLKO (1 << 18) /* Clock Output Select */
#define AT91_US_OVER (1 << 19) /* Oversampling Mode */
#define AT91_US_INACK (1 << 20) /* Inhibit Non Acknowledge */
#define AT91_US_DSNACK (1 << 21) /* Disable Successive NACK */
#define AT91_US_MAX_ITER (7 << 24) /* Max Iterations */
#define AT91_US_FILTER (1 << 28) /* Infrared Receive Line Filter */
#define AT91_US_IER 0x08 /* Interrupt Enable Register */
#define AT91_US_RXRDY (1 << 0) /* Receiver Ready */
#define AT91_US_TXRDY (1 << 1) /* Transmitter Ready */
#define AT91_US_RXBRK (1 << 2) /* Break Received / End of Break */
#define AT91_US_ENDRX (1 << 3) /* End of Receiver Transfer */
#define AT91_US_ENDTX (1 << 4) /* End of Transmitter Transfer */
#define AT91_US_OVRE (1 << 5) /* Overrun Error */
#define AT91_US_FRAME (1 << 6) /* Framing Error */
#define AT91_US_PARE (1 << 7) /* Parity Error */
#define AT91_US_TIMEOUT (1 << 8) /* Receiver Time-out */
#define AT91_US_TXEMPTY (1 << 9) /* Transmitter Empty */
#define AT91_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */
#define AT91_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */
#define AT91_US_RXBUFF (1 << 12) /* Reception Buffer Full */
#define AT91_US_NACK (1 << 13) /* Non Acknowledge */
#define AT91_US_RIIC (1 << 16) /* Ring Indicator Input Change */
#define AT91_US_DSRIC (1 << 17) /* Data Set Ready Input Change */
#define AT91_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change */
#define AT91_US_CTSIC (1 << 19) /* Clear to Send Input Change */
#define AT91_US_RI (1 << 20) /* RI */
#define AT91_US_DSR (1 << 21) /* DSR */
#define AT91_US_DCD (1 << 22) /* DCD */
#define AT91_US_CTS (1 << 23) /* CTS */
#define AT91_US_IDR 0x0c /* Interrupt Disable Register */
#define AT91_US_IMR 0x10 /* Interrupt Mask Register */
#define AT91_US_CSR 0x14 /* Channel Status Register */
#define AT91_US_RHR 0x18 /* Receiver Holding Register */
#define AT91_US_THR 0x1c /* Transmitter Holding Register */
#define AT91_US_BRGR 0x20 /* Baud Rate Generator Register */
#define AT91_US_CD (0xffff << 0) /* Clock Divider */
#define AT91_US_RTOR 0x24 /* Receiver Time-out Register */
#define AT91_US_TO (0xffff << 0) /* Time-out Value */
#define AT91_US_TTGR 0x28 /* Transmitter Timeguard Register */
#define AT91_US_TG (0xff << 0) /* Timeguard Value */
#define AT91_US_FIDI 0x40 /* FI DI Ratio Register */
#define AT91_US_NER 0x44 /* Number of Errors Register */
#define AT91_US_IF 0x4c /* IrDA Filter Register */
#endif
/*
* Platform data definitions.
*/
#ifndef __ASM_ARCH_BOARD_H
#define __ASM_ARCH_BOARD_H
#include <linux/types.h>
/* Add basic devices: system manager, interrupt controller, portmuxes, etc. */
void at32_add_system_devices(void);
#define AT91_NR_UART 4
extern struct platform_device *at91_default_console_device;
struct platform_device *at32_add_device_usart(unsigned int id);
struct eth_platform_data {
u8 valid;
u8 mii_phy_addr;
u8 is_rmii;
u8 hw_addr[6];
};
struct platform_device *
at32_add_device_eth(unsigned int id, struct eth_platform_data *data);
struct platform_device *at32_add_device_spi(unsigned int id);
struct lcdc_platform_data {
unsigned long fbmem_start;
unsigned long fbmem_size;
};
struct platform_device *
at32_add_device_lcdc(unsigned int id, struct lcdc_platform_data *data);
#endif /* __ASM_ARCH_BOARD_H */
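A board setup file would use these helpers roughly as sketched below. This is illustration only, not part of the patch; the include path, device ids and MAC address are assumptions:
/* Illustration only -- hypothetical board init code. */
#include <linux/init.h>
#include <asm/arch/board.h>	/* assumed include path for this header */

static struct eth_platform_data eth_data = {
	.valid        = 1,
	.mii_phy_addr = 0,
	.is_rmii      = 0,
	.hw_addr      = { 0x00, 0x04, 0x25, 0x00, 0x00, 0x01 },  /* example MAC */
};

static int __init board_devices_sketch(void)
{
	at32_add_system_devices();
	at32_add_device_usart(0);		/* console USART */
	at32_add_device_eth(0, &eth_data);	/* on-chip MAC, platform data above */
	return 0;
}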
/*
* AT32AP platform initialization calls.
*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_AT32AP_INIT_H__
#define __ASM_AVR32_AT32AP_INIT_H__
void setup_platform(void);
/* Called by setup_platform */
void at32_clock_init(void);
void at32_portmux_init(void);
void at32_setup_serial_console(unsigned int usart_id);
#endif /* __ASM_AVR32_AT32AP_INIT_H__ */
/*
* AT32 portmux interface.
*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_AT32_PORTMUX_H__
#define __ASM_AVR32_AT32_PORTMUX_H__
void portmux_set_func(unsigned int portmux_id, unsigned int pin_id,
unsigned int function_id);
#endif /* __ASM_AVR32_AT32_PORTMUX_H__ */
/*
* AT32 System Manager interface.
*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_AT32_SM_H__
#define __ASM_AVR32_AT32_SM_H__
struct irq_chip;
struct platform_device;
struct at32_sm {
spinlock_t lock;
void __iomem *regs;
struct irq_chip *eim_chip;
unsigned int eim_first_irq;
struct platform_device *pdev;
};
extern struct platform_device at32_sm_device;
extern struct at32_sm system_manager;
#endif /* __ASM_AVR32_AT32_SM_H__ */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_ASM_H__
#define __ASM_AVR32_ASM_H__
#include <asm/sysreg.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#define mask_interrupts ssrf SR_GM_BIT
#define mask_exceptions ssrf SR_EM_BIT
#define unmask_interrupts csrf SR_GM_BIT
#define unmask_exceptions csrf SR_EM_BIT
#ifdef CONFIG_FRAME_POINTER
.macro save_fp
st.w --sp, r7
.endm
.macro restore_fp
ld.w r7, sp++
.endm
.macro zero_fp
mov r7, 0
.endm
#else
.macro save_fp
.endm
.macro restore_fp
.endm
.macro zero_fp
.endm
#endif
.macro get_thread_info reg
mov \reg, sp
andl \reg, ~(THREAD_SIZE - 1) & 0xffff
.endm
/* Save and restore registers */
.macro save_min sr, tmp=lr
pushm lr
mfsr \tmp, \sr
zero_fp
st.w --sp, \tmp
.endm
.macro restore_min sr, tmp=lr
ld.w \tmp, sp++
mtsr \sr, \tmp
popm lr
.endm
.macro save_half sr, tmp=lr
save_fp
pushm r8-r9,r10,r11,r12,lr
zero_fp
mfsr \tmp, \sr
st.w --sp, \tmp
.endm
.macro restore_half sr, tmp=lr
ld.w \tmp, sp++
mtsr \sr, \tmp
popm r8-r9,r10,r11,r12,lr
restore_fp
.endm
.macro save_full_user sr, tmp=lr
stmts --sp, r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,sp,lr
st.w --sp, lr
zero_fp
mfsr \tmp, \sr
st.w --sp, \tmp
.endm
.macro restore_full_user sr, tmp=lr
ld.w \tmp, sp++
mtsr \sr, \tmp
ld.w lr, sp++
ldmts sp++, r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,sp,lr
.endm
/* uaccess macros */
.macro branch_if_kernel scratch, label
get_thread_info \scratch
ld.w \scratch, \scratch[TI_flags]
bld \scratch, TIF_USERSPACE
brcc \label
.endm
.macro ret_if_privileged scratch, addr, size, ret
sub \scratch, \size, 1
add \scratch, \addr
retcs \ret
retmi \ret
.endm
#endif /* __ASM_AVR32_ASM_H__ */
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc.
*
* But use these as seldom as possible since they are slower than
* regular operations.
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_ATOMIC_H
#define __ASM_AVR32_ATOMIC_H
#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
/*
 * atomic_sub_return - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v. Returns the resulting value.
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
int result;
asm volatile(
"/* atomic_sub_return */\n"
"1: ssrf 5\n"
" ld.w %0, %2\n"
" sub %0, %3\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(result), "=o"(v->counter)
: "m"(v->counter), "ir"(i)
: "cc");
return result;
}
/*
* atomic_add_return - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v. Returns the resulting value.
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
int result;
if (__builtin_constant_p(i))
result = atomic_sub_return(-i, v);
else
asm volatile(
"/* atomic_add_return */\n"
"1: ssrf 5\n"
" ld.w %0, %1\n"
" add %0, %3\n"
" stcond %2, %0\n"
" brne 1b"
: "=&r"(result), "=o"(v->counter)
: "m"(v->counter), "r"(i)
: "cc", "memory");
return result;
}
/*
* atomic_sub_unless - sub unless the number is a given value
* @v: pointer of type atomic_t
 * @a: the amount to subtract from v...
* @u: ...unless v is equal to u.
*
* If the atomic value v is not equal to u, this function subtracts a
* from v, and returns non zero. If v is equal to u then it returns
* zero. This is done as an atomic operation.
*/
static inline int atomic_sub_unless(atomic_t *v, int a, int u)
{
int tmp, result = 0;
asm volatile(
"/* atomic_sub_unless */\n"
"1: ssrf 5\n"
" ld.w %0, %3\n"
" cp.w %0, %5\n"
" breq 1f\n"
" sub %0, %4\n"
" stcond %2, %0\n"
" brne 1b\n"
" mov %1, 1\n"
"1:"
: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
: "m"(v->counter), "ir"(a), "ir"(u)
: "cc", "memory");
return result;
}
/*
* atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* If the atomic value v is not equal to u, this function adds a to v,
* and returns non zero. If v is equal to u then it returns zero. This
* is done as an atomic operation.
*/
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int tmp, result;
if (__builtin_constant_p(a))
result = atomic_sub_unless(v, -a, u);
else {
result = 0;
asm volatile(
"/* atomic_add_unless */\n"
"1: ssrf 5\n"
" ld.w %0, %3\n"
" cp.w %0, %5\n"
" breq 1f\n"
" add %0, %4\n"
" stcond %2, %0\n"
" brne 1b\n"
" mov %1, 1\n"
"1:"
: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
: "m"(v->counter), "r"(a), "ir"(u)
: "cc", "memory");
}
return result;
}
/*
* atomic_sub_if_positive - conditionally subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
* The function returns the old value of @v minus @i.
*/
static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
int result;
asm volatile(
"/* atomic_sub_if_positive */\n"
"1: ssrf 5\n"
" ld.w %0, %2\n"
" sub %0, %3\n"
" brlt 1f\n"
" stcond %1, %0\n"
" brne 1b\n"
"1:"
: "=&r"(result), "=o"(v->counter)
: "m"(v->counter), "ir"(i)
: "cc", "memory");
return result;
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
#define atomic_add(i, v) (void)atomic_add_return(i, v)
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec_return(v) atomic_sub_return(1, v)
#define atomic_inc_return(v) atomic_add_return(1, v)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
#define atomic_inc_not_zero(v) atomic_add_unless(v, 1, 0)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __ASM_AVR32_ATOMIC_H */
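As a usage sketch (illustration only, not part of the patch), the primitives above cover the common reference-counting pattern without requiring compare-and-swap loops in the caller:
/* Illustration only -- a typical refcount built on the primitives above. */
#include <asm/atomic.h>

struct object {
	atomic_t refcount;
	/* ... payload ... */
};

static int object_get(struct object *obj)
{
	/* Take a reference only if the object is still live (count != 0) */
	return atomic_inc_not_zero(&obj->refcount);
}

static void object_put(struct object *obj, void (*release)(struct object *))
{
	/* atomic_dec_and_test() is true when the last reference is dropped */
	if (atomic_dec_and_test(&obj->refcount))
		release(obj);
}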
#ifndef __ASM_AVR32_AUXVEC_H
#define __ASM_AVR32_AUXVEC_H
#endif /* __ASM_AVR32_AUXVEC_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_BITOPS_H
#define __ASM_AVR32_BITOPS_H
#include <asm/byteorder.h>
#include <asm/system.h>
/*
* clear_bit() doesn't provide any barrier for the compiler
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(int nr, volatile void * addr)
{
unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
unsigned long tmp;
if (__builtin_constant_p(nr)) {
asm volatile(
"1: ssrf 5\n"
" ld.w %0, %2\n"
" sbr %0, %3\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p)
: "m"(*p), "i"(nr)
: "cc");
} else {
unsigned long mask = 1UL << (nr % BITS_PER_LONG);
asm volatile(
"1: ssrf 5\n"
" ld.w %0, %2\n"
" or %0, %3\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p)
: "m"(*p), "r"(mask)
: "cc");
}
}
/*
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile void * addr)
{
unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
unsigned long tmp;
if (__builtin_constant_p(nr)) {
asm volatile(
"1: ssrf 5\n"
" ld.w %0, %2\n"
" cbr %0, %3\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p)
: "m"(*p), "i"(nr)
: "cc");
} else {
unsigned long mask = 1UL << (nr % BITS_PER_LONG);
asm volatile(
"1: ssrf 5\n"
" ld.w %0, %2\n"
" andn %0, %3\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p)
: "m"(*p), "r"(mask)
: "cc");
}
}
/*
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(int nr, volatile void * addr)
{
unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
unsigned long mask = 1UL << (nr % BITS_PER_LONG);
unsigned long tmp;
asm volatile(
"1: ssrf 5\n"
" ld.w %0, %2\n"
" eor %0, %3\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p)
: "m"(*p), "r"(mask)
: "cc");
}
/*
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(int nr, volatile void * addr)
{
unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
unsigned long mask = 1UL << (nr % BITS_PER_LONG);
unsigned long tmp, old;
if (__builtin_constant_p(nr)) {
asm volatile(
"1: ssrf 5\n"
" ld.w %0, %3\n"
" mov %2, %0\n"
" sbr %0, %4\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p), "=&r"(old)
: "m"(*p), "i"(nr)
: "memory", "cc");
} else {
asm volatile(
"1: ssrf 5\n"
" ld.w %2, %3\n"
" or %0, %2, %4\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p), "=&r"(old)
: "m"(*p), "r"(mask)
: "memory", "cc");
}
return (old & mask) != 0;
}
/*
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(int nr, volatile void * addr)
{
unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
unsigned long mask = 1UL << (nr % BITS_PER_LONG);
unsigned long tmp, old;
if (__builtin_constant_p(nr)) {
asm volatile(
"1: ssrf 5\n"
" ld.w %0, %3\n"
" mov %2, %0\n"
" cbr %0, %4\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p), "=&r"(old)
: "m"(*p), "i"(nr)
: "memory", "cc");
} else {
asm volatile(
"1: ssrf 5\n"
" ld.w %0, %3\n"
" mov %2, %0\n"
" andn %0, %4\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p), "=&r"(old)
: "m"(*p), "r"(mask)
: "memory", "cc");
}
return (old & mask) != 0;
}
/*
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile void * addr)
{
unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
unsigned long mask = 1UL << (nr % BITS_PER_LONG);
unsigned long tmp, old;
asm volatile(
"1: ssrf 5\n"
" ld.w %2, %3\n"
" eor %0, %2, %4\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(tmp), "=o"(*p), "=&r"(old)
: "m"(*p), "r"(mask)
: "memory", "cc");
return (old & mask) != 0;
}
#include <asm-generic/bitops/non-atomic.h>
/* Find First bit Set */
static inline unsigned long __ffs(unsigned long word)
{
unsigned long result;
asm("brev %1\n\t"
"clz %0,%1"
: "=r"(result), "=&r"(word)
: "1"(word));
return result;
}
/* Find First Zero */
static inline unsigned long ffz(unsigned long word)
{
return __ffs(~word);
}
/* Find Last bit Set */
static inline int fls(unsigned long word)
{
unsigned long result;
asm("clz %0,%1" : "=r"(result) : "r"(word));
return 32 - result;
}
unsigned long find_first_zero_bit(const unsigned long *addr,
unsigned long size);
unsigned long find_next_zero_bit(const unsigned long *addr,
unsigned long size,
unsigned long offset);
unsigned long find_first_bit(const unsigned long *addr,
unsigned long size);
unsigned long find_next_bit(const unsigned long *addr,
unsigned long size,
unsigned long offset);
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*
* The difference is that bit numbering starts at 1, and if no bit is set,
* the function returns 0.
*/
static inline int ffs(unsigned long word)
{
if(word == 0)
return 0;
return __ffs(word) + 1;
}
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix-le.h>
#endif /* __ASM_AVR32_BITOPS_H */
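For illustration only (not part of the patch), the atomic bit operations above are typically used on small driver flag words; the device name and bit numbers below are made up:
/* Illustration only -- driver-style flag handling with the bitops above. */
#include <linux/bitops.h>
#include <linux/errno.h>

#define MYDEV_BUSY	0	/* bit numbers, not masks */
#define MYDEV_ERROR	1

static unsigned long mydev_flags;

static int mydev_try_start(void)
{
	/* Atomically claim the device; non-zero means it was already busy */
	if (test_and_set_bit(MYDEV_BUSY, &mydev_flags))
		return -EBUSY;
	return 0;
}

static void mydev_finish(int error)
{
	if (error)
		set_bit(MYDEV_ERROR, &mydev_flags);
	smp_mb__before_clear_bit();
	clear_bit(MYDEV_BUSY, &mydev_flags);
}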
/*
* Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_BUG_H
#define __ASM_AVR32_BUG_H
#ifdef CONFIG_BUG
/*
* According to our Chief Architect, this compact opcode is very
* unlikely to ever be implemented.
*/
#define AVR32_BUG_OPCODE 0x5df0
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define BUG() \
do { \
asm volatile(".hword %0\n\t" \
".hword %1\n\t" \
".long %2" \
: \
: "n"(AVR32_BUG_OPCODE), \
"i"(__LINE__), "X"(__FILE__)); \
} while (0)
#else
#define BUG() \
do { \
asm volatile(".hword %0\n\t" \
: : "n"(AVR32_BUG_OPCODE)); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define HAVE_ARCH_BUG
#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
#endif /* __ASM_AVR32_BUG_H */
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
#ifndef __ASM_AVR32_BUGS_H
#define __ASM_AVR32_BUGS_H
static void __init check_bugs(void)
{
cpu_data->loops_per_jiffy = loops_per_jiffy;
}
#endif /* __ASM_AVR32_BUGS_H */
/*
* AVR32 endian-conversion functions.
*/
#ifndef __ASM_AVR32_BYTEORDER_H
#define __ASM_AVR32_BYTEORDER_H
#include <asm/types.h>
#include <linux/compiler.h>
#ifdef __CHECKER__
extern unsigned long __builtin_bswap_32(unsigned long x);
extern unsigned short __builtin_bswap_16(unsigned short x);
#endif
#define __arch__swab32(x) __builtin_bswap_32(x)
#define __arch__swab16(x) __builtin_bswap_16(x)
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif
#include <linux/byteorder/big_endian.h>
#endif /* __ASM_AVR32_BYTEORDER_H */
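Since AVR32 is big-endian, the generic little-endian helpers pulled in via <linux/byteorder/big_endian.h> end up using __arch__swab32()/__arch__swab16() above. A minimal usage sketch (illustration only, not part of the patch):
/* Illustration only -- converting a little-endian field on this big-endian CPU. */
#include <linux/types.h>
#include <asm/byteorder.h>

static u32 read_le32_field(const u32 *p)
{
	/* le32_to_cpu() expands to a byte swap here, via __arch__swab32() */
	return le32_to_cpu(*p);
}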
#ifndef __ASM_AVR32_CACHE_H
#define __ASM_AVR32_CACHE_H
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#ifndef __ASSEMBLER__
struct cache_info {
unsigned int ways;
unsigned int sets;
unsigned int linesz;
};
#endif /* __ASSEMBLER__ */
/* Cache operation constants */
#define ICACHE_FLUSH 0x00
#define ICACHE_INVALIDATE 0x01
#define ICACHE_LOCK 0x02
#define ICACHE_UNLOCK 0x03
#define ICACHE_PREFETCH 0x04
#define DCACHE_FLUSH 0x08
#define DCACHE_LOCK 0x09
#define DCACHE_UNLOCK 0x0a
#define DCACHE_INVALIDATE 0x0b
#define DCACHE_CLEAN 0x0c
#define DCACHE_CLEAN_INVAL 0x0d
#endif /* __ASM_AVR32_CACHE_H */
#ifndef __ASM_AVR32_CACHECTL_H
#define __ASM_AVR32_CACHECTL_H
/*
* Operations that can be performed through the cacheflush system call
*/
/* Clean the data cache, then invalidate the icache */
#define CACHE_IFLUSH 0
#endif /* __ASM_AVR32_CACHECTL_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_CACHEFLUSH_H
#define __ASM_AVR32_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
#define CACHE_OP_ICACHE_INVALIDATE 0x01
#define CACHE_OP_DCACHE_INVALIDATE 0x0b
#define CACHE_OP_DCACHE_CLEAN 0x0c
#define CACHE_OP_DCACHE_CLEAN_INVAL 0x0d
/*
* Invalidate any cacheline containing virtual address vaddr without
* writing anything back to memory.
*
* Note that this function may corrupt unrelated data structures when
 * applied on buffers that are not cacheline aligned at both ends.
*/
static inline void invalidate_dcache_line(void *vaddr)
{
asm volatile("cache %0[0], %1"
:
: "r"(vaddr), "n"(CACHE_OP_DCACHE_INVALIDATE)
: "memory");
}
/*
* Make sure any cacheline containing virtual address vaddr is written
* to memory.
*/
static inline void clean_dcache_line(void *vaddr)
{
asm volatile("cache %0[0], %1"
:
: "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN)
: "memory");
}
/*
* Make sure any cacheline containing virtual address vaddr is written
* to memory and then invalidate it.
*/
static inline void flush_dcache_line(void *vaddr)
{
asm volatile("cache %0[0], %1"
:
: "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN_INVAL)
: "memory");
}
/*
* Invalidate any instruction cacheline containing virtual address
* vaddr.
*/
static inline void invalidate_icache_line(void *vaddr)
{
asm volatile("cache %0[0], %1"
:
: "r"(vaddr), "n"(CACHE_OP_ICACHE_INVALIDATE)
: "memory");
}
/*
* Applies the above functions on all lines that are touched by the
* specified virtual address range.
*/
void invalidate_dcache_region(void *start, size_t len);
void clean_dcache_region(void *start, size_t len);
void flush_dcache_region(void *start, size_t len);
void invalidate_icache_region(void *start, size_t len);
/*
* Make sure any pending writes are completed before continuing.
*/
#define flush_write_buffer() asm volatile("sync 0" : : : "memory")
/*
* The following functions are called when a virtual mapping changes.
* We do not need to flush anything in this case.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
/*
* I think we need to implement this one to be able to reliably
* execute pages from RAMDISK. However, if we implement the
* flush_dcache_*() functions, it might not be needed anymore.
*
* #define flush_icache_page(vma, page) do { } while (0)
*/
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
/*
* These are (I think) related to D-cache aliasing. We might need to
* do something here, but only for certain configurations. No such
* configurations exist at this time.
*/
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(page) do { } while (0)
#define flush_dcache_mmap_unlock(page) do { } while (0)
/*
* These are for I/D cache coherency. In this case, we do need to
* flush with all configurations.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len);
#define copy_to_user_page(vma, page, vaddr, dst, src, len) do { \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while(0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* __ASM_AVR32_CACHEFLUSH_H */
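As a usage sketch (illustration only, not part of the patch), the region helpers above are what a driver doing its own cache maintenance around a DMA transfer needs; the buffer and length are hypothetical and must respect the cacheline-alignment caveat noted above:
/* Illustration only -- manual cache maintenance around a DMA transfer. */
#include <asm/cacheflush.h>

static void dma_to_device_sketch(void *buf, size_t len)
{
	/* Write dirty cachelines back so the device sees current data */
	clean_dcache_region(buf, len);
	flush_write_buffer();
	/* ... start the DMA engine here ... */
}

static void dma_from_device_sketch(void *buf, size_t len)
{
	/* Drop any stale cachelines before the CPU reads the new data;
	 * buf and len must be cacheline aligned at both ends */
	invalidate_dcache_region(buf, len);
	/* ... CPU may now read buf ... */
}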
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_CHECKSUM_H
#define __ASM_AVR32_CHECKSUM_H
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
unsigned int csum_partial(const unsigned char * buff, int len,
unsigned int sum);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
 * Here it is even more important to align src and dst on a 32-bit (or
 * even better, 64-bit) boundary
*/
unsigned int csum_partial_copy_generic(const char *src, char *dst, int len,
int sum, int *src_err_ptr,
int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* verify_area().
*/
static inline
unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
int len, int sum)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
static inline
unsigned int csum_partial_copy_from_user (const char __user *src, char *dst,
int len, int sum, int *err_ptr)
{
return csum_partial_copy_generic((const char __force *)src, dst, len,
sum, err_ptr, NULL);
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
static inline unsigned short ip_fast_csum(unsigned char *iph,
unsigned int ihl)
{
unsigned int sum, tmp;
__asm__ __volatile__(
" ld.w %0, %1++\n"
" ld.w %3, %1++\n"
" sub %2, 4\n"
" add %0, %3\n"
" ld.w %3, %1++\n"
" adc %0, %0, %3\n"
" ld.w %3, %1++\n"
" adc %0, %0, %3\n"
" acr %0\n"
"1: ld.w %3, %1++\n"
" add %0, %3\n"
" acr %0\n"
" sub %2, 1\n"
" brne 1b\n"
" lsl %3, %0, 16\n"
" andl %0, 0\n"
" mov %2, 0xffff\n"
" add %0, %3\n"
" adc %0, %0, %2\n"
" com %0\n"
" lsr %0, 16\n"
: "=r"(sum), "=r"(iph), "=r"(ihl), "=r"(tmp)
: "1"(iph), "2"(ihl)
: "memory", "cc");
return sum;
}
/*
* Fold a partial checksum
*/
static inline unsigned int csum_fold(unsigned int sum)
{
unsigned int tmp;
asm(" bfextu %1, %0, 0, 16\n"
" lsr %0, 16\n"
" add %0, %1\n"
" bfextu %1, %0, 16, 16\n"
" add %0, %1"
: "=&r"(sum), "=&r"(tmp)
: "0"(sum));
return ~sum;
}
static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum)
{
asm(" add %0, %1\n"
" adc %0, %0, %2\n"
" adc %0, %0, %3\n"
" acr %0"
: "=r"(sum)
: "r"(daddr), "r"(saddr), "r"(ntohs(len) | (proto << 16)),
"0"(sum)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
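/*
 * Example usage (illustrative sketch, not part of the original header):
 * checksumming a buffer and folding the 32-bit partial sum down to a
 * 16-bit checksum. "buff" and "len" are assumed caller variables.
 *
 *	unsigned int sum = csum_partial(buff, len, 0);
 *	unsigned short csum = csum_fold(sum);
 *
 * This is equivalent to calling ip_compute_csum(buff, len) directly.
 */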
#endif /* __ASM_AVR32_CHECKSUM_H */
#ifndef __ASM_AVR32_CPUTIME_H
#define __ASM_AVR32_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __ASM_AVR32_CPUTIME_H */
#ifndef __ASM_AVR32_CURRENT_H
#define __ASM_AVR32_CURRENT_H
#include <linux/thread_info.h>
struct task_struct;
static inline struct task_struct *get_current(void)
{
return current_thread_info()->task;
}
#define current get_current()
#endif /* __ASM_AVR32_CURRENT_H */
#ifndef __ASM_AVR32_DELAY_H
#define __ASM_AVR32_DELAY_H
/*
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines calling functions in arch/avr32/lib/delay.c
*/
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
__udelay(n))
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
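/*
 * Example (illustrative, not part of the original header): a
 * compile-time constant argument below the 20000 us limit expands to
 * __const_udelay(), while a run-time value goes through __udelay().
 * "delay_us" is an assumed caller variable.
 *
 *	udelay(10);		(constant: expands to __const_udelay())
 *	udelay(delay_us);	(variable: calls __udelay())
 */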
#endif /* __ASM_AVR32_DELAY_H */
#ifndef __ASM_AVR32_DIV64_H
#define __ASM_AVR32_DIV64_H
#include <asm-generic/div64.h>
#endif /* __ASM_AVR32_DIV64_H */
#ifndef __ASM_AVR32_DMA_MAPPING_H
#define __ASM_AVR32_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/device.h>
#include <asm/scatterlist.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
extern void dma_cache_sync(void *vaddr, size_t size, int direction);
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
*/
static inline int dma_supported(struct device *dev, u64 mask)
{
/* Fix when needed. I really don't know of any limitations */
return 1;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
return 0;
}
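/*
 * Example (illustrative sketch, not part of the original header): a
 * driver whose device can only address the low 24 bits would do
 *
 *	if (dma_set_mask(dev, 0x00ffffffULL))
 *		return -EIO;
 *
 * before setting up any mappings. "dev" is the driver's struct device
 * pointer (assumed).
 */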
/**
* dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
*
* Allocate some uncached, unbuffered memory for a device for
* performing DMA. This function allocates pages, and will
* return the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
extern void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp);
/**
* dma_free_coherent - free memory allocated by dma_alloc_coherent
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
*
* Free (and unmap) a DMA buffer previously allocated by
* dma_alloc_coherent().
*
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal both while this call is executing and after it returns.
*/
extern void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle);
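/*
 * Example (illustrative, not part of the original header): allocating
 * and freeing a coherent descriptor ring. "dev" and "RING_SIZE" are
 * assumed driver names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */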
/**
* dma_alloc_writecombine - allocate write-combining memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
*
* Allocate some uncached, buffered memory for a device for
* performing DMA. This function allocates pages, and will
* return the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
extern void *dma_alloc_writecombine(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp);
/**
 * dma_free_writecombine - free memory allocated by dma_alloc_writecombine
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_writecombine
* @cpu_addr: CPU-view address returned from dma_alloc_writecombine
* @handle: device-view address returned from dma_alloc_writecombine
*
* Free (and unmap) a DMA buffer previously allocated by
* dma_alloc_writecombine().
*
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal both while this call is executing and after it returns.
*/
extern void dma_free_writecombine(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle);
/**
* dma_map_single - map a single buffer for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @cpu_addr: CPU direct mapped address of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Ensure that any data held in the cache is appropriately discarded
* or written back.
*
* The device owns this memory once this call has completed. The CPU
* can regain ownership by calling dma_unmap_single() or dma_sync_single().
*/
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
dma_cache_sync(cpu_addr, size, direction);
return virt_to_bus(cpu_addr);
}
/**
* dma_unmap_single - unmap a single buffer previously mapped
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Unmap a single streaming mode DMA translation. The handle and size
* must match what was provided in the previous dma_map_single() call.
* All other usages are undefined.
*
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
}
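/*
 * Example (illustrative, not part of the original header): streaming
 * DMA for a single buffer that the device only reads from. "dev",
 * "buf" and "len" are assumed driver variables.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... hand "dma" to the device and wait for the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */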
/**
* dma_map_page - map a portion of a page for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @page: page that buffer resides in
* @offset: offset into page for start of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Ensure that any data held in the cache is appropriately discarded
* or written back.
*
* The device owns this memory once this call has completed. The CPU
* can regain ownership by calling dma_unmap_page() or dma_sync_single().
*/
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
return dma_map_single(dev, page_address(page) + offset,
size, direction);
}
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
* All other usages are undefined.
*
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
dma_unmap_single(dev, dma_address, size, direction);
}
/**
* dma_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
* @dir: DMA transfer direction
*
* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
 * above dma_map_single() interface. Here the scatter-gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
 * Device ownership issues as mentioned above for dma_map_single() are
* the same here.
*/
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
int i;
for (i = 0; i < nents; i++) {
char *virt;
sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
virt = page_address(sg[i].page) + sg[i].offset;
dma_cache_sync(virt, sg[i].length, direction);
}
return nents;
}
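/*
 * Example (illustrative sketch, not part of the original header):
 * mapping a scatterlist that the caller has already filled in and
 * handing each segment to the hardware. "sglist", "nents" and
 * setup_descriptor() are assumed driver names.
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++)
 *		setup_descriptor(i, sg_dma_address(&sglist[i]),
 *				 sg_dma_len(&sglist[i]));
 */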
/**
* dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
* @dir: DMA transfer direction
*
* Unmap a set of streaming mode DMA translations.
* Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
*/
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
}
/**
* dma_sync_single_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Make physical memory consistent for a single streaming mode DMA
* translation after a transfer.
*
* If you perform a dma_map_single() but wish to interrogate the
* buffer using the cpu, yet do not wish to teardown the DMA mapping,
* you must call this function before doing so. At the next point you
* give the DMA address back to the card, you must first perform a
* dma_sync_single_for_device, and then the device again owns the
* buffer.
*/
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
dma_cache_sync(bus_to_virt(dma_handle), size, direction);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
dma_cache_sync(bus_to_virt(dma_handle), size, direction);
}
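/*
 * Example (illustrative, not part of the original header): letting the
 * CPU inspect a streaming DMA buffer between device transfers without
 * tearing down the mapping. "dev", "dma" and "len" are assumed.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 */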
/**
* dma_sync_sg_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
* @dir: DMA transfer direction
*
* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
*
* The same as dma_sync_single_for_* but for a scatter-gather list,
* same rules and usage.
*/
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
int i;
for (i = 0; i < nents; i++) {
dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
sg[i].length, direction);
}
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
int i;
for (i = 0; i < nents; i++) {
dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
sg[i].length, direction);
}
}
/* Now for the API extensions over the PCI DMA API */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
static inline int dma_is_consistent(dma_addr_t dma_addr)
{
return 1;
}
static inline int dma_get_cache_alignment(void)
{
return boot_cpu_data.dcache.linesz;
}
#endif /* __ASM_AVR32_DMA_MAPPING_H */
#ifndef __ASM_AVR32_DMA_H
#define __ASM_AVR32_DMA_H
/* The maximum address that we can perform a DMA transfer to on this platform.
* Not really applicable to AVR32, but some functions need it. */
#define MAX_DMA_ADDRESS 0xffffffff
#endif /* __ASM_AVR32_DMA_H */
#ifndef __ASM_AVR32_ELF_H
#define __ASM_AVR32_ELF_H
/* AVR32 relocation numbers */
#define R_AVR32_NONE 0
#define R_AVR32_32 1
#define R_AVR32_16 2
#define R_AVR32_8 3
#define R_AVR32_32_PCREL 4
#define R_AVR32_16_PCREL 5
#define R_AVR32_8_PCREL 6
#define R_AVR32_DIFF32 7
#define R_AVR32_DIFF16 8
#define R_AVR32_DIFF8 9
#define R_AVR32_GOT32 10
#define R_AVR32_GOT16 11
#define R_AVR32_GOT8 12
#define R_AVR32_21S 13
#define R_AVR32_16U 14
#define R_AVR32_16S 15
#define R_AVR32_8S 16
#define R_AVR32_8S_EXT 17
#define R_AVR32_22H_PCREL 18
#define R_AVR32_18W_PCREL 19
#define R_AVR32_16B_PCREL 20
#define R_AVR32_16N_PCREL 21
#define R_AVR32_14UW_PCREL 22
#define R_AVR32_11H_PCREL 23
#define R_AVR32_10UW_PCREL 24
#define R_AVR32_9H_PCREL 25
#define R_AVR32_9UW_PCREL 26
#define R_AVR32_HI16 27
#define R_AVR32_LO16 28
#define R_AVR32_GOTPC 29
#define R_AVR32_GOTCALL 30
#define R_AVR32_LDA_GOT 31
#define R_AVR32_GOT21S 32
#define R_AVR32_GOT18SW 33
#define R_AVR32_GOT16S 34
#define R_AVR32_GOT7UW 35
#define R_AVR32_32_CPENT 36
#define R_AVR32_CPCALL 37
#define R_AVR32_16_CP 38
#define R_AVR32_9W_CP 39
#define R_AVR32_RELATIVE 40
#define R_AVR32_GLOB_DAT 41
#define R_AVR32_JMP_SLOT 42
#define R_AVR32_ALIGN 43
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof (elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fpu_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ( (x)->e_machine == EM_AVR32 )
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#ifdef __LITTLE_ENDIAN__
#define ELF_DATA ELFDATA2LSB
#else
#define ELF_DATA ELFDATA2MSB
#endif
#define ELF_ARCH EM_AVR32
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, no platform-specific optimized libraries exist for AVR32,
but that could change... */
#define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
#endif
#endif /* __ASM_AVR32_ELF_H */
#ifndef __ASM_AVR32_EMERGENCY_RESTART_H
#define __ASM_AVR32_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* __ASM_AVR32_EMERGENCY_RESTART_H */
#ifndef __ASM_AVR32_ERRNO_H
#define __ASM_AVR32_ERRNO_H
#include <asm-generic/errno.h>
#endif /* __ASM_AVR32_ERRNO_H */
#ifndef __ASM_AVR32_FCNTL_H
#define __ASM_AVR32_FCNTL_H
#include <asm-generic/fcntl.h>
#endif /* __ASM_AVR32_FCNTL_H */
#ifndef __ASM_AVR32_FUTEX_H
#define __ASM_AVR32_FUTEX_H
#include <asm-generic/futex.h>
#endif /* __ASM_AVR32_FUTEX_H */
#ifndef __ASM_AVR32_HARDIRQ_H
#define __ASM_AVR32_HARDIRQ_H
#include <linux/threads.h>
#include <asm/irq.h>
#ifndef __ASSEMBLY__
#include <linux/cache.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
void ack_bad_irq(unsigned int irq);
/* Standard mappings for irq_cpustat_t above */
#include <linux/irq_cpustat.h>
#endif /* __ASSEMBLY__ */
#define HARDIRQ_BITS 12
/*
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
#endif /* __ASM_AVR32_HARDIRQ_H */
#ifndef __ASM_AVR32_HW_IRQ_H
#define __ASM_AVR32_HW_IRQ_H
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{
/* Nothing to do */
}
#endif /* __ASM_AVR32_HW_IRQ_H */
#ifndef __ASM_AVR32_INTC_H
#define __ASM_AVR32_INTC_H
#include <linux/sysdev.h>
#include <linux/interrupt.h>
struct irq_controller;
struct irqaction;
struct pt_regs;
struct platform_device;
/* Information about the internal interrupt controller */
struct intc_device {
/* ioremapped address of configuration block */
void __iomem *regs;
/* the physical device */
struct platform_device *pdev;
/* Number of interrupt lines per group. */
unsigned int irqs_per_group;
/* The highest group ID + 1 */
unsigned int nr_groups;
/*
* Bitfield indicating which groups are actually in use. The
* size of the array is
* ceil(group_max / (8 * sizeof(unsigned int))).
*/
unsigned int group_mask[];
};
struct irq_controller_class {
/*
* A short name identifying this kind of controller.
*/
const char *typename;
/*
* Handle the IRQ. Must do any necessary acking and masking.
*/
irqreturn_t (*handle)(int irq, void *dev_id, struct pt_regs *regs);
/*
* Register a new IRQ handler.
*/
int (*setup)(struct irq_controller *ctrl, unsigned int irq,
struct irqaction *action);
/*
 * Unregister an IRQ handler.
*/
void (*free)(struct irq_controller *ctrl, unsigned int irq,
void *dev_id);
/*
* Mask the IRQ in the interrupt controller.
*/
void (*mask)(struct irq_controller *ctrl, unsigned int irq);
/*
* Unmask the IRQ in the interrupt controller.
*/
void (*unmask)(struct irq_controller *ctrl, unsigned int irq);
/*
* Set the type of the IRQ. See below for possible types.
* Return -EINVAL if a given type is not supported
*/
int (*set_type)(struct irq_controller *ctrl, unsigned int irq,
unsigned int type);
/*
* Return the IRQ type currently set
*/
unsigned int (*get_type)(struct irq_controller *ctrl, unsigned int irq);
};
struct irq_controller {
struct irq_controller_class *class;
unsigned int irq_group;
unsigned int first_irq;
unsigned int nr_irqs;
struct list_head list;
};
struct intc_group_desc {
struct irq_controller *ctrl;
irqreturn_t (*handle)(int, void *, struct pt_regs *);
unsigned long flags;
void *dev_id;
const char *devname;
};
/*
* The internal interrupt controller. Defined in board/part-specific
* devices.c.
* TODO: Should probably be defined per-cpu.
*/
extern struct intc_device intc;
extern int request_internal_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags,
const char *devname, void *dev_id);
extern void free_internal_irq(unsigned int irq);
/* Only used by time_init() */
extern int setup_internal_irq(unsigned int irq, struct intc_group_desc *desc);
/*
* Set interrupt priority for a given group. `group' can be found by
* using irq_to_group(irq). Priority can be from 0 (lowest) to 3
* (highest). Higher-priority interrupts will preempt lower-priority
* interrupts (unless interrupts are masked globally).
*
* This function does not check for conflicts within a group.
*/
extern int intc_set_priority(unsigned int group,
unsigned int priority);
/*
* Returns a bitmask of pending interrupts in a group.
*/
extern unsigned long intc_get_pending(unsigned int group);
/*
* Register a new external interrupt controller. Returns the first
* external IRQ number that is assigned to the new controller.
*/
extern int intc_register_controller(struct irq_controller *ctrl);
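/*
 * Example (illustrative sketch, not part of the original header):
 * registering a handler for an internal interrupt and raising the
 * priority of its group. "MY_IRQ", my_isr() and "dev" are assumed
 * driver names; irq_to_group() is the helper referred to above.
 *
 *	if (request_internal_irq(MY_IRQ, my_isr, 0, "mydev", dev))
 *		return -EBUSY;
 *	intc_set_priority(irq_to_group(MY_IRQ), 3);
 */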
#endif /* __ASM_AVR32_INTC_H */
#ifndef __ASM_AVR32_IO_H
#define __ASM_AVR32_IO_H
#include <linux/string.h>
#ifdef __KERNEL__
#include <asm/addrspace.h>
#include <asm/byteorder.h>
/* virt_to_phys will only work when address is in P1 or P2 */
static __inline__ unsigned long virt_to_phys(volatile void *address)
{
return PHYSADDR(address);
}
static __inline__ void * phys_to_virt(unsigned long address)
{
return (void *)P1SEGADDR(address);
}
#define cached_to_phys(addr) ((unsigned long)PHYSADDR(addr))
#define uncached_to_phys(addr) ((unsigned long)PHYSADDR(addr))
#define phys_to_cached(addr) ((void *)P1SEGADDR(addr))
#define phys_to_uncached(addr) ((void *)P2SEGADDR(addr))
/*
* Generic IO read/write. These perform native-endian accesses. Note
* that some architectures will want to re-define __raw_{read,write}w.
*/
extern void __raw_writesb(unsigned int addr, const void *data, int bytelen);
extern void __raw_writesw(unsigned int addr, const void *data, int wordlen);
extern void __raw_writesl(unsigned int addr, const void *data, int longlen);
extern void __raw_readsb(unsigned int addr, void *data, int bytelen);
extern void __raw_readsw(unsigned int addr, void *data, int wordlen);
extern void __raw_readsl(unsigned int addr, void *data, int longlen);
static inline void writeb(unsigned char b, volatile void __iomem *addr)
{
*(volatile unsigned char __force *)addr = b;
}
static inline void writew(unsigned short b, volatile void __iomem *addr)
{
*(volatile unsigned short __force *)addr = b;
}
static inline void writel(unsigned int b, volatile void __iomem *addr)
{
*(volatile unsigned int __force *)addr = b;
}
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
static inline unsigned char readb(const volatile void __iomem *addr)
{
return *(const volatile unsigned char __force *)addr;
}
static inline unsigned short readw(const volatile void __iomem *addr)
{
return *(const volatile unsigned short __force *)addr;
}
static inline unsigned int readl(const volatile void __iomem *addr)
{
return *(const volatile unsigned int __force *)addr;
}
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define writesb(p, d, l) __raw_writesb((unsigned int)p, d, l)
#define writesw(p, d, l) __raw_writesw((unsigned int)p, d, l)
#define writesl(p, d, l) __raw_writesl((unsigned int)p, d, l)
#define readsb(p, d, l) __raw_readsb((unsigned int)p, d, l)
#define readsw(p, d, l) __raw_readsw((unsigned int)p, d, l)
#define readsl(p, d, l) __raw_readsl((unsigned int)p, d, l)
/*
 * These are only here because ALSA _thinks_ it needs them...
*/
static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
unsigned long count)
{
char *p = to;
while (count) {
count--;
*p = readb(from);
p++;
from++;
}
}
static inline void memcpy_toio(volatile void __iomem *to, const void * from,
unsigned long count)
{
const char *p = from;
while (count) {
count--;
writeb(*p, to);
p++;
to++;
}
}
static inline void memset_io(volatile void __iomem *addr, unsigned char val,
unsigned long count)
{
memset((void __force *)addr, val, count);
}
/*
* Bad read/write accesses...
*/
extern void __readwrite_bug(const char *fn);
#define IO_SPACE_LIMIT 0xffffffff
/* Convert I/O port address to virtual address */
#define __io(p) ((void __iomem *)phys_to_uncached(p))
/*
* IO port access primitives
* -------------------------
*
* The AVR32 doesn't have special IO access instructions; all IO is memory
* mapped. Note that these are defined to perform little endian accesses
* only. Their primary purpose is to access PCI and ISA peripherals.
*
 * Note that for a big endian machine, this implies that the appropriate
 * big endian mode bus connectivity is in place.
*
* The machine specific io.h include defines __io to translate an "IO"
* address to a memory address.
*
* Note that we prevent GCC re-ordering or caching values in expressions
* by introducing sequence points into the in*() definitions. Note that
* __raw_* do not guarantee this behaviour.
*
* The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
*/
#define outb(v, p) __raw_writeb(v, __io(p))
#define outw(v, p) __raw_writew(cpu_to_le16(v), __io(p))
#define outl(v, p) __raw_writel(cpu_to_le32(v), __io(p))
#define inb(p) __raw_readb(__io(p))
#define inw(p) le16_to_cpu(__raw_readw(__io(p)))
#define inl(p) le32_to_cpu(__raw_readl(__io(p)))
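/*
 * Example (illustrative, not part of the original header): these behave
 * like their x86 counterparts, e.g. for a hypothetical 8-bit status
 * register at I/O port "port":
 *
 *	u8 status = inb(port);
 *	outb(status | 0x01, port);
 */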
static inline void __outsb(unsigned long port, void *addr, unsigned int count)
{
while (count--) {
outb(*(u8 *)addr, port);
addr++;
}
}
static inline void __insb(unsigned long port, void *addr, unsigned int count)
{
while (count--) {
*(u8 *)addr = inb(port);
addr++;
}
}
static inline void __outsw(unsigned long port, void *addr, unsigned int count)
{
while (count--) {
outw(*(u16 *)addr, port);
addr += 2;
}
}
static inline void __insw(unsigned long port, void *addr, unsigned int count)
{
while (count--) {
*(u16 *)addr = inw(port);
addr += 2;
}
}
static inline void __outsl(unsigned long port, void *addr, unsigned int count)
{
while (count--) {
outl(*(u32 *)addr, port);
addr += 4;
}
}
static inline void __insl(unsigned long port, void *addr, unsigned int count)
{
while (count--) {
*(u32 *)addr = inl(port);
addr += 4;
}
}
#define outsb(port, addr, count) __outsb(port, addr, count)
#define insb(port, addr, count) __insb(port, addr, count)
#define outsw(port, addr, count) __outsw(port, addr, count)
#define insw(port, addr, count) __insw(port, addr, count)
#define outsl(port, addr, count) __outsl(port, addr, count)
#define insl(port, addr, count) __insl(port, addr, count)
extern void __iomem *__ioremap(unsigned long offset, size_t size,
unsigned long flags);
extern void __iounmap(void __iomem *addr);
/*
* ioremap - map bus memory into CPU space
* @offset bus address of the memory
* @size size of the resource to map
*
* ioremap performs a platform specific sequence of operations to make
* bus memory CPU accessible via the readb/.../writel functions and
* the other mmio helpers. The returned address is not guaranteed to
* be usable directly as a virtual address.
*/
#define ioremap(offset, size) \
__ioremap((offset), (size), 0)
#define iounmap(addr) \
__iounmap(addr)
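/*
 * Example (illustrative sketch, not part of the original header):
 * mapping a peripheral's registers and accessing them through the
 * MMIO accessors above. "res", CTRL and STATUS are assumed driver
 * names.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(res->start, res->end - res->start + 1);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL);
 *	status = readl(regs + STATUS);
 *	...
 *	iounmap(regs);
 */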
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys
#define bus_to_page phys_to_page
#define dma_cache_wback_inv(_start, _size) \
flush_dcache_region(_start, _size)
#define dma_cache_inv(_start, _size) \
invalidate_dcache_region(_start, _size)
#define dma_cache_wback(_start, _size) \
clean_dcache_region(_start, _size)
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* __ASM_AVR32_IO_H */
#ifndef __ASM_AVR32_IOCTL_H
#define __ASM_AVR32_IOCTL_H
#include <asm-generic/ioctl.h>
#endif /* __ASM_AVR32_IOCTL_H */
#ifndef __ASM_AVR32_IOCTLS_H
#define __ASM_AVR32_IOCTLS_H
#include <asm/ioctl.h>
/* 0x54 is just a magic number to make these relatively unique ('T') */
#define TCGETS 0x5401
#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x5405
#define TCSETA 0x5406
#define TCSETAW 0x5407
#define TCSETAF 0x5408
#define TCSBRK 0x5409
#define TCXONC 0x540A
#define TCFLSH 0x540B
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
#define TIOCGPGRP 0x540F
#define TIOCSPGRP 0x5410
#define TIOCOUTQ 0x5411
#define TIOCSTI 0x5412
#define TIOCGWINSZ 0x5413
#define TIOCSWINSZ 0x5414
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define FIONREAD 0x541B
#define TIOCINQ FIONREAD
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
#define FIONBIO 0x5421
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#endif /* __ASM_AVR32_IOCTLS_H */
#ifndef __ASM_AVR32_IPCBUF_H
#define __ASM_AVR32_IPCBUF_H
/*
 * The ipc64_perm structure for the AVR32 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 32-bit mode_t and seq
* - 2 miscellaneous 32-bit values
*/
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* __ASM_AVR32_IPCBUF_H */
#ifndef __ASM_AVR32_IRQ_H
#define __ASM_AVR32_IRQ_H
#define NR_INTERNAL_IRQS 64
#define NR_EXTERNAL_IRQS 64
#define NR_IRQS (NR_INTERNAL_IRQS + NR_EXTERNAL_IRQS)
#define irq_canonicalize(i) (i)
#endif /* __ASM_AVR32_IRQ_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_IRQFLAGS_H
#define __ASM_AVR32_IRQFLAGS_H
#include <asm/sysreg.h>
static inline unsigned long __raw_local_save_flags(void)
{
return sysreg_read(SR);
}
#define raw_local_save_flags(x) \
do { (x) = __raw_local_save_flags(); } while (0)
/*
* This will restore ALL status register flags, not only the interrupt
* mask flag.
*
* The empty asm statement informs the compiler of this fact while
* also serving as a barrier.
*/
static inline void raw_local_irq_restore(unsigned long flags)
{
sysreg_write(SR, flags);
asm volatile("" : : : "memory", "cc");
}
static inline void raw_local_irq_disable(void)
{
asm volatile("ssrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory");
}
static inline void raw_local_irq_enable(void)
{
asm volatile("csrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory");
}
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return (flags & SYSREG_BIT(GM)) != 0;
}
static inline int raw_irqs_disabled(void)
{
unsigned long flags = __raw_local_save_flags();
return raw_irqs_disabled_flags(flags);
}
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags = __raw_local_save_flags();
raw_local_irq_disable();
return flags;
}
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
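/*
 * Example (illustrative, not part of the original header): protecting a
 * short critical section against local interrupts.
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);
 *	... critical section, interrupts globally masked ...
 *	raw_local_irq_restore(flags);
 */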
#endif /* __ASM_AVR32_IRQFLAGS_H */
#ifndef __ASM_AVR32_KDEBUG_H
#define __ASM_AVR32_KDEBUG_H
#include <linux/notifier.h>
struct pt_regs;
struct die_args {
struct pt_regs *regs;
int trapnr;
};
int register_die_notifier(struct notifier_block *nb);
int unregister_die_notifier(struct notifier_block *nb);
int register_page_fault_notifier(struct notifier_block *nb);
int unregister_page_fault_notifier(struct notifier_block *nb);
extern struct atomic_notifier_head avr32_die_chain;
/* Grossly misnamed. */
enum die_val {
DIE_FAULT,
DIE_BREAKPOINT,
DIE_SSTEP,
DIE_PAGE_FAULT,
};
static inline int notify_die(enum die_val val, struct pt_regs *regs,
int trap, int sig)
{
struct die_args args = {
.regs = regs,
.trapnr = trap,
};
return atomic_notifier_call_chain(&avr32_die_chain, val, &args);
}
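/*
 * Example (illustrative sketch, not part of the original header):
 * registering a notifier that gets called for every die event. The
 * handler name and its contents are assumptions.
 *
 *	static int my_die_handler(struct notifier_block *nb,
 *				  unsigned long val, void *data)
 *	{
 *		struct die_args *args = data;
 *		... inspect val, args->regs and args->trapnr ...
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_die_nb = {
 *		.notifier_call = my_die_handler,
 *	};
 *
 *	register_die_notifier(&my_die_nb);
 */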
#endif /* __ASM_AVR32_KDEBUG_H */
#ifndef __ASM_AVR32_KMAP_TYPES_H
#define __ASM_AVR32_KMAP_TYPES_H
#ifdef CONFIG_DEBUG_HIGHMEM
# define D(n) __KM_FENCE_##n ,
#else
# define D(n)
#endif
enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
D(2) KM_SKB_DATA_SOFTIRQ,
D(3) KM_USER0,
D(4) KM_USER1,
D(5) KM_BIO_SRC_IRQ,
D(6) KM_BIO_DST_IRQ,
D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_PTE2,
D(10) KM_IRQ0,
D(11) KM_IRQ1,
D(12) KM_SOFTIRQ0,
D(13) KM_SOFTIRQ1,
D(14) KM_TYPE_NR
};
#undef D
#endif /* __ASM_AVR32_KMAP_TYPES_H */
/*
* Kernel Probes (KProbes)
*
* Copyright (C) 2005-2006 Atmel Corporation
* Copyright (C) IBM Corporation, 2002, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_KPROBES_H
#define __ASM_AVR32_KPROBES_H
#include <linux/types.h>
typedef u16 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xd673 /* breakpoint */
#define MAX_INSN_SIZE 2
#define ARCH_INACTIVE_KPROBE_COUNT 1
#define arch_remove_kprobe(p) do { } while (0)
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
kprobe_opcode_t insn[MAX_INSN_SIZE];
};
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
#define flush_insn_slot(p) do { } while (0)
#endif /* __ASM_AVR32_KPROBES_H */
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#define __ALIGN .balign 2
#define __ALIGN_STR ".balign 2"
#endif /* __ASM_LINKAGE_H */
#ifndef __ASM_AVR32_LOCAL_H
#define __ASM_AVR32_LOCAL_H
#include <asm-generic/local.h>
#endif /* __ASM_AVR32_LOCAL_H */
/*
 * linux/include/asm-avr32/mach/serial_at91.h
*
* Based on serial_sa1100.h by Nicolas Pitre
*
* Copyright (C) 2002 ATMEL Rousset
*
* Low level machine dependent UART functions.
*/
struct uart_port;
/*
* This is a temporary structure for registering these
* functions; it is intended to be discarded after boot.
*/
struct at91_port_fns {
void (*set_mctrl)(struct uart_port *, u_int);
u_int (*get_mctrl)(struct uart_port *);
void (*enable_ms)(struct uart_port *);
void (*pm)(struct uart_port *, u_int, u_int);
int (*set_wake)(struct uart_port *, u_int);
int (*open)(struct uart_port *);
void (*close)(struct uart_port *);
};
#if defined(CONFIG_SERIAL_AT91)
void at91_register_uart_fns(struct at91_port_fns *fns);
#else
#define at91_register_uart_fns(fns) do { } while (0)
#endif
#ifndef __ASM_AVR32_MMAN_H__
#define __ASM_AVR32_MMAN_H__
#include <asm-generic/mman.h>
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_POPULATE 0x8000 /* populate (prefault) page tables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#endif /* __ASM_AVR32_MMAN_H__ */
#ifndef __ASM_AVR32_MMU_H
#define __ASM_AVR32_MMU_H
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
#define MMU_ITLB_ENTRIES 64
#define MMU_DTLB_ENTRIES 64
#endif /* __ASM_AVR32_MMU_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* ASID handling taken from SH implementation.
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2003 Paul Mundt
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_MMU_CONTEXT_H
#define __ASM_AVR32_MMU_CONTEXT_H
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sysreg.h>
/*
* The MMU "context" consists of two things:
* (a) TLB cache version
* (b) ASID (Address Space IDentifier)
*/
#define MMU_CONTEXT_ASID_MASK 0x000000ff
#define MMU_CONTEXT_VERSION_MASK 0xffffff00
#define MMU_CONTEXT_FIRST_VERSION 0x00000100
#define NO_CONTEXT 0
#define MMU_NO_ASID 0x100
/* Virtual Page Number mask */
#define MMU_VPN_MASK 0xfffff000
/* Cache of MMU context last used */
extern unsigned long mmu_context_cache;
/*
* Get MMU context if needed
*/
static inline void
get_mmu_context(struct mm_struct *mm)
{
unsigned long mc = mmu_context_cache;
if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
/* It's up to date, do nothing */
return;
/* It's old, we need to get new context with new version */
mc = ++mmu_context_cache;
if (!(mc & MMU_CONTEXT_ASID_MASK)) {
/*
* We have exhausted all ASIDs of this version.
* Flush the TLB and start new cycle.
*/
flush_tlb_all();
/*
* Fix version. Note that we avoid version #0
* to distinguish NO_CONTEXT.
*/
if (!mc)
mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
}
mm->context = mc;
}
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
mm->context = NO_CONTEXT;
return 0;
}
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
/* Do nothing */
}
static inline void set_asid(unsigned long asid)
{
/* XXX: We're destroying TLBEHI[8:31] */
sysreg_write(TLBEHI, asid & MMU_CONTEXT_ASID_MASK);
cpu_sync_pipeline();
}
static inline unsigned long get_asid(void)
{
unsigned long asid;
asid = sysreg_read(TLBEHI);
return asid & MMU_CONTEXT_ASID_MASK;
}
static inline void activate_context(struct mm_struct *mm)
{
get_mmu_context(mm);
set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
if (likely(prev != next)) {
unsigned long __pgdir = (unsigned long)next->pgd;
sysreg_write(PTBR, __pgdir);
activate_context(next);
}
}
#define deactivate_mm(tsk,mm) do { } while(0)
#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
static inline void enable_mmu(void)
{
sysreg_write(MMUCR, (SYSREG_BIT(MMUCR_S)
| SYSREG_BIT(E)
| SYSREG_BIT(MMUCR_I)));
nop(); nop(); nop(); nop(); nop(); nop(); nop(); nop();
if (mmu_context_cache == NO_CONTEXT)
mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
}
static inline void disable_mmu(void)
{
sysreg_write(MMUCR, SYSREG_BIT(MMUCR_S));
}
#endif /* __ASM_AVR32_MMU_CONTEXT_H */
#ifndef __ASM_AVR32_MODULE_H
#define __ASM_AVR32_MODULE_H
struct mod_arch_syminfo {
unsigned long got_offset;
int got_initialized;
};
struct mod_arch_specific {
/* Starting offset of got in the module core memory. */
unsigned long got_offset;
/* Size of the got. */
unsigned long got_size;
/* Number of symbols in syminfo. */
int nsyms;
/* Additional symbol information (got offsets). */
struct mod_arch_syminfo *syminfo;
};
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#define MODULE_PROC_FAMILY "AVR32v1"
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
#endif /* __ASM_AVR32_MODULE_H */
#ifndef __ASM_AVR32_MSGBUF_H
#define __ASM_AVR32_MSGBUF_H
/*
 * The msqid64_ds structure for the AVR32 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
__kernel_time_t msg_stime; /* last msgsnd time */
unsigned long __unused1;
__kernel_time_t msg_rtime; /* last msgrcv time */
unsigned long __unused2;
__kernel_time_t msg_ctime; /* last change time */
unsigned long __unused3;
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned long __unused4;
unsigned long __unused5;
};
#endif /* __ASM_AVR32_MSGBUF_H */
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
#include <asm-generic/mutex-dec.h>
#ifndef __ASM_AVR32_NAMEI_H
#define __ASM_AVR32_NAMEI_H
/* This dummy routine may be changed to something useful */
#define __emul_prefix() NULL
#endif /* __ASM_AVR32_NAMEI_H */
#ifndef __ASM_AVR32_NUMNODES_H
#define __ASM_AVR32_NUMNODES_H
/* Max 4 nodes */
#define NODES_SHIFT 2
#endif /* __ASM_AVR32_NUMNODES_H */
/*
* AVR32 OCD Registers
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_OCD_H
#define __ASM_AVR32_OCD_H
/* Debug Registers */
#define DBGREG_DID 0
#define DBGREG_DC 8
#define DBGREG_DS 16
#define DBGREG_RWCS 28
#define DBGREG_RWA 36
#define DBGREG_RWD 40
#define DBGREG_WT 44
#define DBGREG_DTC 52
#define DBGREG_DTSA0 56
#define DBGREG_DTSA1 60
#define DBGREG_DTEA0 72
#define DBGREG_DTEA1 76
#define DBGREG_BWC0A 88
#define DBGREG_BWC0B 92
#define DBGREG_BWC1A 96
#define DBGREG_BWC1B 100
#define DBGREG_BWC2A 104
#define DBGREG_BWC2B 108
#define DBGREG_BWC3A 112
#define DBGREG_BWC3B 116
#define DBGREG_BWA0A 120
#define DBGREG_BWA0B 124
#define DBGREG_BWA1A 128
#define DBGREG_BWA1B 132
#define DBGREG_BWA2A 136
#define DBGREG_BWA2B 140
#define DBGREG_BWA3A 144
#define DBGREG_BWA3B 148
#define DBGREG_BWD3A 153
#define DBGREG_BWD3B 156
#define DBGREG_PID 284
#define SABAH_OCD 0x01
#define SABAH_ICACHE 0x02
#define SABAH_MEM_CACHED 0x04
#define SABAH_MEM_UNCACHED 0x05
/* Fields in the Development Control register */
#define DC_SS_BIT 8
#define DC_SS (1 << DC_SS_BIT)
#define DC_DBE (1 << 13)
#define DC_RID (1 << 27)
#define DC_ORP (1 << 28)
#define DC_MM (1 << 29)
#define DC_RES (1 << 30)
/* Fields in the Development Status register */
#define DS_SSS (1 << 0)
#define DS_SWB (1 << 1)
#define DS_HWB (1 << 2)
#define DS_BP_SHIFT 8
#define DS_BP_MASK (0xff << DS_BP_SHIFT)
#define __mfdr(addr) \
({ \
register unsigned long value; \
asm volatile("mfdr %0, %1" : "=r"(value) : "i"(addr)); \
value; \
})
#define __mtdr(addr, value) \
asm volatile("mtdr %0, %1" : : "i"(addr), "r"(value))
#endif /* __ASM_AVR32_OCD_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_PAGE_H
#define __ASM_AVR32_PAGE_H
#ifdef __KERNEL__
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#ifdef __ASSEMBLY__
#define PAGE_SIZE (1 << PAGE_SHIFT)
#else
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PTE_MASK PAGE_MASK
#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
extern void clear_page(void *to);
extern void copy_page(void *to, void *from);
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
/* FIXME: These should be removed soon */
extern unsigned long memory_start, memory_end;
/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
unsigned lz;
size = (size - 1) >> PAGE_SHIFT;
asm("clz %0, %1" : "=r"(lz) : "r"(size));
return 32 - lz;
}
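/*
 * Worked example (illustrative, not part of the original header): with
 * 4 KiB pages, get_order(4096) == 0, get_order(8192) == 1 and
 * get_order(12288) == 2, i.e. sizes are rounded up to the next
 * power-of-two number of pages.
 */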
#endif /* !__ASSEMBLY__ */
/* Align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
/*
* The hardware maps the virtual addresses 0x80000000 -> 0x9fffffff
* permanently to the physical addresses 0x00000000 -> 0x1fffffff when
* segmentation is enabled. We want to make use of this in order to
* minimize TLB pressure.
*/
#define PAGE_OFFSET (0x80000000UL)
/*
* ALSA uses virt_to_page() on DMA pages, which I'm not entirely sure
* is a good idea. Anyway, we can't simply subtract PAGE_OFFSET here
* in that case, so we'll have to mask out the three most significant
* bits of the address instead...
*
* What's the difference between __pa() and virt_to_phys() anyway?
*/
#define __pa(x) PHYSADDR(x)
#define __va(x) ((void *)(P1SEGADDR(x)))
#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
#define phys_to_page(phys) (pfn_to_page(phys >> PAGE_SHIFT))
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#ifndef CONFIG_NEED_MULTIPLE_NODES
#define PHYS_PFN_OFFSET (CONFIG_PHYS_OFFSET >> PAGE_SHIFT)
#define pfn_to_page(pfn) (mem_map + ((pfn) - PHYS_PFN_OFFSET))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PHYS_PFN_OFFSET)
#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
#endif /* CONFIG_NEED_MULTIPLE_NODES */
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
/*
* Memory above this physical address will be considered highmem.
*/
#define HIGHMEM_START 0x20000000UL
#endif /* __KERNEL__ */
#endif /* __ASM_AVR32_PAGE_H */
#ifndef __ASM_AVR32_PARAM_H
#define __ASM_AVR32_PARAM_H
#ifdef __KERNEL__
# define HZ CONFIG_HZ
# define USER_HZ 100 /* User interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
#endif
#ifndef HZ
# define HZ 100
#endif
/* TODO: Should be configurable */
#define EXEC_PAGESIZE 4096
#ifndef NOGROUP
# define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64
#endif /* __ASM_AVR32_PARAM_H */
#ifndef __ASM_AVR32_PCI_H__
#define __ASM_AVR32_PCI_H__
/* We don't support PCI yet, but some drivers require this file anyway */
#define PCI_DMA_BUS_IS_PHYS (1)
#endif /* __ASM_AVR32_PCI_H__ */
#ifndef __ASM_AVR32_PERCPU_H
#define __ASM_AVR32_PERCPU_H
#include <asm-generic/percpu.h>
#endif /* __ASM_AVR32_PERCPU_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_PGALLOC_H
#define __ASM_AVR32_PGALLOC_H
#include <asm/processor.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/mm.h>
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
struct page *pte)
{
set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
}
/*
* Allocate and free page tables
*/
static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
{
unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
if (pgd)
memset(pgd, 0, pgd_size);
return pgd;
}
static inline void pgd_free(pgd_t *pgd)
{
kfree(pgd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
int count = 0;
pte_t *pte;
do {
pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
if (pte)
clear_page(pte);
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte;
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
int count = 0;
struct page *pte;
do {
pte = alloc_pages(GFP_KERNEL, 0);
if (pte)
clear_page(page_address(pte));
else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte;
}
static inline void pte_free_kernel(pte_t *pte)
{
free_page((unsigned long)pte);
}
static inline void pte_free(struct page *pte)
{
__free_page(pte);
}
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
#define check_pgt_cache() do { } while(0)
#endif /* __ASM_AVR32_PGALLOC_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
#define __ASM_AVR32_PGTABLE_2LEVEL_H
#include <asm-generic/pgtable-nopmd.h>
/*
* Traditional 2-level paging structure
*/
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
#define PTRS_PER_PTE 1024
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep, pteval)
/*
 * (pmds are folded into pgds so this doesn't actually get called,
* but the define is needed for a generic inline function.)
*/
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_AVR32_PGTABLE_2LEVEL_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_PGTABLE_H
#define __ASM_AVR32_PGTABLE_H
#include <asm/addrspace.h>
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#endif /* !__ASSEMBLY__ */
/*
* Use two-level page tables just as the i386 (without PAE)
*/
#include <asm/pgtable-2level.h>
/*
* The following code might need some cleanup when the values are
* final...
*/
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define PTE_PHYS_MASK 0x1ffff000
#ifndef __ASSEMBLY__
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
/*
* ZERO_PAGE is a global shared page that is always zero: used for
* zero-mapped memory areas etc.
*/
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8 MiB value just means that there will be a 8 MiB "hole"
* after the uncached physical memory (P2 segment) until the vmalloc
* area starts. That means that any out-of-bounds memory accesses will
 * hopefully be caught; we don't know if the ends of the P1/P2 segments
 * are actually used for anything, but it is safer anyway to let the
* MMU catch these kinds of errors than to rely on the memory bus.
*
* A "hole" of the same size is added to the end of the P3 segment as
* well. It might seem wasteful to use 16 MiB of virtual address space
* on this, but we do have 512 MiB of it...
*
* The vmalloc() routines leave a hole of 4 KiB between each vmalloced
* area for the same reason.
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START (P3SEG + VMALLOC_OFFSET)
#define VMALLOC_END (P4SEG - VMALLOC_OFFSET)
#endif /* !__ASSEMBLY__ */
/*
* Page flags. Some of these flags are not directly supported by
* hardware, so we have to emulate them.
*/
#define _TLBEHI_BIT_VALID 9
#define _TLBEHI_VALID (1 << _TLBEHI_BIT_VALID)
#define _PAGE_BIT_WT 0 /* W-bit : write-through */
#define _PAGE_BIT_DIRTY 1 /* D-bit : page changed */
#define _PAGE_BIT_SZ0 2 /* SZ0-bit : Size of page */
#define _PAGE_BIT_SZ1 3 /* SZ1-bit : Size of page */
#define _PAGE_BIT_EXECUTE 4 /* X-bit : execute access allowed */
#define _PAGE_BIT_RW 5 /* AP0-bit : write access allowed */
#define _PAGE_BIT_USER 6 /* AP1-bit : user space access allowed */
#define _PAGE_BIT_BUFFER 7 /* B-bit : bufferable */
#define _PAGE_BIT_GLOBAL 8 /* G-bit : global (ignore ASID) */
#define _PAGE_BIT_CACHABLE 9 /* C-bit : cachable */
/* If we drop support for 1K pages, we get two extra bits */
#define _PAGE_BIT_PRESENT 10
#define _PAGE_BIT_ACCESSED 11 /* software: page was accessed */
/* The following flags are only valid when !PRESENT */
#define _PAGE_BIT_FILE 0 /* software: pagecache or swap? */
#define _PAGE_WT (1 << _PAGE_BIT_WT)
#define _PAGE_DIRTY (1 << _PAGE_BIT_DIRTY)
#define _PAGE_EXECUTE (1 << _PAGE_BIT_EXECUTE)
#define _PAGE_RW (1 << _PAGE_BIT_RW)
#define _PAGE_USER (1 << _PAGE_BIT_USER)
#define _PAGE_BUFFER (1 << _PAGE_BIT_BUFFER)
#define _PAGE_GLOBAL (1 << _PAGE_BIT_GLOBAL)
#define _PAGE_CACHABLE (1 << _PAGE_BIT_CACHABLE)
/* Software flags */
#define _PAGE_ACCESSED (1 << _PAGE_BIT_ACCESSED)
#define _PAGE_PRESENT (1 << _PAGE_BIT_PRESENT)
#define _PAGE_FILE (1 << _PAGE_BIT_FILE)
/*
* Page types, i.e. sizes. _PAGE_TYPE_NONE corresponds to what is
* usually called _PAGE_PROTNONE on other architectures.
*
 * XXX: Find out if _PAGE_PROTNONE is equivalent to !_PAGE_USER. If
* so, we can encode all possible page sizes (although we can't really
* support 1K pages anyway due to the _PAGE_PRESENT and _PAGE_ACCESSED
* bits)
*
*/
#define _PAGE_TYPE_MASK ((1 << _PAGE_BIT_SZ0) | (1 << _PAGE_BIT_SZ1))
#define _PAGE_TYPE_NONE (0 << _PAGE_BIT_SZ0)
#define _PAGE_TYPE_SMALL (1 << _PAGE_BIT_SZ0)
#define _PAGE_TYPE_MEDIUM (2 << _PAGE_BIT_SZ0)
#define _PAGE_TYPE_LARGE (3 << _PAGE_BIT_SZ0)
/*
 * Mask which drops the software flags. We currently can't handle more than
* 512 MiB of physical memory, so we can use bits 29-31 for other
* stuff. With a fixed 4K page size, we can use bits 10-11 as well as
* bits 2-3 (SZ)
*/
#define _PAGE_FLAGS_HARDWARE_MASK 0xfffff3ff
#define _PAGE_FLAGS_CACHE_MASK (_PAGE_CACHABLE | _PAGE_BUFFER | _PAGE_WT)
/* TODO: Check for sanity */
/* User-mode page table flags (to be set in a pgd or pmd entry) */
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_TYPE_SMALL | _PAGE_RW \
| _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Kernel-mode page table flags */
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_TYPE_SMALL | _PAGE_RW \
| _PAGE_ACCESSED | _PAGE_DIRTY)
/* Flags that may be modified by software */
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY \
| _PAGE_FLAGS_CACHE_MASK)
#define _PAGE_FLAGS_READ (_PAGE_CACHABLE | _PAGE_BUFFER)
#define _PAGE_FLAGS_WRITE (_PAGE_FLAGS_READ | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_NORMAL(x) __pgprot((x) | _PAGE_PRESENT | _PAGE_TYPE_SMALL \
| _PAGE_ACCESSED)
#define PAGE_NONE (_PAGE_ACCESSED | _PAGE_TYPE_NONE)
#define PAGE_READ (_PAGE_FLAGS_READ | _PAGE_USER)
#define PAGE_EXEC (_PAGE_FLAGS_READ | _PAGE_EXECUTE | _PAGE_USER)
#define PAGE_WRITE (_PAGE_FLAGS_WRITE | _PAGE_USER)
#define PAGE_KERNEL _PAGE_NORMAL(_PAGE_FLAGS_WRITE | _PAGE_EXECUTE | _PAGE_GLOBAL)
#define PAGE_KERNEL_RO _PAGE_NORMAL(_PAGE_FLAGS_READ | _PAGE_EXECUTE | _PAGE_GLOBAL)
#define _PAGE_P(x) _PAGE_NORMAL((x) & ~(_PAGE_RW | _PAGE_DIRTY))
#define _PAGE_S(x) _PAGE_NORMAL(x)
#define PAGE_COPY _PAGE_P(PAGE_WRITE | PAGE_READ)
#ifndef __ASSEMBLY__
/*
* The hardware supports flags for write- and execute access. Read is
* always allowed if the page is loaded into the TLB, so the "-w-",
* "--x" and "-wx" mappings are implemented as "rw-", "r-x" and "rwx",
* respectively.
*
* The "---" case is handled by software; the page will simply not be
* loaded into the TLB if the page type is _PAGE_TYPE_NONE.
*/
#define __P000 __pgprot(PAGE_NONE)
#define __P001 _PAGE_P(PAGE_READ)
#define __P010 _PAGE_P(PAGE_WRITE)
#define __P011 _PAGE_P(PAGE_WRITE | PAGE_READ)
#define __P100 _PAGE_P(PAGE_EXEC)
#define __P101 _PAGE_P(PAGE_EXEC | PAGE_READ)
#define __P110 _PAGE_P(PAGE_EXEC | PAGE_WRITE)
#define __P111 _PAGE_P(PAGE_EXEC | PAGE_WRITE | PAGE_READ)
#define __S000 __pgprot(PAGE_NONE)
#define __S001 _PAGE_S(PAGE_READ)
#define __S010 _PAGE_S(PAGE_WRITE)
#define __S011 _PAGE_S(PAGE_WRITE | PAGE_READ)
#define __S100 _PAGE_S(PAGE_EXEC)
#define __S101 _PAGE_S(PAGE_EXEC | PAGE_READ)
#define __S110 _PAGE_S(PAGE_EXEC | PAGE_WRITE)
#define __S111 _PAGE_S(PAGE_EXEC | PAGE_WRITE | PAGE_READ)
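/*
 * For reference, a hedged sketch of how generic mm code consumes these
 * tables: mm/mmap.c gathers the sixteen entries into a protection_map[]
 * indexed by the low vm_flags bits (read/write/exec/shared), so e.g. a
 * private PROT_WRITE mapping picks __P010, where _PAGE_P() above has
 * already stripped _PAGE_RW and the first write faults into
 * copy-on-write. The example_ helper below only illustrates the
 * indexing and is not part of any real API.
 */
static inline pgprot_t example_prot_for(unsigned long vm_flags)
{
	static const pgprot_t example_map[16] = {
		__P000, __P001, __P010, __P011,
		__P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011,
		__S100, __S101, __S110, __S111
	};

	return example_map[vm_flags & 0x0f];
}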
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) \
do { \
set_pte_at(mm, addr, xp, __pte(0)); \
} while (0)
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
static inline int pte_read(pte_t pte)
{
return pte_val(pte) & _PAGE_USER;
}
static inline int pte_write(pte_t pte)
{
return pte_val(pte) & _PAGE_RW;
}
static inline int pte_exec(pte_t pte)
{
return pte_val(pte) & _PAGE_EXECUTE;
}
static inline int pte_dirty(pte_t pte)
{
return pte_val(pte) & _PAGE_DIRTY;
}
static inline int pte_young(pte_t pte)
{
return pte_val(pte) & _PAGE_ACCESSED;
}
/*
* The following only work if pte_present() is not true.
*/
static inline int pte_file(pte_t pte)
{
return pte_val(pte) & _PAGE_FILE;
}
/* Mutator functions for PTE bits */
static inline pte_t pte_rdprotect(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER));
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW));
return pte;
}
static inline pte_t pte_exprotect(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE));
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY));
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED));
return pte;
}
static inline pte_t pte_mkread(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER));
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW));
return pte;
}
static inline pte_t pte_mkexec(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE));
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY));
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED));
return pte;
}
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) \
!= _KERNPG_TABLE)
/*
* Permanent address of a page. We don't support highmem, so this is
* trivial.
*/
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) phys_to_page(pte_val(x) & PTE_PHYS_MASK)
/*
* Mark the prot value as uncacheable and unbufferable
*/
#define pgprot_noncached(prot) \
__pgprot(pgprot_val(prot) & ~(_PAGE_BUFFER | _PAGE_CACHABLE))
/*
* Mark the prot value as uncacheable but bufferable
*/
#define pgprot_writecombine(prot) \
__pgprot((pgprot_val(prot) & ~_PAGE_CACHABLE) | _PAGE_BUFFER)
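/*
 * Example: a hedged sketch of a driver mmap() method (in a driver file
 * that already includes <linux/mm.h> and <linux/fs.h>) mapping device
 * registers uncached into user space. EXAMPLE_MMIO_PHYS is a purely
 * hypothetical physical address; io_remap_pfn_range(), defined further
 * down in this header, is a plain remap_pfn_range() on AVR32.
 */
#define EXAMPLE_MMIO_PHYS	0xffe00000UL	/* hypothetical device base */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start,
				  EXAMPLE_MMIO_PHYS >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}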
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
* extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK)
| pgprot_val(newprot)));
return pte;
}
#define page_pte(page) page_pte_prot(page, __pgprot(0))
#define pmd_page_vaddr(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) (phys_to_page(pmd_val(pmd)))
/* to find an entry in a page-table-directory. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
#define pgd_offset_current(address) \
((pgd_t *)__mfsr(SYSREG_PTBR) + pgd_index(address))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the third-level page table.. */
#define pte_index(address) \
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte);
/*
* Encode and decode a swap entry
*
* Constraints:
* _PAGE_FILE at bit 0
* _PAGE_TYPE_* at bits 2-3 (for emulating _PAGE_PROTNONE)
* _PAGE_PRESENT at bit 10
*
* We encode the type into bits 4-9 and offset into bits 11-31. This
* gives us a 21 bits offset, or 2**21 * 4K = 8G usable swap space per
* device, and 64 possible types.
*
* NOTE: We should set ZEROs at the position of _PAGE_PRESENT
* and _PAGE_PROTNONE bits
*/
#define __swp_type(x) (((x).val >> 4) & 0x3f)
#define __swp_offset(x) ((x).val >> 11)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
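/*
 * A stand-alone user-space check of the encoding above: the type lands
 * in bits 4-9, the offset in bits 11-31, and the reserved bits 0
 * (_PAGE_FILE), 2-3 (_PAGE_TYPE_*) and 10 (_PAGE_PRESENT) stay clear.
 * The demo_ macros simply restate the __swp_* arithmetic so it can be
 * verified outside the kernel.
 */
#include <assert.h>
#include <stdio.h>

#define demo_swp_entry(type, offset)	(((type) << 4) | ((offset) << 11))
#define demo_swp_type(val)		(((val) >> 4) & 0x3f)
#define demo_swp_offset(val)		((val) >> 11)

int main(void)
{
	unsigned long entry = demo_swp_entry(5UL, 123456UL);

	assert(demo_swp_type(entry) == 5);
	assert(demo_swp_offset(entry) == 123456);
	/* bits 0, 2-3 and 10 are never touched by the encoding */
	assert((entry & ((1UL << 0) | (3UL << 2) | (1UL << 10))) == 0);
	printf("type=%lu offset=%lu\n",
	       demo_swp_type(entry), demo_swp_offset(entry));
	return 0;
}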
/*
* Encode and decode a nonlinear file mapping entry. We have to
* preserve _PAGE_FILE and _PAGE_PRESENT here. _PAGE_TYPE_* isn't
* necessary, since _PAGE_FILE implies !_PAGE_PROTNONE (?)
*/
#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(pte) (((pte_val(pte) >> 1) & 0x1ff) \
| ((pte_val(pte) >> 11) << 9))
#define pgoff_to_pte(off) ((pte_t) { ((((off) & 0x1ff) << 1) \
| (((off) >> 9) << 11) \
| _PAGE_FILE) })
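/*
 * The same kind of stand-alone check for the nonlinear encoding: the
 * low nine bits of the offset go to bits 1-9, the rest to bits 11 and
 * up, with bit 0 reserved for _PAGE_FILE, so the round trip is exact
 * for any offset below 2^PTE_FILE_MAX_BITS. The demo_ macros restate
 * pgoff_to_pte()/pte_to_pgoff() for user-space verification.
 */
#include <assert.h>

#define demo_pgoff_to_pte(off)	((((off) & 0x1ff) << 1) | (((off) >> 9) << 11) | 1UL)
#define demo_pte_to_pgoff(pte)	((((pte) >> 1) & 0x1ff) | (((pte) >> 11) << 9))

int main(void)
{
	unsigned long off;

	for (off = 0; off < (1UL << 30); off += 0x10001)
		assert(demo_pte_to_pgoff(demo_pgoff_to_pte(off)) == off);
	return 0;
}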
typedef pte_t *pte_addr_t;
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)
/* No page table caches to initialize (?) */
#define pgtable_cache_init() do { } while(0)
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_AVR32_PGTABLE_H */
#ifndef __ASM_AVR32_POLL_H
#define __ASM_AVR32_POLL_H
/* These are specified by iBCS2 */
#define POLLIN 0x0001
#define POLLPRI 0x0002
#define POLLOUT 0x0004
#define POLLERR 0x0008
#define POLLHUP 0x0010
#define POLLNVAL 0x0020
/* The rest seem to be more-or-less nonstandard. Check them! */
#define POLLRDNORM 0x0040
#define POLLRDBAND 0x0080
#define POLLWRNORM 0x0100
#define POLLWRBAND 0x0200
#define POLLMSG 0x0400
#define POLLREMOVE 0x1000
#define POLLRDHUP 0x2000
struct pollfd {
int fd;
short events;
short revents;
};
#endif /* __ASM_AVR32_POLL_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_POSIX_TYPES_H
#define __ASM_AVR32_POSIX_TYPES_H
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned long __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif
typedef struct {
#if defined(__KERNEL__) || defined(__USE_ALL)
int val[2];
#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
int __val[2];
#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
} __kernel_fsid_t;
#if defined(__KERNEL__)
#undef __FD_SET
static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
}
#undef __FD_CLR
static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
}
#undef __FD_ISSET
static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
}
/*
* This will unroll the loop for the normal constant case (8 ints,
* for a 256-bit fd_set)
*/
#undef __FD_ZERO
static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
{
unsigned long *__tmp = __p->fds_bits;
int __i;
if (__builtin_constant_p(__FDSET_LONGS)) {
switch (__FDSET_LONGS) {
case 16:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
__tmp[ 8] = 0; __tmp[ 9] = 0;
__tmp[10] = 0; __tmp[11] = 0;
__tmp[12] = 0; __tmp[13] = 0;
__tmp[14] = 0; __tmp[15] = 0;
return;
case 8:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
return;
case 4:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
return;
}
}
__i = __FDSET_LONGS;
while (__i) {
__i--;
*__tmp = 0;
__tmp++;
}
}
#endif /* defined(__KERNEL__) */
#endif /* __ASM_AVR32_POSIX_TYPES_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_PROCESSOR_H
#define __ASM_AVR32_PROCESSOR_H
#include <asm/page.h>
#include <asm/cache.h>
#define TASK_SIZE 0x80000000
#ifndef __ASSEMBLY__
static inline void *current_text_addr(void)
{
register void *pc asm("pc");
return pc;
}
enum arch_type {
ARCH_AVR32A,
ARCH_AVR32B,
ARCH_MAX
};
enum cpu_type {
CPU_MORGAN,
CPU_AT32AP,
CPU_MAX
};
enum tlb_config {
TLB_NONE,
TLB_SPLIT,
TLB_UNIFIED,
TLB_INVALID
};
struct avr32_cpuinfo {
struct clk *clk;
unsigned long loops_per_jiffy;
enum arch_type arch_type;
enum cpu_type cpu_type;
unsigned short arch_revision;
unsigned short cpu_revision;
enum tlb_config tlb_config;
struct cache_info icache;
struct cache_info dcache;
};
extern struct avr32_cpuinfo boot_cpu_data;
#ifdef CONFIG_SMP
extern struct avr32_cpuinfo cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
#define cpu_relax() barrier()
#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
struct cpu_context {
unsigned long sr;
unsigned long pc;
unsigned long ksp; /* Kernel stack pointer */
unsigned long r7;
unsigned long r6;
unsigned long r5;
unsigned long r4;
unsigned long r3;
unsigned long r2;
unsigned long r1;
unsigned long r0;
};
/* This struct contains the CPU context as stored by switch_to() */
struct thread_struct {
struct cpu_context cpu_context;
unsigned long single_step_addr;
u16 single_step_insn;
};
#define INIT_THREAD { \
.cpu_context = { \
.ksp = sizeof(init_stack) + (long)&init_stack, \
}, \
}
/*
* Do necessary setup to start up a newly executed thread.
*/
#define start_thread(regs, new_pc, new_sp) \
do { \
set_fs(USER_DS); \
memset(regs, 0, sizeof(*regs)); \
regs->sr = MODE_USER; \
regs->pc = new_pc & ~1; \
regs->sp = new_sp; \
} while(0)
struct task_struct;
/* Free all resources held by a thread */
extern void release_thread(struct task_struct *);
/* Create a kernel thread without removing it from tasklists */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while(0)
/* Return saved PC of a blocked thread */
#define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc)
struct pt_regs;
void show_trace(struct task_struct *task, unsigned long *stack,
struct pt_regs *regs);
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp)
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
const char *c = x;
asm volatile("pref %0" : : "r"(c));
}
#define PREFETCH_STRIDE L1_CACHE_BYTES
#endif /* __ASSEMBLY__ */
#endif /* __ASM_AVR32_PROCESSOR_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_PTRACE_H
#define __ASM_AVR32_PTRACE_H
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
/*
* Status Register bits
*/
#define SR_H 0x40000000
#define SR_R 0x20000000
#define SR_J 0x10000000
#define SR_DM 0x08000000
#define SR_D 0x04000000
#define MODE_NMI 0x01c00000
#define MODE_EXCEPTION 0x01800000
#define MODE_INT3 0x01400000
#define MODE_INT2 0x01000000
#define MODE_INT1 0x00c00000
#define MODE_INT0 0x00800000
#define MODE_SUPERVISOR 0x00400000
#define MODE_USER 0x00000000
#define MODE_MASK 0x01c00000
#define SR_EM 0x00200000
#define SR_I3M 0x00100000
#define SR_I2M 0x00080000
#define SR_I1M 0x00040000
#define SR_I0M 0x00020000
#define SR_GM 0x00010000
#define SR_H_BIT 30
#define SR_R_BIT 29
#define SR_J_BIT 28
#define SR_DM_BIT 27
#define SR_D_BIT 26
#define MODE_SHIFT 22
#define SR_EM_BIT 21
#define SR_I3M_BIT 20
#define SR_I2M_BIT 19
#define SR_I1M_BIT 18
#define SR_I0M_BIT 17
#define SR_GM_BIT 16
/* The user-visible part */
#define SR_L 0x00000020
#define SR_Q 0x00000010
#define SR_V 0x00000008
#define SR_N 0x00000004
#define SR_Z 0x00000002
#define SR_C 0x00000001
#define SR_L_BIT 5
#define SR_Q_BIT 4
#define SR_V_BIT 3
#define SR_N_BIT 2
#define SR_Z_BIT 1
#define SR_C_BIT 0
/*
* The order is defined by the stmts instruction. r0 is stored first,
* so it gets the highest address.
*
* Registers 0-12 are general-purpose registers (r12 is normally used for
* the function return value).
* Register 13 is the stack pointer
* Register 14 is the link register
* Register 15 is the program counter (retrieved from the RAR sysreg)
*/
#define FRAME_SIZE_FULL 72
#define REG_R12_ORIG 68
#define REG_R0 64
#define REG_R1 60
#define REG_R2 56
#define REG_R3 52
#define REG_R4 48
#define REG_R5 44
#define REG_R6 40
#define REG_R7 36
#define REG_R8 32
#define REG_R9 28
#define REG_R10 24
#define REG_R11 20
#define REG_R12 16
#define REG_SP 12
#define REG_LR 8
#define FRAME_SIZE_MIN 8
#define REG_PC 4
#define REG_SR 0
#ifndef __ASSEMBLY__
struct pt_regs {
/* These are always saved */
unsigned long sr;
unsigned long pc;
/* These are sometimes saved */
unsigned long lr;
unsigned long sp;
unsigned long r12;
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long r7;
unsigned long r6;
unsigned long r5;
unsigned long r4;
unsigned long r3;
unsigned long r2;
unsigned long r1;
unsigned long r0;
/* Only saved on system call */
unsigned long r12_orig;
};
#ifdef __KERNEL__
# define user_mode(regs) (((regs)->sr & MODE_MASK) == MODE_USER)
extern void show_regs (struct pt_regs *);
static __inline__ int valid_user_regs(struct pt_regs *regs)
{
/*
* Some of the Java bits might be acceptable if/when we
* implement some support for that stuff...
*/
if ((regs->sr & 0xffff0000) == 0)
return 1;
/*
* Force status register flags to be sane and report this
* illegal behaviour...
*/
regs->sr &= 0x0000ffff;
return 0;
}
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
#endif /* __KERNEL__ */
#endif /* ! __ASSEMBLY__ */
#endif /* __ASM_AVR32_PTRACE_H */
#ifndef __ASM_AVR32_RESOURCE_H
#define __ASM_AVR32_RESOURCE_H
#include <asm-generic/resource.h>
#endif /* __ASM_AVR32_RESOURCE_H */
#ifndef __ASM_AVR32_SCATTERLIST_H
#define __ASM_AVR32_SCATTERLIST_H
struct scatterlist {
struct page *page;
unsigned int offset;
dma_addr_t dma_address;
unsigned int length;
};
/* These macros should be used after a pci_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries pci_map_sg
* returns.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
#define ISA_DMA_THRESHOLD (0xffffffff)
#endif /* __ASM_AVR32_SCATTERLIST_H */
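/*
 * Example: a hedged sketch of the pattern described in the comment
 * above. The example_ function is hypothetical; the important points
 * are to iterate over the count returned by pci_map_sg() rather than
 * the original nents, and to read bus addresses and lengths only
 * through the sg_dma_address()/sg_dma_len() accessors.
 */
#include <linux/pci.h>

static unsigned int example_total_dma_len(struct pci_dev *pdev,
					  struct scatterlist *sg, int nents)
{
	unsigned int total = 0;
	int i, mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);

	for (i = 0; i < mapped; i++)
		total += sg_dma_len(&sg[i]);	/* paired with sg_dma_address(&sg[i]) */

	pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
	return total;
}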
#ifndef __ASM_AVR32_SECTIONS_H
#define __ASM_AVR32_SECTIONS_H
#include <asm-generic/sections.h>
#endif /* __ASM_AVR32_SECTIONS_H */
/*
* SMP- and interrupt-safe semaphores.
*
* Copyright (C) 2006 Atmel Corporation
*
* Based on include/asm-i386/semaphore.h
* Copyright (C) 1996 Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_SEMAPHORE_H
#define __ASM_AVR32_SEMAPHORE_H
#include <linux/linkage.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.count = ATOMIC_INIT(n), \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
static inline void sema_init (struct semaphore *sem, int val)
{
atomic_set(&sem->count, val);
sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
}
static inline void init_MUTEX (struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
sema_init(sem, 0);
}
void __down(struct semaphore * sem);
int __down_interruptible(struct semaphore * sem);
void __up(struct semaphore * sem);
/*
 * This is ugly, but we want the default (uncontended) case to fall
 * through. __down() is the C routine that actually puts the caller to
 * sleep once the semaphore count has gone negative.
 */
static inline void down(struct semaphore * sem)
{
might_sleep();
if (unlikely(atomic_dec_return (&sem->count) < 0))
__down (sem);
}
/*
 * Interruptibly try to acquire a semaphore. Returns zero if we obtained
 * it and -EINTR if we were interrupted.
 */
static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
might_sleep();
if (unlikely(atomic_dec_return (&sem->count) < 0))
ret = __down_interruptible (sem);
return ret;
}
/*
 * Attempt to down() a semaphore without blocking.
 * Returns zero if we acquired it, nonzero if we didn't.
 */
static inline int down_trylock(struct semaphore * sem)
{
return atomic_dec_if_positive(&sem->count) < 0;
}
/*
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
static inline void up(struct semaphore * sem)
{
if (unlikely(atomic_inc_return (&sem->count) <= 0))
__up (sem);
}
#endif /*__ASM_AVR32_SEMAPHORE_H */
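/*
 * Example: a hedged sketch of the usual calling pattern for the
 * primitives above. example_sem and example_op() are hypothetical; the
 * points to note are that down()/down_interruptible() may sleep and so
 * must not be called from interrupt context, and that every successful
 * down is paired with exactly one up().
 */
#include <linux/errno.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(example_sem);

static int example_op(void)
{
	if (down_interruptible(&example_sem))
		return -EINTR;
	/* ... access data protected by example_sem ... */
	up(&example_sem);
	return 0;
}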
#ifndef __ASM_AVR32_SEMBUF_H
#define __ASM_AVR32_SEMBUF_H
/*
 * The semid64_ds structure for the AVR32 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
__kernel_time_t sem_otime; /* last semop time */
unsigned long __unused1;
__kernel_time_t sem_ctime; /* last change time */
unsigned long __unused2;
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused3;
unsigned long __unused4;
};
#endif /* __ASM_AVR32_SEMBUF_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on linux/include/asm-arm/setup.h
 * Copyright (C) 1997-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_SETUP_H__
#define __ASM_AVR32_SETUP_H__
#define COMMAND_LINE_SIZE 256
/* Magic number indicating that a tag table is present */
#define ATAG_MAGIC 0xa2a25441
#ifndef __ASSEMBLY__
/*
* Generic memory range, used by several tags.
*
* addr is always physical.
* size is measured in bytes.
* next is for use by the OS, e.g. for grouping regions into
* linked lists.
*/
struct tag_mem_range {
u32 addr;
u32 size;
struct tag_mem_range * next;
};
/* The list ends with an ATAG_NONE node. */
#define ATAG_NONE 0x00000000
struct tag_header {
u32 size;
u32 tag;
};
/* The list must start with an ATAG_CORE node */
#define ATAG_CORE 0x54410001
struct tag_core {
u32 flags;
u32 pagesize;
u32 rootdev;
};
/* it is allowed to have multiple ATAG_MEM nodes */
#define ATAG_MEM 0x54410002
/* ATAG_MEM uses tag_mem_range */
/* command line: \0 terminated string */
#define ATAG_CMDLINE 0x54410003
struct tag_cmdline {
char cmdline[1]; /* this is the minimum size */
};
/* Ramdisk image (may be compressed) */
#define ATAG_RDIMG 0x54410004
/* ATAG_RDIMG uses tag_mem_range */
/* Information about various clocks present in the system */
#define ATAG_CLOCK 0x54410005
struct tag_clock {
u32 clock_id; /* Which clock are we talking about? */
u32 clock_flags; /* Special features */
u64 clock_hz; /* Clock speed in Hz */
};
/* The clock types we know about */
#define CLOCK_BOOTCPU 0
/* Memory reserved for the system (e.g. the bootloader) */
#define ATAG_RSVD_MEM 0x54410006
/* ATAG_RSVD_MEM uses tag_mem_range */
/* Ethernet information */
#define ATAG_ETHERNET 0x54410007
struct tag_ethernet {
u8 mac_index;
u8 mii_phy_addr;
u8 hw_address[6];
};
#define ETH_INVALID_PHY 0xff
struct tag {
struct tag_header hdr;
union {
struct tag_core core;
struct tag_mem_range mem_range;
struct tag_cmdline cmdline;
struct tag_clock clock;
struct tag_ethernet ethernet;
} u;
};
struct tagtable {
u32 tag;
int (*parse)(struct tag *);
};
#define __tag __attribute_used__ __attribute__((__section__(".taglist")))
#define __tagtable(tag, fn) \
static struct tagtable __tagtable_##fn __tag = { tag, fn }
#define tag_member_present(tag,member) \
((unsigned long)(&((struct tag *)0L)->member + 1) \
<= (tag)->hdr.size * 4)
#define tag_next(t) ((struct tag *)((u32 *)(t) + (t)->hdr.size))
#define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2)
#define for_each_tag(t,base) \
for (t = base; t->hdr.size; t = tag_next(t))
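/*
 * Example: a hedged sketch of how the tag list is consumed. The
 * example_ names are hypothetical; a parser is registered with
 * __tagtable() so that generic setup code can dispatch on hdr.tag, and
 * for_each_tag() walks the list the boot loader passed via
 * bootloader_tags, using hdr.size (in words) to reach the next node.
 */
static int example_parse_clock(struct tag *tag)
{
	/* tag->u.clock.clock_hz is valid when hdr.tag == ATAG_CLOCK */
	return 0;
}
__tagtable(ATAG_CLOCK, example_parse_clock);

static unsigned int example_count_mem_banks(struct tag *base)
{
	struct tag *t;
	unsigned int banks = 0;

	for_each_tag(t, base)
		if (t->hdr.tag == ATAG_MEM)
			banks++;	/* t->u.mem_range describes one bank */

	return banks;
}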
extern struct tag_mem_range *mem_phys;
extern struct tag_mem_range *mem_reserved;
extern struct tag_mem_range *mem_ramdisk;
extern struct tag *bootloader_tags;
extern void setup_bootmem(void);
extern void setup_processor(void);
extern void board_setup_fbmem(unsigned long fbmem_start,
unsigned long fbmem_size);
/* Chip-specific hook to enable the use of SDRAM */
void chip_enable_sdram(void);
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_AVR32_SETUP_H__ */
#ifndef __ASM_AVR32_SHMBUF_H
#define __ASM_AVR32_SHMBUF_H
/*
 * The shmid64_ds structure for the AVR32 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
__kernel_time_t shm_atime; /* last attach time */
unsigned long __unused1;
__kernel_time_t shm_dtime; /* last detach time */
unsigned long __unused2;
__kernel_time_t shm_ctime; /* last change time */
unsigned long __unused3;
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned long shm_nattch; /* no. of current attaches */
unsigned long __unused4;
unsigned long __unused5;
};
struct shminfo64 {
unsigned long shmmax;
unsigned long shmmin;
unsigned long shmmni;
unsigned long shmseg;
unsigned long shmall;
unsigned long __unused1;
unsigned long __unused2;
unsigned long __unused3;
unsigned long __unused4;
};
#endif /* __ASM_AVR32_SHMBUF_H */
#ifndef __ASM_AVR32_SHMPARAM_H
#define __ASM_AVR32_SHMPARAM_H
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#endif /* __ASM_AVR32_SHMPARAM_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_SIGCONTEXT_H
#define __ASM_AVR32_SIGCONTEXT_H
struct sigcontext {
unsigned long oldmask;
/* CPU registers */
unsigned long sr;
unsigned long pc;
unsigned long lr;
unsigned long sp;
unsigned long r12;
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long r7;
unsigned long r6;
unsigned long r5;
unsigned long r4;
unsigned long r3;
unsigned long r2;
unsigned long r1;
unsigned long r0;
};
#endif /* __ASM_AVR32_SIGCONTEXT_H */
#ifndef _AVR32_SIGINFO_H
#define _AVR32_SIGINFO_H
#include <asm-generic/siginfo.h>
#endif
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_SIGNAL_H
#define __ASM_AVR32_SIGNAL_H
#include <linux/types.h>
/* Avoid too many header ordering problems. */
struct siginfo;
#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
#define _NSIG 64
#define _NSIG_BPW 32
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
typedef unsigned long old_sigset_t; /* at least 32 bits */
typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
#else
/* Here we must cater to libcs that poke about in kernel headers. */
#define NSIG 32
typedef unsigned long sigset_t;
#endif /* __KERNEL__ */
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/*
#define SIGLOST 29
*/
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX (_NSIG-1)
/*
* SA_FLAGS values:
*
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_SIGINFO deliver the signal with SIGINFO structs
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NODEFER prevents the current signal from being masked in the handler.
* SA_RESETHAND clears the handler when the signal is delivered.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002
#define SA_SIGINFO 0x00000004
#define SA_RESTORER 0x04000000
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
#include <asm-generic/signal.h>
#ifdef __KERNEL__
struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
__sigrestore_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
__sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
struct k_sigaction {
struct sigaction sa;
};
#else
/* Here we must cater to libcs that poke about in kernel headers. */
struct sigaction {
union {
__sighandler_t _sa_handler;
void (*_sa_sigaction)(int, struct siginfo *, void *);
} _u;
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
#define sa_handler _u._sa_handler
#define sa_sigaction _u._sa_sigaction
#endif /* __KERNEL__ */
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
#ifdef __KERNEL__
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */
#endif
#ifndef __ASM_AVR32_SOCKET_H
#define __ASM_AVR32_SOCKET_H
#include <asm/sockios.h>
/* For setsockopt(2) */
#define SOL_SOCKET 1
#define SO_DEBUG 1
#define SO_REUSEADDR 2
#define SO_TYPE 3
#define SO_ERROR 4
#define SO_DONTROUTE 5
#define SO_BROADCAST 6
#define SO_SNDBUF 7
#define SO_RCVBUF 8
#define SO_SNDBUFFORCE 32
#define SO_RCVBUFFORCE 33
#define SO_KEEPALIVE 9
#define SO_OOBINLINE 10
#define SO_NO_CHECK 11
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
/* To add: #define SO_REUSEPORT 15 */
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
#define SO_SNDLOWAT 19
#define SO_RCVTIMEO 20
#define SO_SNDTIMEO 21
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 22
#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
#define SO_SECURITY_ENCRYPTION_NETWORK 24
#define SO_BINDTODEVICE 25
/* Socket filtering */
#define SO_ATTACH_FILTER 26
#define SO_DETACH_FILTER 27
#define SO_PEERNAME 28
#define SO_TIMESTAMP 29
#define SCM_TIMESTAMP SO_TIMESTAMP
#define SO_ACCEPTCONN 30
#define SO_PEERSEC 31
#define SO_PASSSEC 34
#endif /* __ASM_AVR32_SOCKET_H */
#ifndef __ASM_AVR32_SOCKIOS_H
#define __ASM_AVR32_SOCKIOS_H
/* Socket-level I/O control calls. */
#define FIOSETOWN 0x8901
#define SIOCSPGRP 0x8902
#define FIOGETOWN 0x8903
#define SIOCGPGRP 0x8904
#define SIOCATMARK 0x8905
#define SIOCGSTAMP 0x8906 /* Get stamp */
#endif /* __ASM_AVR32_SOCKIOS_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_STAT_H
#define __ASM_AVR32_STAT_H
struct __old_kernel_stat {
unsigned short st_dev;
unsigned short st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
unsigned long st_size;
unsigned long st_atime;
unsigned long st_mtime;
unsigned long st_ctime;
};
struct stat {
unsigned long st_dev;
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned long st_rdev;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};
#define STAT_HAVE_NSEC 1
struct stat64 {
unsigned long long st_dev;
unsigned long long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned long st_uid;
unsigned long st_gid;
unsigned long long st_rdev;
long long st_size;
unsigned long __pad1; /* align 64-bit st_blocks */
unsigned long st_blksize;
unsigned long long st_blocks; /* Number of 512-byte blocks allocated. */
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* __ASM_AVR32_STAT_H */
#ifndef __ASM_AVR32_STATFS_H
#define __ASM_AVR32_STATFS_H
#include <asm-generic/statfs.h>
#endif /* __ASM_AVR32_STATFS_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_STRING_H
#define __ASM_AVR32_STRING_H
#define __HAVE_ARCH_MEMSET
extern void *memset(void *b, int c, size_t len);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *to, const void *from, size_t len);
#endif /* __ASM_AVR32_STRING_H */
/*
* AVR32 System Registers
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_SYSREG_H__
#define __ASM_AVR32_SYSREG_H__
/* sysreg register offsets */
#define SYSREG_SR 0x0000
#define SYSREG_EVBA 0x0004
#define SYSREG_ACBA 0x0008
#define SYSREG_CPUCR 0x000c
#define SYSREG_ECR 0x0010
#define SYSREG_RSR_SUP 0x0014
#define SYSREG_RSR_INT0 0x0018
#define SYSREG_RSR_INT1 0x001c
#define SYSREG_RSR_INT2 0x0020
#define SYSREG_RSR_INT3 0x0024
#define SYSREG_RSR_EX 0x0028
#define SYSREG_RSR_NMI 0x002c
#define SYSREG_RSR_DBG 0x0030
#define SYSREG_RAR_SUP 0x0034
#define SYSREG_RAR_INT0 0x0038
#define SYSREG_RAR_INT1 0x003c
#define SYSREG_RAR_INT2 0x0040
#define SYSREG_RAR_INT3 0x0044
#define SYSREG_RAR_EX 0x0048
#define SYSREG_RAR_NMI 0x004c
#define SYSREG_RAR_DBG 0x0050
#define SYSREG_JECR 0x0054
#define SYSREG_JOSP 0x0058
#define SYSREG_JAVA_LV0 0x005c
#define SYSREG_JAVA_LV1 0x0060
#define SYSREG_JAVA_LV2 0x0064
#define SYSREG_JAVA_LV3 0x0068
#define SYSREG_JAVA_LV4 0x006c
#define SYSREG_JAVA_LV5 0x0070
#define SYSREG_JAVA_LV6 0x0074
#define SYSREG_JAVA_LV7 0x0078
#define SYSREG_JTBA 0x007c
#define SYSREG_JBCR 0x0080
#define SYSREG_CONFIG0 0x0100
#define SYSREG_CONFIG1 0x0104
#define SYSREG_COUNT 0x0108
#define SYSREG_COMPARE 0x010c
#define SYSREG_TLBEHI 0x0110
#define SYSREG_TLBELO 0x0114
#define SYSREG_PTBR 0x0118
#define SYSREG_TLBEAR 0x011c
#define SYSREG_MMUCR 0x0120
#define SYSREG_TLBARLO 0x0124
#define SYSREG_TLBARHI 0x0128
#define SYSREG_PCCNT 0x012c
#define SYSREG_PCNT0 0x0130
#define SYSREG_PCNT1 0x0134
#define SYSREG_PCCR 0x0138
#define SYSREG_BEAR 0x013c
/* Bitfields in SR */
#define SYSREG_SR_C_OFFSET 0
#define SYSREG_SR_C_SIZE 1
#define SYSREG_Z_OFFSET 1
#define SYSREG_Z_SIZE 1
#define SYSREG_SR_N_OFFSET 2
#define SYSREG_SR_N_SIZE 1
#define SYSREG_SR_V_OFFSET 3
#define SYSREG_SR_V_SIZE 1
#define SYSREG_Q_OFFSET 4
#define SYSREG_Q_SIZE 1
#define SYSREG_GM_OFFSET 16
#define SYSREG_GM_SIZE 1
#define SYSREG_I0M_OFFSET 17
#define SYSREG_I0M_SIZE 1
#define SYSREG_I1M_OFFSET 18
#define SYSREG_I1M_SIZE 1
#define SYSREG_I2M_OFFSET 19
#define SYSREG_I2M_SIZE 1
#define SYSREG_I3M_OFFSET 20
#define SYSREG_I3M_SIZE 1
#define SYSREG_EM_OFFSET 21
#define SYSREG_EM_SIZE 1
#define SYSREG_M0_OFFSET 22
#define SYSREG_M0_SIZE 1
#define SYSREG_M1_OFFSET 23
#define SYSREG_M1_SIZE 1
#define SYSREG_M2_OFFSET 24
#define SYSREG_M2_SIZE 1
#define SYSREG_SR_D_OFFSET 26
#define SYSREG_SR_D_SIZE 1
#define SYSREG_DM_OFFSET 27
#define SYSREG_DM_SIZE 1
#define SYSREG_SR_J_OFFSET 28
#define SYSREG_SR_J_SIZE 1
#define SYSREG_R_OFFSET 29
#define SYSREG_R_SIZE 1
#define SYSREG_H_OFFSET 30
#define SYSREG_H_SIZE 1
/* Bitfields in EVBA */
/* Bitfields in ACBA */
/* Bitfields in CPUCR */
#define SYSREG_BI_OFFSET 0
#define SYSREG_BI_SIZE 1
#define SYSREG_BE_OFFSET 1
#define SYSREG_BE_SIZE 1
#define SYSREG_FE_OFFSET 2
#define SYSREG_FE_SIZE 1
#define SYSREG_RE_OFFSET 3
#define SYSREG_RE_SIZE 1
#define SYSREG_IBE_OFFSET 4
#define SYSREG_IBE_SIZE 1
#define SYSREG_IEE_OFFSET 5
#define SYSREG_IEE_SIZE 1
/* Bitfields in ECR */
#define SYSREG_ECR_OFFSET 0
#define SYSREG_ECR_SIZE 32
/* Bitfields in RSR_SUP */
/* Bitfields in RSR_INT0 */
/* Bitfields in RSR_INT1 */
/* Bitfields in RSR_INT2 */
/* Bitfields in RSR_INT3 */
/* Bitfields in RSR_EX */
/* Bitfields in RSR_NMI */
/* Bitfields in RSR_DBG */
/* Bitfields in RAR_SUP */
/* Bitfields in RAR_INT0 */
/* Bitfields in RAR_INT1 */
/* Bitfields in RAR_INT2 */
/* Bitfields in RAR_INT3 */
/* Bitfields in RAR_EX */
/* Bitfields in RAR_NMI */
/* Bitfields in RAR_DBG */
/* Bitfields in JECR */
/* Bitfields in JOSP */
/* Bitfields in JAVA_LV0 */
/* Bitfields in JAVA_LV1 */
/* Bitfields in JAVA_LV2 */
/* Bitfields in JAVA_LV3 */
/* Bitfields in JAVA_LV4 */
/* Bitfields in JAVA_LV5 */
/* Bitfields in JAVA_LV6 */
/* Bitfields in JAVA_LV7 */
/* Bitfields in JTBA */
/* Bitfields in JBCR */
/* Bitfields in CONFIG0 */
#define SYSREG_CONFIG0_D_OFFSET 1
#define SYSREG_CONFIG0_D_SIZE 1
#define SYSREG_CONFIG0_S_OFFSET 2
#define SYSREG_CONFIG0_S_SIZE 1
#define SYSREG_O_OFFSET 3
#define SYSREG_O_SIZE 1
#define SYSREG_P_OFFSET 4
#define SYSREG_P_SIZE 1
#define SYSREG_CONFIG0_J_OFFSET 5
#define SYSREG_CONFIG0_J_SIZE 1
#define SYSREG_F_OFFSET 6
#define SYSREG_F_SIZE 1
#define SYSREG_MMUT_OFFSET 7
#define SYSREG_MMUT_SIZE 3
#define SYSREG_AR_OFFSET 10
#define SYSREG_AR_SIZE 3
#define SYSREG_AT_OFFSET 13
#define SYSREG_AT_SIZE 3
#define SYSREG_PROCESSORREVISION_OFFSET 16
#define SYSREG_PROCESSORREVISION_SIZE 8
#define SYSREG_PROCESSORID_OFFSET 24
#define SYSREG_PROCESSORID_SIZE 8
/* Bitfields in CONFIG1 */
#define SYSREG_DASS_OFFSET 0
#define SYSREG_DASS_SIZE 3
#define SYSREG_DLSZ_OFFSET 3
#define SYSREG_DLSZ_SIZE 3
#define SYSREG_DSET_OFFSET 6
#define SYSREG_DSET_SIZE 4
#define SYSREG_IASS_OFFSET 10
#define SYSREG_IASS_SIZE 2
#define SYSREG_ILSZ_OFFSET 13
#define SYSREG_ILSZ_SIZE 3
#define SYSREG_ISET_OFFSET 16
#define SYSREG_ISET_SIZE 4
#define SYSREG_DMMUSZ_OFFSET 20
#define SYSREG_DMMUSZ_SIZE 6
#define SYSREG_IMMUSZ_OFFSET 26
#define SYSREG_IMMUSZ_SIZE 6
/* Bitfields in COUNT */
/* Bitfields in COMPARE */
/* Bitfields in TLBEHI */
#define SYSREG_ASID_OFFSET 0
#define SYSREG_ASID_SIZE 8
#define SYSREG_TLBEHI_I_OFFSET 8
#define SYSREG_TLBEHI_I_SIZE 1
#define SYSREG_TLBEHI_V_OFFSET 9
#define SYSREG_TLBEHI_V_SIZE 1
#define SYSREG_VPN_OFFSET 10
#define SYSREG_VPN_SIZE 22
/* Bitfields in TLBELO */
#define SYSREG_W_OFFSET 0
#define SYSREG_W_SIZE 1
#define SYSREG_TLBELO_D_OFFSET 1
#define SYSREG_TLBELO_D_SIZE 1
#define SYSREG_SZ_OFFSET 2
#define SYSREG_SZ_SIZE 2
#define SYSREG_AP_OFFSET 4
#define SYSREG_AP_SIZE 3
#define SYSREG_B_OFFSET 7
#define SYSREG_B_SIZE 1
#define SYSREG_G_OFFSET 8
#define SYSREG_G_SIZE 1
#define SYSREG_TLBELO_C_OFFSET 9
#define SYSREG_TLBELO_C_SIZE 1
#define SYSREG_PFN_OFFSET 10
#define SYSREG_PFN_SIZE 22
/* Bitfields in PTBR */
/* Bitfields in TLBEAR */
/* Bitfields in MMUCR */
#define SYSREG_E_OFFSET 0
#define SYSREG_E_SIZE 1
#define SYSREG_M_OFFSET 1
#define SYSREG_M_SIZE 1
#define SYSREG_MMUCR_I_OFFSET 2
#define SYSREG_MMUCR_I_SIZE 1
#define SYSREG_MMUCR_N_OFFSET 3
#define SYSREG_MMUCR_N_SIZE 1
#define SYSREG_MMUCR_S_OFFSET 4
#define SYSREG_MMUCR_S_SIZE 1
#define SYSREG_DLA_OFFSET 8
#define SYSREG_DLA_SIZE 6
#define SYSREG_DRP_OFFSET 14
#define SYSREG_DRP_SIZE 6
#define SYSREG_ILA_OFFSET 20
#define SYSREG_ILA_SIZE 6
#define SYSREG_IRP_OFFSET 26
#define SYSREG_IRP_SIZE 6
/* Bitfields in TLBARLO */
/* Bitfields in TLBARHI */
/* Bitfields in PCCNT */
/* Bitfields in PCNT0 */
/* Bitfields in PCNT1 */
/* Bitfields in PCCR */
/* Bitfields in BEAR */
/* Constants for ECR */
#define ECR_UNRECOVERABLE 0
#define ECR_TLB_MULTIPLE 1
#define ECR_BUS_ERROR_WRITE 2
#define ECR_BUS_ERROR_READ 3
#define ECR_NMI 4
#define ECR_ADDR_ALIGN_X 5
#define ECR_PROTECTION_X 6
#define ECR_DEBUG 7
#define ECR_ILLEGAL_OPCODE 8
#define ECR_UNIMPL_INSTRUCTION 9
#define ECR_PRIVILEGE_VIOLATION 10
#define ECR_FPE 11
#define ECR_COPROC_ABSENT 12
#define ECR_ADDR_ALIGN_R 13
#define ECR_ADDR_ALIGN_W 14
#define ECR_PROTECTION_R 15
#define ECR_PROTECTION_W 16
#define ECR_DTLB_MODIFIED 17
#define ECR_TLB_MISS_X 20
#define ECR_TLB_MISS_R 24
#define ECR_TLB_MISS_W 28
/* Bit manipulation macros */
#define SYSREG_BIT(name) (1 << SYSREG_##name##_OFFSET)
#define SYSREG_BF(name,value) (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) << SYSREG_##name##_OFFSET)
#define SYSREG_BFEXT(name,value) (((value) >> SYSREG_##name##_OFFSET) & ((1 << SYSREG_##name##_SIZE) - 1))
#define SYSREG_BFINS(name,value,old) (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) << SYSREG_##name##_OFFSET)) | SYSREG_BF(name,value))
#ifdef __CHECKER__
extern unsigned long __builtin_mfsr(unsigned long reg);
extern void __builtin_mtsr(unsigned long reg, unsigned long value);
#endif
/* Register access macros */
#define sysreg_read(reg) __builtin_mfsr(SYSREG_##reg)
#define sysreg_write(reg, value) __builtin_mtsr(SYSREG_##reg, value)
#endif /* __ASM_AVR32_SYSREG_H__ */
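/*
 * Example: a hedged sketch of using the bitfield helpers above. The
 * example_ functions are hypothetical; SYSREG_BFEXT() extracts a named
 * field from a register value and SYSREG_BFINS() rewrites just that
 * field, so reading the current ASID or updating only the DLA field of
 * MMUCR looks like this.
 */
#include <asm/sysreg.h>

static inline unsigned long example_current_asid(void)
{
	return SYSREG_BFEXT(ASID, sysreg_read(TLBEHI));
}

static inline void example_set_dla(unsigned long dla)
{
	unsigned long mmucr = sysreg_read(MMUCR);

	sysreg_write(MMUCR, SYSREG_BFINS(DLA, dla, mmucr));
}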
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_SYSTEM_H
#define __ASM_AVR32_SYSTEM_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define nop() asm volatile("nop")
#define mb() asm volatile("" : : : "memory")
#define rmb() mb()
#define wmb() asm volatile("sync 0" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while(0)
/*
* Help PathFinder and other Nexus-compliant debuggers keep track of
* the current PID by emitting an Ownership Trace Message each time we
* switch task.
*/
#ifdef CONFIG_OWNERSHIP_TRACE
#include <asm/ocd.h>
#define finish_arch_switch(prev) \
do { \
__mtdr(DBGREG_PID, prev->pid); \
__mtdr(DBGREG_PID, current->pid); \
} while(0)
#endif
/*
* switch_to(prev, next, last) should switch from task `prev' to task
* `next'. `prev' will never be the same as `next'.
*
* We just delegate everything to the __switch_to assembly function,
* which is implemented in arch/avr32/kernel/switch_to.S
*
* mb() tells GCC not to cache `current' across this call.
*/
struct cpu_context;
struct task_struct;
extern struct task_struct *__switch_to(struct task_struct *,
struct cpu_context *,
struct cpu_context *);
#define switch_to(prev, next, last) \
do { \
last = __switch_to(prev, &prev->thread.cpu_context + 1, \
&next->thread.cpu_context); \
} while (0)
#ifdef CONFIG_SMP
# error "The AVR32 port does not support SMP"
#else
# define smp_mb() barrier()
# define smp_rmb() barrier()
# define smp_wmb() barrier()
# define smp_read_barrier_depends() do { } while(0)
#endif
#include <linux/irqflags.h>
extern void __xchg_called_with_bad_pointer(void);
#ifdef __CHECKER__
extern unsigned long __builtin_xchg(void *ptr, unsigned long x);
#endif
#define xchg_u32(val, m) __builtin_xchg((void *)(m), (val))
static inline unsigned long __xchg(unsigned long x,
volatile void *ptr,
int size)
{
switch(size) {
case 4:
return xchg_u32(x, ptr);
default:
__xchg_called_with_bad_pointer();
return x;
}
}
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
unsigned long new)
{
__u32 ret;
asm volatile(
"1: ssrf 5\n"
" ld.w %[ret], %[m]\n"
" cp.w %[ret], %[old]\n"
" brne 2f\n"
" stcond %[m], %[new]\n"
" brne 1b\n"
"2:\n"
: [ret] "=&r"(ret), [m] "=m"(*m)
: "m"(m), [old] "ir"(old), [new] "r"(new)
: "memory", "cc");
return ret;
}
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 4:
return __cmpxchg_u32(ptr, old, new);
case 8:
return __cmpxchg_u64(ptr, old, new);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr, old, new) \
((typeof(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), \
(unsigned long)(new), \
sizeof(*(ptr))))
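/*
 * Example: a hedged sketch of the usual cmpxchg() retry loop, here
 * adding to a 32-bit counter. example_add() is hypothetical, but the
 * pattern is the standard one: reread, compute, and retry until nothing
 * else changed the value in between.
 */
static inline void example_add(volatile unsigned int *counter, unsigned int val)
{
	unsigned int old;

	do {
		old = *counter;
	} while (cmpxchg(counter, old, old + val) != old);
}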
struct pt_regs;
extern void __die(const char *, struct pt_regs *, unsigned long,
const char *, const char *, unsigned long);
extern void __die_if_kernel(const char *, struct pt_regs *, unsigned long,
const char *, const char *, unsigned long);
#define die(msg, regs, err) \
__die(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs, err) \
__die_if_kernel(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
#define arch_align_stack(x) (x)
#endif /* __ASM_AVR32_SYSTEM_H */
#ifndef __ASM_AVR32_TERMBITS_H
#define __ASM_AVR32_TERMBITS_H
#include <linux/posix_types.h>
typedef unsigned char cc_t;
typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#define NCCS 19
struct termios {
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
tcflag_t c_cflag; /* control mode flags */
tcflag_t c_lflag; /* local mode flags */
cc_t c_line; /* line discipline */
cc_t c_cc[NCCS]; /* control characters */
};
/* c_cc characters */
#define VINTR 0
#define VQUIT 1
#define VERASE 2
#define VKILL 3
#define VEOF 4
#define VTIME 5
#define VMIN 6
#define VSWTC 7
#define VSTART 8
#define VSTOP 9
#define VSUSP 10
#define VEOL 11
#define VREPRINT 12
#define VDISCARD 13
#define VWERASE 14
#define VLNEXT 15
#define VEOL2 16
/* c_iflag bits */
#define IGNBRK 0000001
#define BRKINT 0000002
#define IGNPAR 0000004
#define PARMRK 0000010
#define INPCK 0000020
#define ISTRIP 0000040
#define INLCR 0000100
#define IGNCR 0000200
#define ICRNL 0000400
#define IUCLC 0001000
#define IXON 0002000
#define IXANY 0004000
#define IXOFF 0010000
#define IMAXBEL 0020000
#define IUTF8 0040000
/* c_oflag bits */
#define OPOST 0000001
#define OLCUC 0000002
#define ONLCR 0000004
#define OCRNL 0000010
#define ONOCR 0000020
#define ONLRET 0000040
#define OFILL 0000100
#define OFDEL 0000200
#define NLDLY 0000400
#define NL0 0000000
#define NL1 0000400
#define CRDLY 0003000
#define CR0 0000000
#define CR1 0001000
#define CR2 0002000
#define CR3 0003000
#define TABDLY 0014000
#define TAB0 0000000
#define TAB1 0004000
#define TAB2 0010000
#define TAB3 0014000
#define XTABS 0014000
#define BSDLY 0020000
#define BS0 0000000
#define BS1 0020000
#define VTDLY 0040000
#define VT0 0000000
#define VT1 0040000
#define FFDLY 0100000
#define FF0 0000000
#define FF1 0100000
/* c_cflag bit meaning */
#define CBAUD 0010017
#define B0 0000000 /* hang up */
#define B50 0000001
#define B75 0000002
#define B110 0000003
#define B134 0000004
#define B150 0000005
#define B200 0000006
#define B300 0000007
#define B600 0000010
#define B1200 0000011
#define B1800 0000012
#define B2400 0000013
#define B4800 0000014
#define B9600 0000015
#define B19200 0000016
#define B38400 0000017
#define EXTA B19200
#define EXTB B38400
#define CSIZE 0000060
#define CS5 0000000
#define CS6 0000020
#define CS7 0000040
#define CS8 0000060
#define CSTOPB 0000100
#define CREAD 0000200
#define PARENB 0000400
#define PARODD 0001000
#define HUPCL 0002000
#define CLOCAL 0004000
#define CBAUDEX 0010000
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
#define B460800 0010004
#define B500000 0010005
#define B576000 0010006
#define B921600 0010007
#define B1000000 0010010
#define B1152000 0010011
#define B1500000 0010012
#define B2000000 0010013
#define B2500000 0010014
#define B3000000 0010015
#define B3500000 0010016
#define B4000000 0010017
#define CIBAUD 002003600000 /* input baud rate (not used) */
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
/* c_lflag bits */
#define ISIG 0000001
#define ICANON 0000002
#define XCASE 0000004
#define ECHO 0000010
#define ECHOE 0000020
#define ECHOK 0000040
#define ECHONL 0000100
#define NOFLSH 0000200
#define TOSTOP 0000400
#define ECHOCTL 0001000
#define ECHOPRT 0002000
#define ECHOKE 0004000
#define FLUSHO 0010000
#define PENDIN 0040000
#define IEXTEN 0100000
/* tcflow() and TCXONC use these */
#define TCOOFF 0
#define TCOON 1
#define TCIOFF 2
#define TCION 3
/* tcflush() and TCFLSH use these */
#define TCIFLUSH 0
#define TCOFLUSH 1
#define TCIOFLUSH 2
/* tcsetattr uses these */
#define TCSANOW 0
#define TCSADRAIN 1
#define TCSAFLUSH 2
#endif /* __ASM_AVR32_TERMBITS_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_TERMIOS_H
#define __ASM_AVR32_TERMIOS_H
#include <asm/termbits.h>
#include <asm/ioctls.h>
struct winsize {
unsigned short ws_row;
unsigned short ws_col;
unsigned short ws_xpixel;
unsigned short ws_ypixel;
};
#define NCC 8
struct termio {
unsigned short c_iflag; /* input mode flags */
unsigned short c_oflag; /* output mode flags */
unsigned short c_cflag; /* control mode flags */
unsigned short c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
/* modem lines */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002
#define TIOCM_RTS 0x004
#define TIOCM_ST 0x008
#define TIOCM_SR 0x010
#define TIOCM_CTS 0x020
#define TIOCM_CAR 0x040
#define TIOCM_RNG 0x080
#define TIOCM_DSR 0x100
#define TIOCM_CD TIOCM_CAR
#define TIOCM_RI TIOCM_RNG
#define TIOCM_OUT1 0x2000
#define TIOCM_OUT2 0x4000
#define TIOCM_LOOP 0x8000
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
/* line disciplines */
#define N_TTY 0
#define N_SLIP 1
#define N_MOUSE 2
#define N_PPP 3
#define N_STRIP 4
#define N_AX25 5
#define N_X25 6 /* X.25 async */
#define N_6PACK 7
#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
#define N_R3964 9 /* Reserved for Simatic R3964 module */
#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */
#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
#define N_HDLC 13 /* synchronous HDLC */
#define N_SYNC_PPP 14 /* synchronous PPP */
#define N_HCI 15 /* Bluetooth HCI UART */
#ifdef __KERNEL__
/* intr=^C quit=^\ erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
start=^Q stop=^S susp=^Z eol=\0
reprint=^R discard=^U werase=^W lnext=^V
eol2=\0
*/
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
#include <asm-generic/termios.h>
#endif /* __KERNEL__ */
#endif /* __ASM_AVR32_TERMIOS_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_THREAD_INFO_H
#define __ASM_AVR32_THREAD_INFO_H
#include <asm/page.h>
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#ifndef __ASSEMBLY__
#include <asm/types.h>
struct task_struct;
struct exec_domain;
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
__u32 cpu;
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;
__u8 supervisor_stack[0];
};
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = 1, \
.restart_block = { \
.fn = do_no_restart_syscall \
} \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/*
* Get the thread information struct from C.
* We do the usual trick and use the lower end of the stack for this
*/
static inline struct thread_info *current_thread_info(void)
{
unsigned long addr = ~(THREAD_SIZE - 1);
asm("and %0, sp" : "=r"(addr) : "0"(addr));
return (struct thread_info *)addr;
}
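/*
 * A stand-alone user-space illustration of the masking trick above:
 * each kernel stack is THREAD_SIZE bytes and THREAD_SIZE-aligned (8 KiB
 * with 4 KiB pages), so clearing the low bits of any address inside the
 * stack yields its base, where struct thread_info lives. The DEMO_
 * constant and the addresses are hypothetical.
 */
#include <assert.h>

#define DEMO_THREAD_SIZE	(2 * 4096)	/* PAGE_SIZE << THREAD_SIZE_ORDER */

int main(void)
{
	unsigned long stack_base = 0x91c42000UL;	/* 8 KiB aligned */
	unsigned long sp = stack_base + 0x1a34;		/* somewhere on that stack */

	assert((sp & ~(DEMO_THREAD_SIZE - 1UL)) == stack_base);
	return 0;
}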
/* thread information allocation */
#define alloc_thread_info(ti) \
((struct thread_info *) __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
#define free_thread_info(ti) free_pages((unsigned long)(ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* !__ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x40000000
/*
* Thread information flags
* - these are process state flags that various assembly files may need to access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_BREAKPOINT 5 /* true if we should break after return */
#define TIF_SINGLE_STEP 6 /* single step after next break */
#define TIF_MEMDIE 7
#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal */
#define TIF_USERSPACE 31 /* true if FS sets userspace */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_BREAKPOINT (1 << TIF_BREAKPOINT)
#define _TIF_SINGLE_STEP (1 << TIF_SINGLE_STEP)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
/* XXX: These two masks must never span more than 16 bits! */
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK 0x0000013e
/* work to do on any return to userspace */
#define _TIF_ALLWORK_MASK 0x0000013f
/* work to do on return from debug mode */
#define _TIF_DBGWORK_MASK 0x0000017e
#endif /* __ASM_AVR32_THREAD_INFO_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_TIMEX_H
#define __ASM_AVR32_TIMEX_H
/*
 * This is the frequency of the timer used for Linux's timer interrupt.
 * The value should be defined as accurately as possible, or under
 * certain circumstances Linux timekeeping might become inaccurate or
 * fail.
 *
 * For many systems the exact clock rate of the timer isn't known, but
 * due to the way this value is used we can get away with a wrong value
 * as long as it is:
 *
 *  - a multiple of HZ
 *  - a divisor of the actual rate
 *
 * 500000 is a good such cheat value.
 *
 * The obscure number 1193182 is the clock rate of the original i8254
 * timer in legacy PC hardware; that chip is never found in AVR32
 * systems.
*/
#define CLOCK_TICK_RATE 500000 /* Underlying HZ */
typedef unsigned long cycles_t;
static inline cycles_t get_cycles (void)
{
return 0;
}
extern int read_current_timer(unsigned long *timer_value);
#define ARCH_HAS_READ_CURRENT_TIMER 1
#endif /* __ASM_AVR32_TIMEX_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_TLB_H
#define __ASM_AVR32_TLB_H
#define tlb_start_vma(tlb, vma) \
flush_cache_range(vma, vma->vm_start, vma->vm_end)
#define tlb_end_vma(tlb, vma) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end)
#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while(0)
/*
* Flush whole TLB for MM
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
/*
* For debugging purposes
*/
extern void show_dtlb_entry(unsigned int index);
extern void dump_dtlb(void);
#endif /* __ASM_AVR32_TLB_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_TLBFLUSH_H
#define __ASM_AVR32_TLBFLUSH_H
#include <asm/mmu.h>
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes' TLB entries
* - flush_tlb_mm(mm) flushes the specified mm context TLBs
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
extern void flush_tlb(void);
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void __flush_tlb_page(unsigned long asid, unsigned long page);
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* Nothing to do */
}
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif /* __ASM_AVR32_TLBFLUSH_H */
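A minimal usage sketch of the interface above (the helper is hypothetical, not part of the patch): after the page table entries covering a VMA have been rewritten, the stale TLB entries for that range are dropped with flush_tlb_range().
#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical helper: invalidate the TLB entries for one VMA after
 * its page table entries have been changed. */
static void invalidate_vma_tlb(struct vm_area_struct *vma)
{
	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}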
#ifndef __ASM_AVR32_TOPOLOGY_H
#define __ASM_AVR32_TOPOLOGY_H
#include <asm-generic/topology.h>
#endif /* __ASM_AVR32_TOPOLOGY_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_TRAPS_H
#define __ASM_AVR32_TRAPS_H
#include <linux/list.h>
struct undef_hook {
struct list_head node;
u32 insn_mask;
u32 insn_val;
int (*fn)(struct pt_regs *regs, u32 insn);
};
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
#endif /* __ASM_AVR32_TRAPS_H */
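A short sketch of how a caller might use this hook interface to claim and emulate one otherwise-undefined encoding (the mask, value, handler and the zero-means-handled return convention are assumptions made for illustration, not taken from the patch):
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

static int emulate_my_insn(struct pt_regs *regs, u32 insn)
{
	/* ... decode and emulate the instruction, updating regs ... */
	return 0;	/* assumed convention: 0 = handled, nonzero = not ours */
}

static struct undef_hook my_hook = {
	.insn_mask	= 0xffffffff,	/* compare every bit of the opcode */
	.insn_val	= 0xe1b00000,	/* hypothetical encoding to claim */
	.fn		= emulate_my_insn,
};

/* Call register_undef_hook(&my_hook) at init time and
 * unregister_undef_hook(&my_hook) on teardown. */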
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_TYPES_H
#define __ASM_AVR32_TYPES_H
#ifndef __ASSEMBLY__
typedef unsigned short umode_t;
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
#endif
#endif /* __ASSEMBLY__ */
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#ifdef __KERNEL__
#define BITS_PER_LONG 32
#ifndef __ASSEMBLY__
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
/* Dma addresses are 32-bits wide. */
typedef u32 dma_addr_t;
#ifdef CONFIG_LBD
typedef u64 sector_t;
#define HAVE_SECTOR_T
#endif
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_AVR32_TYPES_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_UACCESS_H
#define __ASM_AVR32_UACCESS_H
#include <linux/errno.h>
#include <linux/sched.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
typedef struct {
unsigned int is_user_space;
} mm_segment_t;
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * if get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons (Data Segment Register?), these macros are misnamed.
*/
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define segment_eq(a,b) ((a).is_user_space == (b).is_user_space)
#define USER_ADDR_LIMIT 0x80000000
#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(1)
#define get_ds() (KERNEL_DS)
static inline mm_segment_t get_fs(void)
{
return MAKE_MM_SEG(test_thread_flag(TIF_USERSPACE));
}
static inline void set_fs(mm_segment_t s)
{
if (s.is_user_space)
set_thread_flag(TIF_USERSPACE);
else
clear_thread_flag(TIF_USERSPACE);
}
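/*
 * Illustrative usage pattern (not part of the patch): code that needs
 * to pass a kernel buffer to a routine expecting a __user pointer
 * typically widens the address limit around the call:
 *
 *	mm_segment_t oldfs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... call the routine with the kernel buffer ...
 *	set_fs(oldfs);
 */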
/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
*
* We do the following checks:
* 1. Is the access from kernel space?
* 2. Does (addr + size) set the carry bit?
* 3. Is (addr + size) a negative number (i.e. >= 0x80000000)?
*
 * If yes on the first check, access is granted.
 * If yes on any of the other checks, access is denied.
*/
#define __range_ok(addr, size) \
(test_thread_flag(TIF_USERSPACE) \
&& (((unsigned long)(addr) >= 0x80000000) \
|| ((unsigned long)(size) > 0x80000000) \
|| (((unsigned long)(addr) + (unsigned long)(size)) > 0x80000000)))
#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
static inline int
verify_area(int type, const void __user *addr, unsigned long size)
{
return access_ok(type, addr, size) ? 0 : -EFAULT;
}
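/*
 * Worked example (illustrative only): with USER_DS in effect
 * (TIF_USERSPACE set), addr = 0x7ffff000 and size = 0x2000 give
 * addr + size = 0x80001000, which is above USER_ADDR_LIMIT, so
 * __range_ok() is nonzero and access_ok() fails even though addr
 * itself is below the limit.
 */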
/* Generic arbitrary sized copy. Return the number of bytes NOT copied */
extern __kernel_size_t __copy_user(void *to, const void *from,
__kernel_size_t n);
extern __kernel_size_t copy_to_user(void __user *to, const void *from,
__kernel_size_t n);
extern __kernel_size_t copy_from_user(void *to, const void __user *from,
__kernel_size_t n);
static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
__kernel_size_t n)
{
return __copy_user((void __force *)to, from, n);
}
static inline __kernel_size_t __copy_from_user(void *to,
const void __user *from,
__kernel_size_t n)
{
return __copy_user(to, (const void __force *)from, n);
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
/*
* put_user: - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Returns zero on success, or -EFAULT on error.
*/
#define put_user(x,ptr) \
__put_user_check((x),(ptr),sizeof(*(ptr)))
/*
* get_user: - Get a simple variable from user space.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define get_user(x,ptr) \
__get_user_check((x),(ptr),sizeof(*(ptr)))
/*
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
*/
#define __put_user(x,ptr) \
__put_user_nocheck((x),(ptr),sizeof(*(ptr)))
/*
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define __get_user(x,ptr) \
__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
extern int __get_user_bad(void);
extern int __put_user_bad(void);
#define __get_user_nocheck(x, ptr, size) \
({ \
typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \
int __gu_err = 0; \
\
switch (size) { \
case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break; \
case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break; \
case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break; \
case 8: __get_user_asm("d", __gu_val, ptr, __gu_err); break; \
default: __gu_err = __get_user_bad(); break; \
} \
\
x = __gu_val; \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \
const typeof(*(ptr)) __user * __gu_addr = (ptr); \
int __gu_err = 0; \
\
if (access_ok(VERIFY_READ, __gu_addr, size)) { \
switch (size) { \
case 1: \
__get_user_asm("ub", __gu_val, __gu_addr, \
__gu_err); \
break; \
case 2: \
__get_user_asm("uh", __gu_val, __gu_addr, \
__gu_err); \
break; \
case 4: \
__get_user_asm("w", __gu_val, __gu_addr, \
__gu_err); \
break; \
case 8: \
__get_user_asm("d", __gu_val, __gu_addr, \
__gu_err); \
break; \
default: \
__gu_err = __get_user_bad(); \
break; \
} \
} else { \
__gu_err = -EFAULT; \
} \
x = __gu_val; \
__gu_err; \
})
#define __get_user_asm(suffix, __gu_val, ptr, __gu_err) \
asm volatile( \
"1: ld." suffix " %1, %3 \n" \
"2: \n" \
" .section .fixup, \"ax\" \n" \
"3: mov %0, %4 \n" \
" rjmp 2b \n" \
" .previous \n" \
" .section __ex_table, \"a\" \n" \
" .long 1b, 3b \n" \
" .previous \n" \
: "=r"(__gu_err), "=r"(__gu_val) \
: "0"(__gu_err), "m"(*(ptr)), "i"(-EFAULT))
#define __put_user_nocheck(x, ptr, size) \
({ \
typeof(*(ptr)) __pu_val; \
int __pu_err = 0; \
\
__pu_val = (x); \
switch (size) { \
case 1: __put_user_asm("b", ptr, __pu_val, __pu_err); break; \
case 2: __put_user_asm("h", ptr, __pu_val, __pu_err); break; \
case 4: __put_user_asm("w", ptr, __pu_val, __pu_err); break; \
case 8: __put_user_asm("d", ptr, __pu_val, __pu_err); break; \
default: __pu_err = __put_user_bad(); break; \
} \
__pu_err; \
})
#define __put_user_check(x, ptr, size) \
({ \
typeof(*(ptr)) __pu_val; \
typeof(*(ptr)) __user *__pu_addr = (ptr); \
int __pu_err = 0; \
\
__pu_val = (x); \
if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \
switch (size) { \
case 1: \
__put_user_asm("b", __pu_addr, __pu_val, \
__pu_err); \
break; \
case 2: \
__put_user_asm("h", __pu_addr, __pu_val, \
__pu_err); \
break; \
case 4: \
__put_user_asm("w", __pu_addr, __pu_val, \
__pu_err); \
break; \
case 8: \
__put_user_asm("d", __pu_addr, __pu_val, \
__pu_err); \
break; \
default: \
__pu_err = __put_user_bad(); \
break; \
} \
} else { \
__pu_err = -EFAULT; \
} \
__pu_err; \
})
#define __put_user_asm(suffix, ptr, __pu_val, __gu_err) \
asm volatile( \
"1: st." suffix " %1, %3 \n" \
"2: \n" \
" .section .fixup, \"ax\" \n" \
"3: mov %0, %4 \n" \
" rjmp 2b \n" \
" .previous \n" \
" .section __ex_table, \"a\" \n" \
" .long 1b, 3b \n" \
" .previous \n" \
: "=r"(__gu_err), "=m"(*(ptr)) \
: "0"(__gu_err), "r"(__pu_val), "i"(-EFAULT))
extern __kernel_size_t clear_user(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __clear_user(void __user *addr, __kernel_size_t size);
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *__s, long __n);
extern long __strnlen_user(const char __user *__s, long __n);
#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
struct exception_table_entry
{
unsigned long insn, fixup;
};
#endif /* __ASM_AVR32_UACCESS_H */
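A brief sketch of how these interfaces fit together (the structure and function names are hypothetical, not part of the patch):
#include <linux/errno.h>
#include <asm/uaccess.h>

struct my_request {			/* hypothetical user-supplied record */
	unsigned long addr;
	unsigned long len;
};

static int fetch_request(const void __user *uptr, struct my_request *req,
			 int __user *flag_ptr, int *flag)
{
	/* copy_from_user() returns the number of bytes NOT copied,
	 * so any nonzero result means part of the range faulted. */
	if (copy_from_user(req, uptr, sizeof(*req)))
		return -EFAULT;

	/* get_user() performs the access_ok() check itself and
	 * returns 0 on success or -EFAULT on error. */
	if (get_user(*flag, flag_ptr))
		return -EFAULT;

	return 0;
}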
#ifndef __ASM_AVR32_UCONTEXT_H
#define __ASM_AVR32_UCONTEXT_H
struct ucontext {
unsigned long uc_flags;
struct ucontext * uc_link;
stack_t uc_stack;
struct sigcontext uc_mcontext;
sigset_t uc_sigmask;
};
#endif /* __ASM_AVR32_UCONTEXT_H */
#ifndef __ASM_AVR32_UNALIGNED_H
#define __ASM_AVR32_UNALIGNED_H
/*
* AVR32 can handle some unaligned accesses, depending on the
* implementation. The AVR32 AP implementation can handle unaligned
* words, but halfwords must be halfword-aligned, and doublewords must
* be word-aligned.
*
* TODO: Make all this CPU-specific and optimize.
*/
#include <linux/string.h>
/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
#define get_unaligned(ptr) \
({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
#define put_unaligned(val, ptr) \
({ __typeof__(*(ptr)) __tmp = (val); \
memmove((ptr), &__tmp, sizeof(*(ptr))); \
(void)0; })
#endif /* __ASM_AVR32_UNALIGNED_H */
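A small sketch of where get_unaligned() matters (the buffer layout is hypothetical, not part of the patch): a multi-byte field at an odd offset in a packed buffer must not be read through a plain pointer cast, since halfword and doubleword loads have alignment requirements on this implementation.
#include <asm/unaligned.h>

/* Hypothetical: a 32-bit length field starts at byte offset 1. */
static inline unsigned int packet_length(unsigned char *buf)
{
	return get_unaligned((unsigned int *)(buf + 1));
}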
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_UNISTD_H
#define __ASM_AVR32_UNISTD_H
/*
* This file contains the system call numbers.
*/
#define __NR_restart_syscall 0
#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
#define __NR_write 4
#define __NR_open 5
#define __NR_close 6
#define __NR_umask 7
#define __NR_creat 8
#define __NR_link 9
#define __NR_unlink 10
#define __NR_execve 11
#define __NR_chdir 12
#define __NR_time 13
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_chown 16
#define __NR_lchown 17
#define __NR_lseek 18
#define __NR__llseek 19
#define __NR_getpid 20
#define __NR_mount 21
#define __NR_umount2 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
#define __NR_ptrace 26
#define __NR_alarm 27
#define __NR_pause 28
#define __NR_utime 29
#define __NR_stat 30
#define __NR_fstat 31
#define __NR_lstat 32
#define __NR_access 33
#define __NR_chroot 34
#define __NR_sync 35
#define __NR_fsync 36
#define __NR_kill 37
#define __NR_rename 38
#define __NR_mkdir 39
#define __NR_rmdir 40
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
#define __NR_clone 44
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
#define __NR_getcwd 48
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_setfsuid 52
#define __NR_setfsgid 53
#define __NR_ioctl 54
#define __NR_fcntl 55
#define __NR_setpgid 56
#define __NR_mremap 57
#define __NR_setresuid 58
#define __NR_getresuid 59
#define __NR_setreuid 60
#define __NR_setregid 61
#define __NR_ustat 62
#define __NR_dup2 63
#define __NR_getppid 64
#define __NR_getpgrp 65
#define __NR_setsid 66
#define __NR_rt_sigaction 67
#define __NR_rt_sigreturn 68
#define __NR_rt_sigprocmask 69
#define __NR_rt_sigpending 70
#define __NR_rt_sigtimedwait 71
#define __NR_rt_sigqueueinfo 72
#define __NR_rt_sigsuspend 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
#define __NR_getrlimit 76 /* SuS compliant getrlimit */
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
#define __NR_select 82
#define __NR_symlink 83
#define __NR_fchdir 84
#define __NR_readlink 85
#define __NR_pread 86
#define __NR_pwrite 87
#define __NR_swapon 88
#define __NR_reboot 89
#define __NR_mmap2 90
#define __NR_munmap 91
#define __NR_truncate 92
#define __NR_ftruncate 93
#define __NR_fchmod 94
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
#define __NR_wait4 98
#define __NR_statfs 99
#define __NR_fstatfs 100
#define __NR_vhangup 101
#define __NR_sigaltstack 102
#define __NR_syslog 103
#define __NR_setitimer 104
#define __NR_getitimer 105
#define __NR_swapoff 106
#define __NR_sysinfo 107
#define __NR_ipc 108
#define __NR_sendfile 109
#define __NR_setdomainname 110
#define __NR_uname 111
#define __NR_adjtimex 112
#define __NR_mprotect 113
#define __NR_vfork 114
#define __NR_init_module 115
#define __NR_delete_module 116
#define __NR_quotactl 117
#define __NR_getpgid 118
#define __NR_bdflush 119
#define __NR_sysfs 120
#define __NR_personality 121
#define __NR_afs_syscall 122 /* Syscall for Andrew File System */
#define __NR_getdents 123
#define __NR_flock 124
#define __NR_msync 125
#define __NR_readv 126
#define __NR_writev 127
#define __NR_getsid 128
#define __NR_fdatasync 129
#define __NR__sysctl 130
#define __NR_mlock 131
#define __NR_munlock 132
#define __NR_mlockall 133
#define __NR_munlockall 134
#define __NR_sched_setparam 135
#define __NR_sched_getparam 136
#define __NR_sched_setscheduler 137
#define __NR_sched_getscheduler 138
#define __NR_sched_yield 139
#define __NR_sched_get_priority_max 140
#define __NR_sched_get_priority_min 141
#define __NR_sched_rr_get_interval 142
#define __NR_nanosleep 143
#define __NR_poll 144
#define __NR_nfsservctl 145
#define __NR_setresgid 146
#define __NR_getresgid 147
#define __NR_prctl 148
#define __NR_socket 149
#define __NR_bind 150
#define __NR_connect 151
#define __NR_listen 152
#define __NR_accept 153
#define __NR_getsockname 154
#define __NR_getpeername 155
#define __NR_socketpair 156
#define __NR_send 157
#define __NR_recv 158
#define __NR_sendto 159
#define __NR_recvfrom 160
#define __NR_shutdown 161
#define __NR_setsockopt 162
#define __NR_getsockopt 163
#define __NR_sendmsg 164
#define __NR_recvmsg 165
#define __NR_truncate64 166
#define __NR_ftruncate64 167
#define __NR_stat64 168
#define __NR_lstat64 169
#define __NR_fstat64 170
#define __NR_pivot_root 171
#define __NR_mincore 172
#define __NR_madvise 173
#define __NR_getdents64 174
#define __NR_fcntl64 175
#define __NR_gettid 176
#define __NR_readahead 177
#define __NR_setxattr 178
#define __NR_lsetxattr 179
#define __NR_fsetxattr 180
#define __NR_getxattr 181
#define __NR_lgetxattr 182
#define __NR_fgetxattr 183
#define __NR_listxattr 184
#define __NR_llistxattr 185
#define __NR_flistxattr 186
#define __NR_removexattr 187
#define __NR_lremovexattr 188
#define __NR_fremovexattr 189
#define __NR_tkill 190
#define __NR_sendfile64 191
#define __NR_futex 192
#define __NR_sched_setaffinity 193
#define __NR_sched_getaffinity 194
#define __NR_capget 195
#define __NR_capset 196
#define __NR_io_setup 197
#define __NR_io_destroy 198
#define __NR_io_getevents 199
#define __NR_io_submit 200
#define __NR_io_cancel 201
#define __NR_fadvise64 202
#define __NR_exit_group 203
#define __NR_lookup_dcookie 204
#define __NR_epoll_create 205
#define __NR_epoll_ctl 206
#define __NR_epoll_wait 207
#define __NR_remap_file_pages 208
#define __NR_set_tid_address 209
#define __NR_timer_create 210
#define __NR_timer_settime 211
#define __NR_timer_gettime 212
#define __NR_timer_getoverrun 213
#define __NR_timer_delete 214
#define __NR_clock_settime 215
#define __NR_clock_gettime 216
#define __NR_clock_getres 217
#define __NR_clock_nanosleep 218
#define __NR_statfs64 219
#define __NR_fstatfs64 220
#define __NR_tgkill 221
/* 222 reserved for tux */
#define __NR_utimes 223
#define __NR_fadvise64_64 224
#define __NR_cacheflush 225
#define __NR_vserver 226
#define __NR_mq_open 227
#define __NR_mq_unlink 228
#define __NR_mq_timedsend 229
#define __NR_mq_timedreceive 230
#define __NR_mq_notify 231
#define __NR_mq_getsetattr 232
#define __NR_kexec_load 233
#define __NR_waitid 234
#define __NR_add_key 235
#define __NR_request_key 236
#define __NR_keyctl 237
#define __NR_ioprio_set 238
#define __NR_ioprio_get 239
#define __NR_inotify_init 240
#define __NR_inotify_add_watch 241
#define __NR_inotify_rm_watch 242
#define __NR_openat 243
#define __NR_mkdirat 244
#define __NR_mknodat 245
#define __NR_fchownat 246
#define __NR_futimesat 247
#define __NR_fstatat64 248
#define __NR_unlinkat 249
#define __NR_renameat 250
#define __NR_linkat 251
#define __NR_symlinkat 252
#define __NR_readlinkat 253
#define __NR_fchmodat 254
#define __NR_faccessat 255
#define __NR_pselect6 256
#define __NR_ppoll 257
#define __NR_unshare 258
#define __NR_set_robust_list 259
#define __NR_get_robust_list 260
#define __NR_splice 261
#define __NR_sync_file_range 262
#define __NR_tee 263
#define __NR_vmsplice 264
#define NR_syscalls 265
/*
* AVR32 calling convention for system calls:
* - System call number in r8
* - Parameters in r12 and downwards to r9 as well as r6 and r5.
* - Return value in r12
*/
/*
 * User-visible error numbers are in the range -1 to -124: see
* <asm-generic/errno.h>
*/
#define __syscall_return(type, res) do { \
if ((unsigned long)(res) >= (unsigned long)(-125)) { \
errno = -(res); \
res = -1; \
} \
return (type) (res); \
} while (0)
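/*
 * Worked example (illustrative only): if a system call returns -14
 * (-EFAULT), then (unsigned long)(-14) >= (unsigned long)(-125), so
 * the wrapper stores errno = 14 and returns -1 to the caller.
 */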
#ifdef __KERNEL__
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#endif
#if defined(__KERNEL_SYSCALLS__) || defined(__CHECKER__)
#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/signal.h>
struct pt_regs;
/*
 * We need this inline - forking from kernel space will result
 * in NO COPY ON WRITE (!!!) until an execve is executed.  This
 * is no problem except for the stack.  This is handled by not letting
 * main() use the stack at all after fork().  Thus, no function
 * calls - which means inline code for fork too, as otherwise we
 * would use the stack upon exit from 'fork()'.
*
* Actually only pause and fork are needed inline, so that there
* won't be any messing with the stack from main(), but we define
* some others too.
*/
static inline int execve(const char *file, char **argv, char **envp)
{
register long scno asm("r8") = __NR_execve;
register long sc1 asm("r12") = (long)file;
register long sc2 asm("r11") = (long)argv;
register long sc3 asm("r10") = (long)envp;
int res;
asm volatile("scall"
: "=r"(sc1)
: "r"(scno), "0"(sc1), "r"(sc2), "r"(sc3)
: "lr", "memory");
res = sc1;
__syscall_return(int, res);
}
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
struct pt_regs *regs);
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
asmlinkage int sys_pipe(unsigned long __user *filedes);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, off_t offset);
asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len);
asmlinkage int sys_fork(struct pt_regs *regs);
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidptr,
unsigned long child_tidptr, struct pt_regs *regs);
asmlinkage int sys_vfork(struct pt_regs *regs);
asmlinkage int sys_execve(char __user *ufilename, char __user *__user *uargv,
char __user *__user *uenvp, struct pt_regs *regs);
#endif
/*
* "Conditional" syscalls
*
* What we want is __attribute__((weak,alias("sys_ni_syscall"))),
* but it doesn't work on all toolchains, so we just do it by hand
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
#endif /* __ASM_AVR32_UNISTD_H */
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Note: We may not need these definitions for AVR32, as we don't
* support a.out.
*/
#ifndef __ASM_AVR32_USER_H
#define __ASM_AVR32_USER_H
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/page.h>
/*
* Core file format: The core file is written in such a way that gdb
* can understand it and provide useful information to the user (under
* linux we use the `trad-core' bfd). The file contents are as follows:
*
* upage: 1 page consisting of a user struct that tells gdb
* what is present in the file. Directly after this is a
* copy of the task_struct, which is currently not used by gdb,
* but it may come in handy at some point. All of the registers
* are stored as part of the upage. The upage should always be
* only one page long.
* data: The data segment follows next. We use current->end_text to
* current->brk to pick up all of the user variables, plus any memory
 * that may have been sbrk'ed.  No attempt is made to determine if a
 * page is demand-zero or if a page is totally unused; we just cover
 * the entire range.  All of the addresses are rounded in such a way
* that an integral number of pages is written.
* stack: We need the stack information in order to get a meaningful
* backtrace. We need to write the data from usp to
* current->start_stack, so we round each of these in order to be able
* to write an integer number of pages.
*/
struct user_fpu_struct {
/* We have no FPU (yet) */
};
struct user {
struct pt_regs regs; /* entire machine state */
size_t u_tsize; /* text size (pages) */
size_t u_dsize; /* data size (pages) */
size_t u_ssize; /* stack size (pages) */
unsigned long start_code; /* text starting address */
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
struct regs * u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
#define NBPG PAGE_SIZE
#define UPAGES 1
#define HOST_TEXT_START_ADDR (u.start_code)
#define HOST_DATA_START_ADDR (u.start_data)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif /* __ASM_AVR32_USER_H */
......@@ -31,6 +31,7 @@
#define EM_M32R 88 /* Renesas M32R */
#define EM_H8_300 46 /* Renesas H8/300,300H,H8S */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
#define EM_AVR32 0x18ad /* Atmel AVR32 */
/*
* This is an interim value that we will use until the committee comes
......
......@@ -277,7 +277,7 @@ config DEBUG_HIGHMEM
config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
depends on BUG
depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV
depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV
default !EMBEDDED
help
Say Y here to make BUG() panics output the file name and line number
......@@ -315,7 +315,7 @@ config DEBUG_VM
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390)
depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32)
default y if DEBUG_INFO && UML
help
If you say Y here the resulting kernel image will be slightly larger
......