Commit 6bc9a396 authored by Chen Liqin, committed by Arnd Bergmann

score: Add support for Sunplus S+core architecture

This is the complete set of files for the new Score architecture port of Linux.
The Score instruction set supports 16-bit, 32-bit and 64-bit instructions;
Score SoCs have been used in game machines and LCD TVs.
Signed-off-by: Chen Liqin <liqin.chen@sunplusct.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parent 0732f877
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
mainmenu "Linux/SCORE Kernel Configuration"
menu "Machine selection"
choice
prompt "System type"
default MACH_SPCT6600
config ARCH_SCORE7
bool "SCORE7 processor"
select SYS_SUPPORTS_32BIT_KERNEL
select CPU_SCORE7
select GENERIC_HAS_IOMAP
config MACH_SPCT6600
bool "SPCT6600 series based machines"
select SYS_SUPPORTS_32BIT_KERNEL
select CPU_SCORE7
select GENERIC_HAS_IOMAP
config SCORE_SIM
bool "Score simulator"
select SYS_SUPPORTS_32BIT_KERNEL
select CPU_SCORE7
select GENERIC_HAS_IOMAP
endchoice
endmenu
config CPU_SCORE7
bool
config GENERIC_IOMAP
def_bool y
config NO_DMA
bool
default y
config RWSEM_GENERIC_SPINLOCK
def_bool y
config GENERIC_FIND_NEXT_BIT
def_bool y
config GENERIC_HWEIGHT
def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_CLOCKEVENTS
def_bool y
config GENERIC_TIME
def_bool y
config SCHED_NO_NO_OMIT_FRAME_POINTER
def_bool y
config GENERIC_HARDIRQS_NO__DO_IRQ
def_bool y
config GENERIC_SYSCALL_TABLE
def_bool y
config SCORE_L1_CACHE_SHIFT
int
default "4"
menu "Kernel type"
config 32BIT
def_bool y
config GENERIC_HARDIRQS
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
config ARCH_POPULATES_NODE_MAP
def_bool y
source "mm/Kconfig"
config MEMORY_START
hex
default 0xa0000000
source "kernel/time/Kconfig"
source "kernel/Kconfig.hz"
source "kernel/Kconfig.preempt"
endmenu
config LOCKDEP_SUPPORT
def_bool y
config STACKTRACE_SUPPORT
def_bool y
source "init/Kconfig"
config PROBE_INITRD_HEADER
bool "Probe initrd header created by addinitrd"
depends on BLK_DEV_INITRD
help
Probe for an initrd header at the last page of the kernel image.
Say Y here if you are using arch/score/boot/addinitrd.c to
append an initrd or initramfs image to the kernel image.
Otherwise, say N.
config MMU
def_bool y
menu "Executable file formats"
source "fs/Kconfig.binfmt"
endmenu
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
source "arch/score/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
source "lib/Kconfig.debug"
config CMDLINE
string "Default kernel command string"
default ""
help
On some platforms, there is currently no way for the boot loader to
pass arguments to the kernel. For these platforms, you can supply
some command-line options at build time by entering them here. In
other cases you can specify kernel args so that you don't have
to set them up in board prom initialization routines.
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
depends on DEBUG_KERNEL
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
This option will slow down process creation somewhat.
config RUNTIME_DEBUG
bool "Enable run-time debugging"
depends on DEBUG_KERNEL
help
If you say Y here, some debugging macros will do run-time checking.
If you say N here, those macros will mostly turn to no-ops. See
include/asm-score/debug.h for debugging macros.
If unsure, say N.
endmenu
#
# arch/score/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
KBUILD_DEFCONFIG := spct6600_defconfig
CROSS_COMPILE := score-linux-
#
# CPU-dependent compiler/assembler options for optimization.
#
cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
-D__linux__ -ffunction-sections -ffreestanding
#
# Board-dependent options and extra files
#
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
MODFLAGS += -mlong-calls
LDFLAGS += --oformat elf32-littlescore
LDFLAGS_vmlinux += -G0 -static -nostdlib
head-y := arch/score/kernel/head.o
libs-y += arch/score/lib/
core-y += arch/score/kernel/ arch/score/mm/
boot := arch/score/boot
vmlinux.bin: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
archclean:
@$(MAKE) $(clean)=$(boot)
define archhelp
echo ' vmlinux.bin - Raw binary boot image'
echo
echo ' These will be default as appropriate for a configured platform.'
endef
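#
# Example invocation (sketch): with this Makefile in place, the raw
# boot image can be built from the top of the tree with
#
#	make ARCH=score vmlinux.bin
#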
#
# arch/score/boot/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
targets := vmlinux.bin
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
clean-files += vmlinux.bin
include include/asm-generic/Kbuild.asm
header-y +=
#ifndef _ASM_SCORE_ASMMACRO_H
#define _ASM_SCORE_ASMMACRO_H
#include <asm/asm-offsets.h>
#ifdef __ASSEMBLY__
.macro SAVE_ALL
mfcr r30, cr0
mv r31, r0
nop
/* if UMs == 1, change stack. */
slli.c r30, r30, 28
bpl 1f
la r31, kernelsp
lw r31, [r31]
1:
mv r30, r0
addri r0, r31, -PT_SIZE
sw r30, [r0, PT_R0]
.set r1
sw r1, [r0, PT_R1]
.set nor1
sw r2, [r0, PT_R2]
sw r3, [r0, PT_R3]
sw r4, [r0, PT_R4]
sw r5, [r0, PT_R5]
sw r6, [r0, PT_R6]
sw r7, [r0, PT_R7]
sw r8, [r0, PT_R8]
sw r9, [r0, PT_R9]
sw r10, [r0, PT_R10]
sw r11, [r0, PT_R11]
sw r12, [r0, PT_R12]
sw r13, [r0, PT_R13]
sw r14, [r0, PT_R14]
sw r15, [r0, PT_R15]
sw r16, [r0, PT_R16]
sw r17, [r0, PT_R17]
sw r18, [r0, PT_R18]
sw r19, [r0, PT_R19]
sw r20, [r0, PT_R20]
sw r21, [r0, PT_R21]
sw r22, [r0, PT_R22]
sw r23, [r0, PT_R23]
sw r24, [r0, PT_R24]
sw r25, [r0, PT_R25]
sw r26, [r0, PT_R26]
sw r27, [r0, PT_R27]
sw r28, [r0, PT_R28]
sw r29, [r0, PT_R29]
orri r28, r0, 0x1fff
li r31, 0x00001fff
xor r28, r28, r31
mfcehl r30, r31
sw r30, [r0, PT_CEH]
sw r31, [r0, PT_CEL]
mfcr r31, cr0
sw r31, [r0, PT_PSR]
mfcr r31, cr1
sw r31, [r0, PT_CONDITION]
mfcr r31, cr2
sw r31, [r0, PT_ECR]
mfcr r31, cr5
srli r31, r31, 1
slli r31, r31, 1
sw r31, [r0, PT_EPC]
.endm
.macro RESTORE_ALL_AND_RET
mfcr r30, cr0
srli r30, r30, 1
slli r30, r30, 1
mtcr r30, cr0
nop
nop
nop
nop
nop
.set r1
ldis r1, 0x00ff
and r30, r30, r1
not r1, r1
lw r31, [r0, PT_PSR]
and r31, r31, r1
.set nor1
or r31, r31, r30
mtcr r31, cr0
nop
nop
nop
nop
nop
lw r30, [r0, PT_CONDITION]
mtcr r30, cr1
nop
nop
nop
nop
nop
lw r30, [r0, PT_CEH]
lw r31, [r0, PT_CEL]
mtcehl r30, r31
.set r1
lw r1, [r0, PT_R1]
.set nor1
lw r2, [r0, PT_R2]
lw r3, [r0, PT_R3]
lw r4, [r0, PT_R4]
lw r5, [r0, PT_R5]
lw r6, [r0, PT_R6]
lw r7, [r0, PT_R7]
lw r8, [r0, PT_R8]
lw r9, [r0, PT_R9]
lw r10, [r0, PT_R10]
lw r11, [r0, PT_R11]
lw r12, [r0, PT_R12]
lw r13, [r0, PT_R13]
lw r14, [r0, PT_R14]
lw r15, [r0, PT_R15]
lw r16, [r0, PT_R16]
lw r17, [r0, PT_R17]
lw r18, [r0, PT_R18]
lw r19, [r0, PT_R19]
lw r20, [r0, PT_R20]
lw r21, [r0, PT_R21]
lw r22, [r0, PT_R22]
lw r23, [r0, PT_R23]
lw r24, [r0, PT_R24]
lw r25, [r0, PT_R25]
lw r26, [r0, PT_R26]
lw r27, [r0, PT_R27]
lw r28, [r0, PT_R28]
lw r29, [r0, PT_R29]
lw r30, [r0, PT_EPC]
lw r0, [r0, PT_R0]
mtcr r30, cr5
rte
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_ASMMACRO_H */
#ifndef _ASM_SCORE_ATOMIC_H
#define _ASM_SCORE_ATOMIC_H
#include <asm-generic/atomic.h>
#endif /* _ASM_SCORE_ATOMIC_H */
#ifndef _ASM_SCORE_AUXVEC_H
#define _ASM_SCORE_AUXVEC_H
#endif /* _ASM_SCORE_AUXVEC_H */
#ifndef _ASM_SCORE_BITOPS_H
#define _ASM_SCORE_BITOPS_H
#include <asm/byteorder.h> /* swab32 */
#include <asm/system.h> /* save_flags */
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
#include <asm-generic/bitops.h>
#include <asm-generic/bitops/__fls.h>
#endif /* _ASM_SCORE_BITOPS_H */
#ifndef _ASM_SCORE_BITSPERLONG_H
#define _ASM_SCORE_BITSPERLONG_H
#include <asm-generic/bitsperlong.h>
#endif /* _ASM_SCORE_BITSPERLONG_H */
#ifndef _ASM_SCORE_BUG_H
#define _ASM_SCORE_BUG_H
#include <asm-generic/bug.h>
#endif /* _ASM_SCORE_BUG_H */
#ifndef _ASM_SCORE_BUGS_H
#define _ASM_SCORE_BUGS_H
#include <asm-generic/bugs.h>
#endif /* _ASM_SCORE_BUGS_H */
#ifndef _ASM_SCORE_BYTEORDER_H
#define _ASM_SCORE_BYTEORDER_H
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_SCORE_BYTEORDER_H */
#ifndef _ASM_SCORE_CACHE_H
#define _ASM_SCORE_CACHE_H
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif /* _ASM_SCORE_CACHE_H */
#ifndef _ASM_SCORE_CACHEFLUSH_H
#define _ASM_SCORE_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
extern void (*flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
extern void (*flush_cache_range)(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma,
unsigned long page, unsigned long pfn);
extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*flush_data_cache_page)(unsigned long addr);
extern void s7_flush_cache_all(void);
#define flush_cache_dup_mm(mm) do {} while (0)
#define flush_dcache_page(page) do {} while (0)
#define flush_dcache_mmap_lock(mapping) do {} while (0)
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
#define flush_cache_vmap(start, end) do {} while (0)
#define flush_cache_vunmap(start, end) do {} while (0)
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
if (vma->vm_flags & VM_EXEC) {
void *v = page_address(page);
flush_icache_range((unsigned long) v,
(unsigned long) v + PAGE_SIZE);
}
}
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
if ((vma->vm_flags & VM_EXEC)) \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
} while (0)
#endif /* _ASM_SCORE_CACHEFLUSH_H */
#ifndef _ASM_SCORE_CHECKSUM_H
#define _ASM_SCORE_CHECKSUM_H
#include <linux/in6.h>
#include <asm/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
unsigned int csum_partial(const void *buff, int len, __wsum sum);
unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len,
unsigned int sum, int *csum_err);
unsigned int csum_partial_copy(const char *src, char *dst,
int len, unsigned int sum);
/*
* this is a new version of the above that records errors it finds in *errp,
* but continues and zeros the rest of the buffer.
*/
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
{
sum = csum_partial(src, len, sum);
if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
return (__force __wsum) -1; /* invalid checksum */
}
return sum;
}
#define csum_partial_copy_nocheck csum_partial_copy
/*
* Fold a partial checksum without adding pseudo headers
*/
static inline __sum16 csum_fold(__wsum sum)
{
/* Two fold iterations are always enough to reduce the sum to 16 bits. */
__asm__ __volatile__(
".set volatile\n\t"
".set\tr1\n\t"
"slli\tr1,%0, 16\n\t"
"add\t%0,%0, r1\n\t"
"cmp.c\tr1, %0\n\t"
"srli\t%0, %0, 16\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:ldi\tr30, 0xffff\n\t"
"xor\t%0, %0, r30\n\t"
"slli\t%0, %0, 16\n\t"
"srli\t%0, %0, 16\n\t"
".set\tnor1\n\t"
".set optimize\n\t"
: "=r" (sum)
: "0" (sum));
return sum;
}
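/*
 * For illustration only (not part of this port): the asm fold above is
 * equivalent to this portable C sketch, assuming a 32-bit __wsum:
 *
 *	u32 s = (__force u32) sum;
 *	s = (s >> 16) + (s & 0xffff);	-- add high half into low half
 *	s += s >> 16;			-- absorb the remaining carry
 *	return (__force __sum16) ~s;	-- one's complement, truncated to 16 bits
 */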
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
unsigned long dummy;
__asm__ __volatile__(
".set volatile\n\t"
".set\tnor1\n\t"
"lw\t%0, [%1]\n\t"
"subri\t%2, %2, 4\n\t"
"slli\t%2, %2, 2\n\t"
"lw\t%3, [%1, 4]\n\t"
"add\t%2, %2, %1\n\t"
"add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"lw\t%3, [%1, 8]\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
"add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"lw\t%3, [%1, 12]\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n"
"1:\tlw\t%3, [%1, 16]\n\t"
"addi\t%1, 4\n\t"
"add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"bleu\t2f\n\t"
"addi\t%0, 0x1\n"
"2:cmp.c\t%2, %1\n\t"
"bne\t1b\n\t"
".set\tr1\n\t"
".set optimize\n\t"
: "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy)
: "1" (iph), "2" (ihl));
return csum_fold(sum);
}
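/*
 * Illustrative note (not part of this port): ignoring carries, the asm
 * above computes word[0] + word[1] + ... + word[ihl-1] over the ihl
 * 32-bit words of the IP header, folding each carry back in with the
 * cmp.c/bleu/addi sequence, then reduces the result with csum_fold().
 * For a minimal 20-byte header, ihl == 5.
 */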
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
unsigned long tmp = (ntohs(len) << 16) + proto * 256;
__asm__ __volatile__(
".set volatile\n\t"
"add\t%0, %0, %2\n\t"
"cmp.c\t%2, %0\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
"add\t%0, %0, %3\n\t"
"cmp.c\t%3, %0\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
"add\t%0, %0, %4\n\t"
"cmp.c\t%4, %0\n\t"
"bleu\t1f\n\t"
"addi\t%0, 0x1\n\t"
"1:\n\t"
".set optimize\n\t"
: "=r" (sum)
: "0" (daddr), "r"(saddr),
"r" (tmp),
"r" (sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline unsigned short ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__(
".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t"
".set\tnoat\n\t"
"addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t"
"sltu\t$1, %0, %5\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %6\t\t\t# csum\n\t"
"sltu\t$1, %0, %6\n\t"
"lw\t%1, 0(%2)\t\t\t# four words source address\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %1\n\t"
"sltu\t$1, %0, %1\n\t"
"lw\t%1, 4(%2)\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %1\n\t"
"sltu\t$1, %0, %1\n\t"
"lw\t%1, 8(%2)\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %1\n\t"
"sltu\t$1, %0, %1\n\t"
"lw\t%1, 12(%2)\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %1\n\t"
"sltu\t$1, %0, %1\n\t"
"lw\t%1, 0(%3)\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %1\n\t"
"sltu\t$1, %0, %1\n\t"
"lw\t%1, 4(%3)\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %1\n\t"
"sltu\t$1, %0, %1\n\t"
"lw\t%1, 8(%3)\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %1\n\t"
"sltu\t$1, %0, %1\n\t"
"lw\t%1, 12(%3)\n\t"
"addu\t%0, $1\n\t"
"addu\t%0, %1\n\t"
"sltu\t$1, %0, %1\n\t"
"addu\t%0, $1\t\t\t# Add final carry\n\t"
".set\tnoat\n\t"
".set\tnoreorder"
: "=r" (sum), "=r" (proto)
: "r" (saddr), "r" (daddr),
"0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
return csum_fold(sum);
}
#endif /* _ASM_SCORE_CHECKSUM_H */
#ifndef _ASM_SCORE_CPUTIME_H
#define _ASM_SCORE_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* _ASM_SCORE_CPUTIME_H */
#ifndef _ASM_SCORE_CURRENT_H
#define _ASM_SCORE_CURRENT_H
#include <asm-generic/current.h>
#endif /* _ASM_SCORE_CURRENT_H */
#ifndef _ASM_SCORE_DELAY_H
#define _ASM_SCORE_DELAY_H
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__ (
"1:\tsubi\t%0,1\n\t"
"cmpz.c\t%0\n\t"
"bne\t1b\n\t"
: "=r" (loops)
: "0" (loops));
}
static inline void __udelay(unsigned long usecs)
{
__delay(usecs);
}
#define udelay(usecs) __udelay(usecs)
#endif /* _ASM_SCORE_DELAY_H */
#ifndef _ASM_SCORE_DEVICE_H
#define _ASM_SCORE_DEVICE_H
#include <asm-generic/device.h>
#endif /* _ASM_SCORE_DEVICE_H */
#ifndef _ASM_SCORE_DIV64_H
#define _ASM_SCORE_DIV64_H
#include <asm-generic/div64.h>
#endif /* _ASM_SCORE_DIV64_H */
#ifndef _ASM_SCORE_DMA_MAPPING_H
#define _ASM_SCORE_DMA_MAPPING_H
#include <asm-generic/dma-mapping-broken.h>
#endif /* _ASM_SCORE_DMA_MAPPING_H */
#ifndef _ASM_SCORE_DMA_H
#define _ASM_SCORE_DMA_H
#include <asm/io.h>
#define MAX_DMA_ADDRESS (0)
#endif /* _ASM_SCORE_DMA_H */
#ifndef _ASM_SCORE_ELF_H
#define _ASM_SCORE_ELF_H
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
#define EM_SCORE7 135
/* Relocation types. */
#define R_SCORE_NONE 0
#define R_SCORE_HI16 1
#define R_SCORE_LO16 2
#define R_SCORE_BCMP 3
#define R_SCORE_24 4
#define R_SCORE_PC19 5
#define R_SCORE16_11 6
#define R_SCORE16_PC8 7
#define R_SCORE_ABS32 8
#define R_SCORE_ABS16 9
#define R_SCORE_DUMMY2 10
#define R_SCORE_GP15 11
#define R_SCORE_GNU_VTINHERIT 12
#define R_SCORE_GNU_VTENTRY 13
#define R_SCORE_GOT15 14
#define R_SCORE_GOT_LO16 15
#define R_SCORE_CALL15 16
#define R_SCORE_GPREL32 17
#define R_SCORE_REL32 18
#define R_SCORE_DUMMY_HI16 19
#define R_SCORE_IMM30 20
#define R_SCORE_IMM32 21
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define elf_check_arch(x) ((x)->e_machine == EM_SCORE7)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_SCORE7
#define SET_PERSONALITY(ex) \
do { \
set_personality(PER_LINUX); \
} while (0)
struct task_struct;
struct pt_regs;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM (NULL)
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
_r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
_r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
_r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
_r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
_r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
_r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \
_r->regs[30] = _r->regs[31] = 0; \
} while (0)
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#ifndef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
#endif /* _ASM_SCORE_ELF_H */
#ifndef _ASM_SCORE_EMERGENCY_RESTART_H
#define _ASM_SCORE_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_SCORE_EMERGENCY_RESTART_H */
#ifndef _ASM_SCORE_ERRNO_H
#define _ASM_SCORE_ERRNO_H
#include <asm-generic/errno.h>
#define EMAXERRNO 1024
#endif /* _ASM_SCORE_ERRNO_H */
#ifndef _ASM_SCORE_FCNTL_H
#define _ASM_SCORE_FCNTL_H
#include <asm-generic/fcntl.h>
#endif /* _ASM_SCORE_FCNTL_H */
#ifndef _ASM_SCORE_FIXMAP_H
#define _ASM_SCORE_FIXMAP_H
#include <asm/page.h>
#define PHY_RAM_BASE 0x00000000
#define PHY_IO_BASE 0x10000000
#define VIRTUAL_RAM_BASE 0xa0000000
#define VIRTUAL_IO_BASE 0xb0000000
#define RAM_SPACE_SIZE 0x10000000
#define IO_SPACE_SIZE 0x10000000
/* Kernel unmapped, cached 512MB */
#define KSEG1 0xa0000000
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
/*
* on UP currently we will have no trace of the fixmap mechanism,
* no page table allocations, etc. This might change in the
* future, say framebuffers for the console driver(s) could be
* fix-mapped?
*/
enum fixed_addresses {
#define FIX_N_COLOURS 8
FIX_CMAP_BEGIN,
FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
__end_of_fixed_addresses
};
/*
* used by vmalloc.c.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) \
((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-dereference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static inline unsigned long fix_to_virt(const unsigned int idx)
{
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
return __virt_to_fix(vaddr);
}
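/*
 * Worked example (illustration): with FIXADDR_TOP == 0xfefe0000 and 4k
 * pages, fix_to_virt(FIX_CMAP_BEGIN) == 0xfefe0000 and
 * fix_to_virt(FIX_CMAP_BEGIN + 1) == 0xfefdf000; each successive index
 * maps one page further down.
 */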
#endif /* _ASM_SCORE_FIXMAP_H */
#ifndef _ASM_SCORE_FTRACE_H
#define _ASM_SCORE_FTRACE_H
#endif /* _ASM_SCORE_FTRACE_H */
#ifndef _ASM_SCORE_FUTEX_H
#define _ASM_SCORE_FUTEX_H
#include <asm-generic/futex.h>
#endif /* _ASM_SCORE_FUTEX_H */
#ifndef _ASM_SCORE_HARDIRQ_H
#define _ASM_SCORE_HARDIRQ_H
#include <asm-generic/hardirq.h>
#endif /* _ASM_SCORE_HARDIRQ_H */
#ifndef _ASM_SCORE_HW_IRQ_H
#define _ASM_SCORE_HW_IRQ_H
#endif /* _ASM_SCORE_HW_IRQ_H */
#ifndef _ASM_SCORE_IO_H
#define _ASM_SCORE_IO_H
#include <asm-generic/io.h>
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#endif /* _ASM_SCORE_IO_H */
#ifndef _ASM_SCORE_IOCTL_H
#define _ASM_SCORE_IOCTL_H
#include <asm-generic/ioctl.h>
#endif /* _ASM_SCORE_IOCTL_H */
#ifndef _ASM_SCORE_IOCTLS_H
#define _ASM_SCORE_IOCTLS_H
#include <asm-generic/ioctls.h>
#endif /* _ASM_SCORE_IOCTLS_H */
#ifndef _ASM_SCORE_IPCBUF_H
#define _ASM_SCORE_IPCBUF_H
#include <asm-generic/ipcbuf.h>
#endif /* _ASM_SCORE_IPCBUF_H */
#ifndef _ASM_SCORE_IRQ_H
#define _ASM_SCORE_IRQ_H
#define EXCEPTION_VECTOR_BASE_ADDR 0xa0000000
#define VECTOR_ADDRESS_OFFSET_MODE4 0
#define VECTOR_ADDRESS_OFFSET_MODE16 1
#define DEBUG_VECTOR_SIZE (0x4)
#define DEBUG_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x1fc)
#define GENERAL_VECTOR_SIZE (0x10)
#define GENERAL_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x200)
#define NR_IRQS 64
#define IRQ_VECTOR_SIZE (0x10)
#define IRQ_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x210)
#define IRQ_VECTOR_END_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x5f0)
#define irq_canonicalize(irq) (irq)
#define P_INT_PNDL 0x95F50000
#define P_INT_PNDH 0x95F50004
#define P_INT_PRIORITY_M 0x95F50008
#define P_INT_PRIORITY_SG0 0x95F50010
#define P_INT_PRIORITY_SG1 0x95F50014
#define P_INT_PRIORITY_SG2 0x95F50018
#define P_INT_PRIORITY_SG3 0x95F5001C
#define P_INT_MASKL 0x95F50020
#define P_INT_MASKH 0x95F50024
#define IRQ_TIMER (7) /* Timer IRQ number of SPCT6600 */
#endif /* _ASM_SCORE_IRQ_H */
#ifndef _ASM_SCORE_IRQ_REGS_H
#define _ASM_SCORE_IRQ_REGS_H
#include <asm-generic/irq_regs.h>
#endif /* _ASM_SCORE_IRQ_REGS_H */
#ifndef _ASM_SCORE_IRQFLAGS_H
#define _ASM_SCORE_IRQFLAGS_H
#ifndef __ASSEMBLY__
#define raw_local_irq_save(x) \
{ \
__asm__ __volatile__( \
"mfcr r8, cr0;" \
"li r9, 0xfffffffe;" \
"nop;" \
"mv %0, r8;" \
"and r8, r8, r9;" \
"mtcr r8, cr0;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
"ldi r9, 0x1;" \
"and %0, %0, r9;" \
: "=r" (x) \
: \
: "r8", "r9" \
); \
}
#define raw_local_irq_restore(x) \
{ \
__asm__ __volatile__( \
"mfcr r8, cr0;" \
"ldi r9, 0x1;" \
"and %0, %0, r9;" \
"or r8, r8, %0;" \
"mtcr r8, cr0;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
: \
: "r"(x) \
: "r8", "r9" \
); \
}
#define raw_local_irq_enable(void) \
{ \
__asm__ __volatile__( \
"mfcr\tr8,cr0;" \
"nop;" \
"nop;" \
"ori\tr8,0x1;" \
"mtcr\tr8,cr0;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
: \
: \
: "r8"); \
}
#define raw_local_irq_disable(void) \
{ \
__asm__ __volatile__( \
"mfcr\tr8,cr0;" \
"nop;" \
"nop;" \
"srli\tr8,r8,1;" \
"slli\tr8,r8,1;" \
"mtcr\tr8,cr0;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
: \
: \
: "r8"); \
}
#define raw_local_save_flags(x) \
{ \
__asm__ __volatile__( \
"mfcr r8, cr0;" \
"nop;" \
"nop;" \
"mv %0, r8;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
"nop;" \
"ldi r9, 0x1;" \
"and %0, %0, r9;" \
: "=r" (x) \
: \
: "r8", "r9" \
); \
}
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & 1);
}
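/*
 * Usage sketch (illustration, following the generic irqflags pattern):
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);	-- disable, keep old cr0 bit 0
 *	... critical section ...
 *	raw_local_irq_restore(flags);	-- put the enable bit back
 *
 * The saved value keeps only bit 0 of cr0 (the interrupt-enable bit),
 * which is why raw_irqs_disabled_flags() just tests that bit.
 */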
#endif
#endif /* _ASM_SCORE_IRQFLAGS_H */
#ifndef _ASM_SCORE_KDEBUG_H
#define _ASM_SCORE_KDEBUG_H
#include <asm-generic/kdebug.h>
#endif /* _ASM_SCORE_KDEBUG_H */
#ifndef _ASM_SCORE_KMAP_TYPES_H
#define _ASM_SCORE_KMAP_TYPES_H
#include <asm-generic/kmap_types.h>
#endif /* _ASM_SCORE_KMAP_TYPES_H */
#ifndef _ASM_SCORE_LINKAGE_H
#define _ASM_SCORE_LINKAGE_H
#endif /* _ASM_SCORE_LINKAGE_H */
#ifndef _ASM_SCORE_LOCAL_H
#define _ASM_SCORE_LOCAL_H
#include <asm-generic/local.h>
#endif /* _ASM_SCORE_LOCAL_H */
#ifndef _ASM_SCORE_MMAN_H
#define _ASM_SCORE_MMAN_H
#include <asm-generic/mman.h>
#endif /* _ASM_SCORE_MMAN_H */
#ifndef _ASM_SCORE_MMU_H
#define _ASM_SCORE_MMU_H
typedef unsigned long mm_context_t;
#endif /* _ASM_SCORE_MMU_H */
#ifndef _ASM_SCORE_MMU_CONTEXT_H
#define _ASM_SCORE_MMU_CONTEXT_H
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/scoreregs.h>
/*
* For the fast tlb miss handlers, we keep a per cpu array of pointers
* to the current pgd for each processor. Also, the proc. id is stuffed
* into the context register.
*/
extern unsigned long asid_cache;
extern unsigned long pgd_current;
#define TLBMISS_HANDLER_SETUP_PGD(pgd) (pgd_current = (unsigned long)(pgd))
#define TLBMISS_HANDLER_SETUP() \
do { \
write_c0_context(0); \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) \
} while (0)
/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
*/
#define ASID_VERSION_MASK 0xfffff000
#define ASID_FIRST_VERSION 0x1000
/* PEVN --------- VPN ---------- --ASID--- -NA- */
/* binary: 0000 0000 0000 0000 0000 0000 0001 0000 */
/* binary: 0000 0000 0000 0000 0000 1111 1111 0000 */
#define ASID_INC 0x10
#define ASID_MASK 0xff0
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{}
static inline void
get_new_mmu_context(struct mm_struct *mm)
{
unsigned long asid = asid_cache + ASID_INC;
if (!(asid & ASID_MASK)) {
local_flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION;
}
mm->context = asid;
asid_cache = asid;
}
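/*
 * Worked example (illustration): with ASID_INC == 0x10 and
 * ASID_MASK == 0xff0, asid_cache == 0x1ff0 advances to 0x2000 on the
 * next allocation; (0x2000 & 0xff0) == 0, so the whole TLB is flushed
 * and a new version (the upper bits) begins.  Only if the counter
 * wraps all the way to 0 is it reset to ASID_FIRST_VERSION.
 */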
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = 0;
return 0;
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned long flags;
local_irq_save(flags);
if ((next->context ^ asid_cache) & ASID_VERSION_MASK)
get_new_mmu_context(next);
pevn_set(next->context);
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
local_irq_restore(flags);
}
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{}
static inline void
deactivate_mm(struct task_struct *task, struct mm_struct *mm)
{}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
unsigned long flags;
local_irq_save(flags);
get_new_mmu_context(next);
pevn_set(next->context);
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
local_irq_restore(flags);
}
#endif /* _ASM_SCORE_MMU_CONTEXT_H */
#ifndef _ASM_SCORE_MODULE_H
#define _ASM_SCORE_MODULE_H
#include <linux/list.h>
#include <asm/uaccess.h>
struct mod_arch_specific {
/* Data Bus Error exception tables */
struct list_head dbe_list;
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#define Elf_Addr Elf32_Addr
/* Given an address, look for it in the exception tables. */
#ifdef CONFIG_MODULES
const struct exception_table_entry *search_module_dbetables(unsigned long addr);
#else
static inline const struct exception_table_entry
*search_module_dbetables(unsigned long addr)
{
return NULL;
}
#endif
#define MODULE_PROC_FAMILY "SCORE7"
#define MODULE_KERNEL_TYPE "32BIT "
#define MODULE_KERNEL_SMTC ""
#define MODULE_ARCH_VERMAGIC \
MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC
#endif /* _ASM_SCORE_MODULE_H */
#ifndef _ASM_SCORE_MSGBUF_H
#define _ASM_SCORE_MSGBUF_H
#include <asm-generic/msgbuf.h>
#endif /* _ASM_SCORE_MSGBUF_H */
#ifndef _ASM_SCORE_MUTEX_H
#define _ASM_SCORE_MUTEX_H
#include <asm-generic/mutex-dec.h>
#endif /* _ASM_SCORE_MUTEX_H */
#ifndef _ASM_SCORE_PAGE_H
#define _ASM_SCORE_PAGE_H
#include <linux/pfn.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT (12)
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
/*
* PAGE_OFFSET -- the first address of the first page of memory. When not
* using MMU this corresponds to the first free page in physical memory (aligned
* on a page boundary).
*/
#define PAGE_OFFSET (0xA0000000UL)
#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) \
memcpy((vto), (vfrom), PAGE_SIZE)
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t; /* page table entry */
typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
extern unsigned long max_pfn;
#define __pa(vaddr) ((unsigned long) (vaddr))
#define __va(paddr) ((void *) (paddr))
#define phys_to_pfn(phys) (PFN_DOWN(phys))
#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr)
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
#endif /* __KERNEL__ */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
#endif /* _ASM_SCORE_PAGE_H */
#ifndef _ASM_SCORE_PARAM_H
#define _ASM_SCORE_PARAM_H
#include <asm-generic/param.h>
#endif /* _ASM_SCORE_PARAM_H */
#ifndef _ASM_SCORE_PCI_H
#define _ASM_SCORE_PCI_H
#endif /* _ASM_SCORE_PCI_H */
#ifndef _ASM_SCORE_PERCPU_H
#define _ASM_SCORE_PERCPU_H
#include <asm-generic/percpu.h>
#endif /* _ASM_SCORE_PERCPU_H */
#ifndef _ASM_SCORE_PGALLOC_H
#define _ASM_SCORE_PGALLOC_H
#include <linux/mm.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
set_pmd(pmd, __pmd((unsigned long)pte));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte)
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long)ret);
memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGD_ORDER);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *pte;
pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
PTE_ORDER);
return pte;
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *pte;
pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
if (pte) {
clear_highpage(pte);
pgtable_page_ctor(pte);
}
return pte;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_pages((unsigned long)pte, PTE_ORDER);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
pgtable_page_dtor(pte);
__free_pages(pte, PTE_ORDER);
}
#define __pte_free_tlb(tlb, pte) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
#define check_pgt_cache() do {} while (0)
#endif /* _ASM_SCORE_PGALLOC_H */
#ifndef _ASM_SCORE_PGTABLE_BITS_H
#define _ASM_SCORE_PGTABLE_BITS_H
#define _PAGE_ACCESSED (1<<5) /* implemented in software */
#define _PAGE_READ (1<<6) /* implemented in software */
#define _PAGE_WRITE (1<<7) /* implemented in software */
#define _PAGE_PRESENT (1<<9) /* implemented in software */
#define _PAGE_MODIFIED (1<<10) /* implemented in software */
#define _PAGE_FILE (1<<10)
#define _PAGE_GLOBAL (1<<0)
#define _PAGE_VALID (1<<1)
#define _PAGE_SILENT_READ (1<<1) /* synonym */
#define _PAGE_DIRTY (1<<2) /* Write bit */
#define _PAGE_SILENT_WRITE (1<<2)
#define _PAGE_CACHE (1<<3) /* cache */
#define _CACHE_MASK (1<<3)
#define _PAGE_BUFFERABLE (1<<4) /* Follow spec. */
#define _PAGE_CHG_MASK \
(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE)
#endif /* _ASM_SCORE_PGTABLE_BITS_H */
#ifndef _ASM_SCORE_PGTABLE_H
#define _ASM_SCORE_PGTABLE_H
#include <linux/const.h>
#include <asm-generic/pgtable-nopmd.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/pgtable-bits.h>
extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/*
* Entries per page directory level: we use two-level, so
* we don't really have any PUD/PMD directory physically.
*/
#define PGD_ORDER 0
#define PTE_ORDER 0
#define PTRS_PER_PGD 1024
#define PTRS_PER_PTE 1024
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define VMALLOC_START (0xc0000000UL)
#define PKMAP_BASE (0xfd000000UL)
#define VMALLOC_END (FIXADDR_START - 2*PAGE_SIZE)
#define pte_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
__FILE__, __LINE__, pgd_val(e))
/*
* Empty pgd/pmd entries point to the invalid_pte_table.
*/
static inline int pmd_none(pmd_t pmd)
{
return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}
#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
static inline void pmd_clear(pmd_t *pmdp)
{
pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) \
__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define __pgd_offset(address) pgd_index(address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
#define pte_unmap_nested(pte) ((void)(pte))
/*
* Bits 9 (_PAGE_PRESENT) and 10 (_PAGE_FILE) are taken;
* split up 30 bits of offset into this range:
*/
#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(_pte) \
(((_pte).pte & 0x1ff) | (((_pte).pte >> 11) << 9))
#define pgoff_to_pte(off) \
((pte_t) {((off) & 0x1ff) | (((off) >> 9) << 11) | _PAGE_FILE})
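/*
 * Round-trip example (illustration): for off == 0x1234, pgoff_to_pte()
 * yields (0x1234 & 0x1ff) | ((0x1234 >> 9) << 11) | _PAGE_FILE
 * == 0x034 | 0x4800 | 0x400 == 0x4c34, and pte_to_pgoff() recovers
 * (0x4c34 & 0x1ff) | ((0x4c34 >> 11) << 9) == 0x1234.
 */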
#define __pte_to_swp_entry(pte) \
((swp_entry_t) { pte_val(pte)})
#define __swp_entry_to_pte(x) ((pte_t) {(x).val})
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)
#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define pte_clear(mm, addr, xp) \
do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define pgd_present(pgd) (1) /* pages are always present on non MMU */
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)
#define pmd_offset(a, b) ((void *) 0)
#define pmd_page_vaddr(pmd) pmd_val(pmd)
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pud_offset(pgd, address) ((pud_t *) pgd)
#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */
#define pgprot_noncached(x) (x)
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
#define swapper_pg_dir ((pgd_t *) NULL)
#define pgtable_cache_init() do {} while (0)
#define arch_enter_lazy_cpu_mode() do {} while (0)
static inline int pte_write(pte_t pte)
{
return pte_val(pte) & _PAGE_WRITE;
}
static inline int pte_dirty(pte_t pte)
{
return pte_val(pte) & _PAGE_MODIFIED;
}
static inline int pte_young(pte_t pte)
{
return pte_val(pte) & _PAGE_ACCESSED;
}
static inline int pte_file(pte_t pte)
{
return pte_val(pte) & _PAGE_FILE;
}
#define pte_special(pte) (0)
static inline pte_t pte_wrprotect(pte_t pte)
{
pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
pte_val(pte) |= _PAGE_WRITE;
if (pte_val(pte) & _PAGE_MODIFIED)
pte_val(pte) |= _PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= _PAGE_MODIFIED;
if (pte_val(pte) & _PAGE_WRITE)
pte_val(pte) |= _PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_ACCESSED;
if (pte_val(pte) & _PAGE_READ)
pte_val(pte) |= _PAGE_SILENT_READ;
return pte;
}
#define set_pmd(pmdptr, pmdval) \
do { *(pmdptr) = (pmdval); } while (0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
extern unsigned long pgd_current;
extern void paging_init(void);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
extern void __update_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t pte);
extern void __update_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte)
{
__update_tlb(vma, address, pte);
__update_cache(vma, address, pte);
}
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>
void setup_memory(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_PGTABLE_H */
#ifndef _ASM_SCORE_POLL_H
#define _ASM_SCORE_POLL_H
#include <asm-generic/poll.h>
#endif /* _ASM_SCORE_POLL_H */
#ifndef _ASM_SCORE_POSIX_TYPES_H
#define _ASM_SCORE_POSIX_TYPES_H
#include <asm-generic/posix_types.h>
#endif /* _ASM_SCORE_POSIX_TYPES_H */
#ifndef _ASM_SCORE_PROCESSOR_H
#define _ASM_SCORE_PROCESSOR_H
#include <linux/cpumask.h>
#include <linux/threads.h>
#include <asm/segment.h>
struct task_struct;
/*
* System setup and hardware flags..
*/
extern void (*cpu_wait)(void);
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *tsk);
extern void start_thread(struct pt_regs *regs,
unsigned long pc, unsigned long sp);
extern unsigned long get_wchan(struct task_struct *p);
/*
* Return current instruction pointer ("program counter").
*/
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
#define cpu_relax() barrier()
#define release_thread(thread) do {} while (0)
#define prepare_to_copy(tsk) do {} while (0)
/*
* User space process size: 2GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
#define TASK_SIZE 0x7fff8000UL
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE))
#ifdef __KERNEL__
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE
#endif
/*
* If you change thread_struct remember to change the #defines below too!
*/
struct thread_struct {
unsigned long reg0, reg2, reg3;
unsigned long reg12, reg13, reg14, reg15, reg16;
unsigned long reg17, reg18, reg19, reg20, reg21;
unsigned long cp0_psr;
unsigned long cp0_ema; /* Last user fault */
unsigned long cp0_badvaddr; /* Last user fault */
unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
unsigned long error_code;
unsigned long trap_no;
unsigned long mflags;
unsigned long reg29;
unsigned long single_step;
unsigned long ss_nextcnt;
unsigned long insn1_type;
unsigned long addr1;
unsigned long insn1;
unsigned long insn2_type;
unsigned long addr2;
unsigned long insn2;
mm_segment_t current_ds;
};
#define INIT_THREAD { \
.reg0 = 0, \
.reg2 = 0, \
.reg3 = 0, \
.reg12 = 0, \
.reg13 = 0, \
.reg14 = 0, \
.reg15 = 0, \
.reg16 = 0, \
.reg17 = 0, \
.reg18 = 0, \
.reg19 = 0, \
.reg20 = 0, \
.reg21 = 0, \
.cp0_psr = 0, \
.error_code = 0, \
.trap_no = 0, \
}
#define kstk_tos(tsk) \
((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
#define task_pt_regs(tsk) ((struct pt_regs *)kstk_tos(tsk) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#endif /* _ASM_SCORE_PROCESSOR_H */
#ifndef _ASM_SCORE_PTRACE_H
#define _ASM_SCORE_PTRACE_H
#define PC 32
#define CONDITION 33
#define ECR 34
#define EMA 35
#define CEH 36
#define CEL 37
#define COUNTER 38
#define LDCR 39
#define STCR 40
#define PSR 41
#define SINGLESTEP16_INSN 0x7006
#define SINGLESTEP32_INSN 0x840C8000
#define BREAKPOINT16_INSN 0x7002 /* work on SPG300 */
#define BREAKPOINT32_INSN 0x84048000 /* work on SPG300 */
/* Define instruction mask */
#define INSN32_MASK 0x80008000
#define J32 0x88008000 /* 1_00010_0000000000_1_000000000000000 */
#define J32M 0xFC008000 /* 1_11111_0000000000_1_000000000000000 */
#define B32 0x90008000 /* 1_00100_0000000000_1_000000000000000 */
#define B32M 0xFC008000
#define BL32 0x90008001 /* 1_00100_0000000000_1_000000000000001 */
#define BL32M B32
#define BR32 0x80008008 /* 1_00000_0000000000_1_00000000_000100_0 */
#define BR32M 0xFFE0807E
#define BRL32 0x80008009 /* 1_00000_0000000000_1_00000000_000100_1 */
#define BRL32M BR32M
#define B32_SET (J32 | B32 | BL32 | BR32 | BRL32)
#define J16 0x3000 /* 0_011_....... */
#define J16M 0xF000
#define B16 0x4000 /* 0_100_....... */
#define B16M 0xF000
#define BR16 0x0004 /* 0_000.......0100 */
#define BR16M 0xF00F
#define B16_SET (J16 | B16 | BR16)
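/*
 * Illustration: these mask/value pairs are matched as
 * (insn & MASK) == VALUE, e.g. a 32-bit unconditional jump satisfies
 * (insn & J32M) == J32, so the single-step code can recognize branch
 * instructions and decide where to plant its breakpoint.
 */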
/*
* This struct defines the way the registers are stored on the stack during a
* system call/exception. As usual the registers k0/k1 aren't being saved.
*/
struct pt_regs {
unsigned long pad0[6];
unsigned long orig_r4;
unsigned long orig_r7;
unsigned long regs[32];
unsigned long cel;
unsigned long ceh;
unsigned long sr0; /* cnt */
unsigned long sr1; /* lcr */
unsigned long sr2; /* scr */
unsigned long cp0_epc;
unsigned long cp0_ema;
unsigned long cp0_psr;
unsigned long cp0_ecr;
unsigned long cp0_condition;
long is_syscall;
};
#ifdef __KERNEL__
/*
* Does the process account for user or for system time?
*/
#define user_mode(regs) ((regs->cp0_psr & 8) == 8)
#define instruction_pointer(regs) (0)
#define profile_pc(regs) instruction_pointer(regs)
extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit);
extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *);
extern void clear_single_step(struct task_struct *);
#endif
#endif /* _ASM_SCORE_PTRACE_H */
#ifndef _ASM_SCORE_RESOURCE_H
#define _ASM_SCORE_RESOURCE_H
#include <asm-generic/resource.h>
#endif /* _ASM_SCORE_RESOURCE_H */
#ifndef _ASM_SCORE_SCATTERLIST_H
#define _ASM_SCORE_SCATTERLIST_H
#include <asm-generic/scatterlist.h>
#endif /* _ASM_SCORE_SCATTERLIST_H */
#ifndef _ASM_SCORE_SCOREREGS_H
#define _ASM_SCORE_SCOREREGS_H
#include <linux/linkage.h>
/* TIMER register */
#define TIME0BASE 0x96080000
#define P_TIMER0_CTRL (TIME0BASE + 0x00)
#define P_TIMER0_CPP_CTRL (TIME0BASE + 0x04)
#define P_TIMER0_PRELOAD (TIME0BASE + 0x08)
#define P_TIMER0_CPP_REG (TIME0BASE + 0x0C)
#define P_TIMER0_UPCNT (TIME0BASE + 0x10)
/* Timer Controller Register */
/* bit 0 Timer enable */
#define TMR_DISABLE 0x0000
#define TMR_ENABLE 0x0001
/* bit 1 Interrupt enable */
#define TMR_IE_DISABLE 0x0000
#define TMR_IE_ENABLE 0x0002
/* bit 2 Output enable */
#define TMR_OE_DISABLE 0x0004
#define TMR_OE_ENABLE 0x0000
/* bit4 Up/Down counting selection */
#define TMR_UD_DOWN 0x0000
#define TMR_UD_UP 0x0010
/* bit5 Up/Down counting control selection */
#define TMR_UDS_UD 0x0000
#define TMR_UDS_EXTUD 0x0020
/* bit6 Time output mode */
#define TMR_OM_TOGGLE 0x0000
#define TMR_OM_PILSE 0x0040
/* bit 8..9 External input active edge selection */
#define TMR_ES_PE 0x0000
#define TMR_ES_NE 0x0100
#define TMR_ES_BOTH 0x0200
/* bit 10..11 Operating mode */
#define TMR_M_FREE 0x0000 /* free running timer mode */
#define TMR_M_PERIODIC 0x0400 /* periodic timer mode */
#define TMR_M_FC 0x0800 /* free running counter mode */
#define TMR_M_PC 0x0c00 /* periodic counter mode */
#define SYSTEM_CLOCK (27*1000000/4) /* 27 MHz */
#endif /* _ASM_SCORE_SCOREREGS_H */
#ifndef _ASM_SCORE_SECTIONS_H
#define _ASM_SCORE_SECTIONS_H
#include <asm-generic/sections.h>
#endif /* _ASM_SCORE_SECTIONS_H */
#ifndef _ASM_SCORE_SEGMENT_H
#define _ASM_SCORE_SEGMENT_H
#ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
#define KERNEL_DS ((mm_segment_t){0})
#define USER_DS KERNEL_DS
# define get_ds() (KERNEL_DS)
# define get_fs() (current_thread_info()->addr_limit)
# define set_fs(x) \
do { current_thread_info()->addr_limit = (x); } while (0)
# define segment_eq(a, b) ((a).seg == (b).seg)
# endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_SEGMENT_H */
#ifndef _ASM_SCORE_SEMBUF_H
#define _ASM_SCORE_SEMBUF_H
#include <asm-generic/sembuf.h>
#endif /* _ASM_SCORE_SEMBUF_H */
#ifndef _ASM_SCORE_SETUP_H
#define _ASM_SCORE_SETUP_H
#define COMMAND_LINE_SIZE 256
#define MEM_SIZE 0x2000000
#ifdef __KERNEL__
extern void pagetable_init(void);
extern void pgd_init(unsigned long page);
extern void setup_early_printk(void);
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void handle_nmi(void);
extern void handle_adelinsn(void);
extern void handle_adedata(void);
extern void handle_ibe(void);
extern void handle_pel(void);
extern void handle_sys(void);
extern void handle_ccu(void);
extern void handle_ri(void);
extern void handle_tr(void);
extern void handle_ades(void);
extern void handle_cee(void);
extern void handle_cpe(void);
extern void handle_dve(void);
extern void handle_dbe(void);
extern void handle_reserved(void);
extern void handle_tlb_refill(void);
extern void handle_tlb_invaild(void);
extern void handle_mod(void);
extern void debug_exception_vector(void);
extern void general_exception_vector(void);
extern void interrupt_exception_vector(void);
#endif /* __KERNEL__ */
#endif /* _ASM_SCORE_SETUP_H */
#ifndef _ASM_SCORE_SHMBUF_H
#define _ASM_SCORE_SHMBUF_H
#include <asm-generic/shmbuf.h>
#endif /* _ASM_SCORE_SHMBUF_H */
#ifndef _ASM_SCORE_SHMPARAM_H
#define _ASM_SCORE_SHMPARAM_H
#include <asm-generic/shmparam.h>
#endif /* _ASM_SCORE_SHMPARAM_H */
#ifndef _ASM_SCORE_SIGCONTEXT_H
#define _ASM_SCORE_SIGCONTEXT_H
/*
* Keep this struct definition in sync with the sigcontext fragment
* in arch/score/tools/offset.c
*/
struct sigcontext {
unsigned int sc_regmask;
unsigned int sc_psr;
unsigned int sc_condition;
unsigned long sc_pc;
unsigned long sc_regs[32];
unsigned int sc_ssflags;
unsigned int sc_mdceh;
unsigned int sc_mdcel;
unsigned int sc_ecr;
unsigned long sc_ema;
unsigned long sc_sigset[4];
};
#endif /* _ASM_SCORE_SIGCONTEXT_H */
#ifndef _ASM_SCORE_SIGINFO_H
#define _ASM_SCORE_SIGINFO_H
#include <asm-generic/siginfo.h>
#endif /* _ASM_SCORE_SIGINFO_H */
#ifndef _ASM_SCORE_SIGNAL_H
#define _ASM_SCORE_SIGNAL_H
#include <asm-generic/signal.h>
#endif /* _ASM_SCORE_SIGNAL_H */
#ifndef _ASM_SCORE_SOCKET_H
#define _ASM_SCORE_SOCKET_H
#include <asm-generic/socket.h>
#endif /* _ASM_SCORE_SOCKET_H */
#ifndef _ASM_SCORE_SOCKIOS_H
#define _ASM_SCORE_SOCKIOS_H
#include <asm-generic/sockios.h>
#endif /* _ASM_SCORE_SOCKIOS_H */
#ifndef _ASM_SCORE_STAT_H
#define _ASM_SCORE_STAT_H
#include <asm-generic/stat.h>
#endif /* _ASM_SCORE_STAT_H */
#ifndef _ASM_SCORE_STATFS_H
#define _ASM_SCORE_STATFS_H
#include <asm-generic/statfs.h>
#endif /* _ASM_SCORE_STATFS_H */
#ifndef _ASM_SCORE_STRING_H
#define _ASM_SCORE_STRING_H
extern void *memset(void *__s, int __c, size_t __count);
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
#endif /* _ASM_SCORE_STRING_H */
#ifndef _ASM_SCORE_SWAB_H
#define _ASM_SCORE_SWAB_H
#include <asm-generic/swab.h>
#endif /* _ASM_SCORE_SWAB_H */
#ifndef _ASM_SCORE_SYSCALLS_H
#define _ASM_SCORE_SYSCALLS_H
asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs);
#define sys_clone sys_clone
#include <asm-generic/syscalls.h>
#endif /* _ASM_SCORE_SYSCALLS_H */
#ifndef _ASM_SCORE_SYSTEM_H
#define _ASM_SCORE_SYSTEM_H
#include <linux/types.h>
#include <linux/irqflags.h>
struct pt_regs;
struct task_struct;
extern void *resume(void *last, void *next, void *next_ti);
#define switch_to(prev, next, last) \
do { \
(last) = resume(prev, next, task_thread_info(next)); \
} while (0)
#define finish_arch_switch(prev) do {} while (0)
typedef void (*vi_handler_t)(void);
extern unsigned long arch_align_stack(unsigned long sp);
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
#define set_mb(var, value) do {var = value; wmb(); } while (0)
#define __HAVE_ARCH_CMPXCHG 1
#include <asm-generic/cmpxchg-local.h>
#ifndef __ASSEMBLY__
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
unsigned long retval;
unsigned long flags;
local_irq_save(flags);
retval = *m;
*m = val;
local_irq_restore(flags);
return retval;
}
#define xchg(ptr, v) \
((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
(unsigned long)(v)))
static inline unsigned long __cmpxchg(volatile unsigned long *m,
unsigned long old, unsigned long new)
{
unsigned long retval;
unsigned long flags;
local_irq_save(flags);
retval = *m;
if (retval == old)
*m = new;
local_irq_restore(flags);
return retval;
}
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
(unsigned long)(o), \
(unsigned long)(n)))
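/*
 * Usage sketch (illustration): on this UP port both primitives gain
 * atomicity by disabling interrupts, e.g.
 *
 *	old = cmpxchg(&var, 0UL, 1UL);	-- set var to 1 iff it was 0
 *	prev = xchg(&var, 0UL);		-- unconditionally store 0
 *
 * and each returns the value previously found in *ptr.
 */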
extern void __die(const char *, struct pt_regs *, const char *,
const char *, unsigned long) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *,
const char *, unsigned long);
#define die(msg, regs) \
__die(msg, regs, __FILE__ ":", __func__, __LINE__)
#define die_if_kernel(msg, regs) \
__die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_SCORE_SYSTEM_H */
#ifndef _ASM_SCORE_TERMBITS_H
#define _ASM_SCORE_TERMBITS_H
#include <asm-generic/termbits.h>
#endif /* _ASM_SCORE_TERMBITS_H */
#ifndef _ASM_SCORE_TERMIOS_H
#define _ASM_SCORE_TERMIOS_H
#include <asm-generic/termios.h>
#endif /* _ASM_SCORE_TERMIOS_H */
#ifndef _ASM_SCORE_THREAD_INFO_H
#define _ASM_SCORE_THREAD_INFO_H
#ifdef __KERNEL__
#define KU_MASK 0x08
#define KU_USER 0x08
#define KU_KERN 0x00
#ifndef __ASSEMBLY__
#include <asm/processor.h>
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
* - if the contents of this structure are changed, the assembly constants
* must also be changed
*/
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */
/* 0 => preemptable, < 0 => BUG */
int preempt_count;
/*
* thread address space:
* 0-0xBFFFFFFF for user-thread
* 0-0xFFFFFFFF for kernel-thread
*/
mm_segment_t addr_limit;
struct restart_block restart_block;
struct pt_regs *regs;
};
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("r28");
#define current_thread_info() __current_thread_info
/* thread information allocation */
#define THREAD_SIZE_ORDER (1)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK (THREAD_SIZE - 1UL)
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
#define free_thread_info(info) kfree(info)
#endif /* !__ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x10000000
/*
* thread information flags
* - these are process state flags that various assembly files may need to
* access
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_MEMDIE 18
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_WORK_MASK (0x0000ffff)
#endif /* __KERNEL__ */
#endif /* _ASM_SCORE_THREAD_INFO_H */
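/*
 * Illustrative only: how the TIF_* bit numbers and _TIF_* masks above are
 * typically combined. The helper name is hypothetical; the real kernel
 * performs the same _TIF_WORK_MASK test in entry.S on the syscall and
 * interrupt return paths.
 */
static inline int score_has_work_pending(struct thread_info *ti)
{
	/* _TIF_WORK_MASK covers the low 16 "work to be done" bits */
	return (ti->flags & _TIF_WORK_MASK) != 0;
}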
#ifndef _ASM_SCORE_TIMEX_H
#define _ASM_SCORE_TIMEX_H
#define CLOCK_TICK_RATE 27000000 /* Timer input freq. */
#include <asm-generic/timex.h>
#endif /* _ASM_SCORE_TIMEX_H */
#ifndef _ASM_SCORE_TLB_H
#define _ASM_SCORE_TLB_H
/*
 * SCORE doesn't need any special per-pte or per-vma handling, except
 * that we need to flush the cache for the area being unmapped.
*/
#define tlb_start_vma(tlb, vma) do {} while (0)
#define tlb_end_vma(tlb, vma) do {} while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do {} while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
extern void score7_FTLB_refill_Handler(void);
#include <asm-generic/tlb.h>
#endif /* _ASM_SCORE_TLB_H */
#ifndef _ASM_SCORE_TLBFLUSH_H
#define _ASM_SCORE_TLBFLUSH_H
#include <linux/mm.h>
/*
* TLB flushing:
*
 * - flush_tlb_all() flushes all processes' TLB entries
* - flush_tlb_mm(mm) flushes the specified mm context TLB entries
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*/
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void local_flush_tlb_kernel_range(unsigned long start,
unsigned long end);
extern void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long page);
extern void local_flush_tlb_one(unsigned long vaddr);
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_range(vma, vmaddr, end) \
local_flush_tlb_range(vma, vmaddr, end)
#define flush_tlb_kernel_range(vmaddr, end) \
local_flush_tlb_kernel_range(vmaddr, end)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
#ifndef __ASSEMBLY__
static inline unsigned long pevn_get(void)
{
unsigned long val;
__asm__ __volatile__(
"mfcr %0, cr11\n"
"nop\nnop\n"
: "=r" (val));
return val;
}
static inline void pevn_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr11\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
static inline void pectx_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr12\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
static inline unsigned long pectx_get(void)
{
unsigned long val;
__asm__ __volatile__(
"mfcr %0, cr12\n"
"nop\nnop\n"
: "=r" (val));
return val;
}
static inline unsigned long tlblock_get(void)
{
unsigned long val;
__asm__ __volatile__(
"mfcr %0, cr7\n"
"nop\nnop\n"
: "=r" (val));
return val;
}
static inline void tlblock_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr7\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
static inline void tlbpt_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr8\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
static inline long tlbpt_get(void)
{
long val;
__asm__ __volatile__(
"mfcr %0, cr8\n"
"nop\nnop\n"
: "=r" (val));
return val;
}
static inline void peaddr_set(unsigned long val)
{
__asm__ __volatile__(
"mtcr %0, cr9\n"
"nop\nnop\nnop\nnop\nnop\n"
: : "r" (val));
}
/* TLB operations. */
static inline void tlb_probe(void)
{
__asm__ __volatile__("stlb;nop;nop;nop;nop;nop");
}
static inline void tlb_read(void)
{
__asm__ __volatile__("mftlb;nop;nop;nop;nop;nop");
}
static inline void tlb_write_indexed(void)
{
__asm__ __volatile__("mtptlb;nop;nop;nop;nop;nop");
}
static inline void tlb_write_random(void)
{
__asm__ __volatile__("mtrtlb;nop;nop;nop;nop;nop");
}
#endif /* Not __ASSEMBLY__ */
#endif /* _ASM_SCORE_TLBFLUSH_H */
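/*
 * Illustrative only: a hypothetical sequence showing how the accessors
 * above would combine to write one TLB entry at a fixed index. The
 * meaning assumed for the control registers (cr8 = index pointer,
 * cr11 = virtual page number, cr12 = entry contents) follows the
 * accessor names, not any documentation in this commit.
 */
static inline void score_write_tlb_entry_sketch(unsigned long index,
		unsigned long vaddr, unsigned long pte)
{
	tlbpt_set(index);	/* select the entry via cr8 */
	pevn_set(vaddr);	/* virtual page number via cr11 */
	pectx_set(pte);		/* entry contents via cr12 */
	tlb_write_indexed();	/* commit with mtptlb */
}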
#ifndef _ASM_SCORE_TOPOLOGY_H
#define _ASM_SCORE_TOPOLOGY_H
#include <asm-generic/topology.h>
#endif /* _ASM_SCORE_TOPOLOGY_H */
#ifndef _ASM_SCORE_TYPES_H
#define _ASM_SCORE_TYPES_H
#include <asm-generic/types.h>
#endif /* _ASM_SCORE_TYPES_H */
#ifndef _ASM_SCORE_UACCESS_H
#define _ASM_SCORE_UACCESS_H
/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);
#ifndef __ASSEMBLY__
#define __range_ok(addr, size) \
((((unsigned long)(addr) >= 0x80000000) \
|| ((unsigned long)(size) > 0x80000000) \
|| (((unsigned long)(addr) + (unsigned long)(size)) > 0x80000000)))
#define __access_ok(addr, size) \
(__range_ok((addr), (size)) == 0)
#include <asm-generic/uaccess.h>
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_UACCESS_H */
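/*
 * Illustrative only: __range_ok() above evaluates non-zero when a user
 * range would touch kernel space at or above 0x80000000, so __access_ok()
 * is its negation. A hypothetical pre-copy check (the helper name is ours):
 */
static inline int score_user_range_ok(const void *buf, unsigned long len)
{
	return __access_ok((unsigned long)buf, len);	/* non-zero = allowed */
}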
#ifndef _ASM_SCORE_UNALIGNED_H
#define _ASM_SCORE_UNALIGNED_H
#include <asm-generic/unaligned.h>
#endif /* _ASM_SCORE_UNALIGNED_H */
#ifndef _ASM_SCORE_UNISTD_H
#define _ASM_SCORE_UNISTD_H
#define __ARCH_HAVE_MMU
#include <asm-generic/unistd.h>
#endif /* _ASM_SCORE_UNISTD_H */
#ifndef _ASM_SCORE_USER_H
#define _ASM_SCORE_USER_H
#endif /* _ASM_SCORE_USER_H */
#
# Makefile for the Linux/SCORE kernel.
#
extra-y := head.o vmlinux.lds
obj-y += entry.o init_task.o irq.o process.o ptrace.o \
setup.o signal.o sys_score.o time.o traps.o
obj-$(CONFIG_MODULES) += module.o
/*
* arch/score/kernel/asm-offsets.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kbuild.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm-generic/cmpxchg-local.h>
void output_ptreg_defines(void)
{
COMMENT("SCORE pt_regs offsets.");
OFFSET(PT_R0, pt_regs, regs[0]);
OFFSET(PT_R1, pt_regs, regs[1]);
OFFSET(PT_R2, pt_regs, regs[2]);
OFFSET(PT_R3, pt_regs, regs[3]);
OFFSET(PT_R4, pt_regs, regs[4]);
OFFSET(PT_R5, pt_regs, regs[5]);
OFFSET(PT_R6, pt_regs, regs[6]);
OFFSET(PT_R7, pt_regs, regs[7]);
OFFSET(PT_R8, pt_regs, regs[8]);
OFFSET(PT_R9, pt_regs, regs[9]);
OFFSET(PT_R10, pt_regs, regs[10]);
OFFSET(PT_R11, pt_regs, regs[11]);
OFFSET(PT_R12, pt_regs, regs[12]);
OFFSET(PT_R13, pt_regs, regs[13]);
OFFSET(PT_R14, pt_regs, regs[14]);
OFFSET(PT_R15, pt_regs, regs[15]);
OFFSET(PT_R16, pt_regs, regs[16]);
OFFSET(PT_R17, pt_regs, regs[17]);
OFFSET(PT_R18, pt_regs, regs[18]);
OFFSET(PT_R19, pt_regs, regs[19]);
OFFSET(PT_R20, pt_regs, regs[20]);
OFFSET(PT_R21, pt_regs, regs[21]);
OFFSET(PT_R22, pt_regs, regs[22]);
OFFSET(PT_R23, pt_regs, regs[23]);
OFFSET(PT_R24, pt_regs, regs[24]);
OFFSET(PT_R25, pt_regs, regs[25]);
OFFSET(PT_R26, pt_regs, regs[26]);
OFFSET(PT_R27, pt_regs, regs[27]);
OFFSET(PT_R28, pt_regs, regs[28]);
OFFSET(PT_R29, pt_regs, regs[29]);
OFFSET(PT_R30, pt_regs, regs[30]);
OFFSET(PT_R31, pt_regs, regs[31]);
OFFSET(PT_ORIG_R4, pt_regs, orig_r4);
OFFSET(PT_ORIG_R7, pt_regs, orig_r7);
OFFSET(PT_CEL, pt_regs, cel);
OFFSET(PT_CEH, pt_regs, ceh);
OFFSET(PT_SR0, pt_regs, sr0);
OFFSET(PT_SR1, pt_regs, sr1);
OFFSET(PT_SR2, pt_regs, sr2);
OFFSET(PT_EPC, pt_regs, cp0_epc);
OFFSET(PT_EMA, pt_regs, cp0_ema);
OFFSET(PT_PSR, pt_regs, cp0_psr);
OFFSET(PT_ECR, pt_regs, cp0_ecr);
OFFSET(PT_CONDITION, pt_regs, cp0_condition);
OFFSET(PT_IS_SYSCALL, pt_regs, is_syscall);
DEFINE(PT_SIZE, sizeof(struct pt_regs));
BLANK();
}
void output_task_defines(void)
{
COMMENT("SCORE task_struct offsets.");
OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
BLANK();
}
void output_thread_info_defines(void)
{
COMMENT("SCORE thread_info offsets.");
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
DEFINE(KERNEL_STACK_MASK, THREAD_MASK);
BLANK();
}
void output_thread_defines(void)
{
COMMENT("SCORE specific thread_struct offsets.");
OFFSET(THREAD_REG0, task_struct, thread.reg0);
OFFSET(THREAD_REG2, task_struct, thread.reg2);
OFFSET(THREAD_REG3, task_struct, thread.reg3);
OFFSET(THREAD_REG12, task_struct, thread.reg12);
OFFSET(THREAD_REG13, task_struct, thread.reg13);
OFFSET(THREAD_REG14, task_struct, thread.reg14);
OFFSET(THREAD_REG15, task_struct, thread.reg15);
OFFSET(THREAD_REG16, task_struct, thread.reg16);
OFFSET(THREAD_REG17, task_struct, thread.reg17);
OFFSET(THREAD_REG18, task_struct, thread.reg18);
OFFSET(THREAD_REG19, task_struct, thread.reg19);
OFFSET(THREAD_REG20, task_struct, thread.reg20);
OFFSET(THREAD_REG21, task_struct, thread.reg21);
OFFSET(THREAD_REG29, task_struct, thread.reg29);
OFFSET(THREAD_PSR, task_struct, thread.cp0_psr);
OFFSET(THREAD_EMA, task_struct, thread.cp0_ema);
OFFSET(THREAD_BADUADDR, task_struct, thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, thread.error_code);
OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no);
BLANK();
}
void output_mm_defines(void)
{
COMMENT("Size of struct page");
DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
BLANK();
COMMENT("Linux mm_struct offsets.");
OFFSET(MM_USERS, mm_struct, mm_users);
OFFSET(MM_PGD, mm_struct, pgd);
OFFSET(MM_CONTEXT, mm_struct, context);
BLANK();
DEFINE(_PAGE_SIZE, PAGE_SIZE);
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
BLANK();
DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
BLANK();
DEFINE(_PGD_ORDER, PGD_ORDER);
DEFINE(_PTE_ORDER, PTE_ORDER);
BLANK();
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
BLANK();
DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
BLANK();
}
void output_sc_defines(void)
{
COMMENT("Linux sigcontext offsets.");
OFFSET(SC_REGS, sigcontext, sc_regs);
OFFSET(SC_MDCEH, sigcontext, sc_mdceh);
OFFSET(SC_MDCEL, sigcontext, sc_mdcel);
OFFSET(SC_PC, sigcontext, sc_pc);
OFFSET(SC_PSR, sigcontext, sc_psr);
OFFSET(SC_ECR, sigcontext, sc_ecr);
OFFSET(SC_EMA, sigcontext, sc_ema);
BLANK();
}
void output_signal_defined(void)
{
COMMENT("Linux signal numbers.");
DEFINE(_SIGHUP, SIGHUP);
DEFINE(_SIGINT, SIGINT);
DEFINE(_SIGQUIT, SIGQUIT);
DEFINE(_SIGILL, SIGILL);
DEFINE(_SIGTRAP, SIGTRAP);
DEFINE(_SIGIOT, SIGIOT);
DEFINE(_SIGABRT, SIGABRT);
DEFINE(_SIGFPE, SIGFPE);
DEFINE(_SIGKILL, SIGKILL);
DEFINE(_SIGBUS, SIGBUS);
DEFINE(_SIGSEGV, SIGSEGV);
DEFINE(_SIGSYS, SIGSYS);
DEFINE(_SIGPIPE, SIGPIPE);
DEFINE(_SIGALRM, SIGALRM);
DEFINE(_SIGTERM, SIGTERM);
DEFINE(_SIGUSR1, SIGUSR1);
DEFINE(_SIGUSR2, SIGUSR2);
DEFINE(_SIGCHLD, SIGCHLD);
DEFINE(_SIGPWR, SIGPWR);
DEFINE(_SIGWINCH, SIGWINCH);
DEFINE(_SIGURG, SIGURG);
DEFINE(_SIGIO, SIGIO);
DEFINE(_SIGSTOP, SIGSTOP);
DEFINE(_SIGTSTP, SIGTSTP);
DEFINE(_SIGCONT, SIGCONT);
DEFINE(_SIGTTIN, SIGTTIN);
DEFINE(_SIGTTOU, SIGTTOU);
DEFINE(_SIGVTALRM, SIGVTALRM);
DEFINE(_SIGPROF, SIGPROF);
DEFINE(_SIGXCPU, SIGXCPU);
DEFINE(_SIGXFSZ, SIGXFSZ);
BLANK();
}
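/*
 * Background sketch (assumed from <linux/kbuild.h>, not shown in this
 * commit): the DEFINE()/OFFSET() helpers used above expand to marker asm
 * that the kbuild asm-offsets machinery turns into plain assembler
 * constants, roughly:
 *
 *	#define DEFINE(sym, val) \
 *		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
 *	#define OFFSET(sym, str, mem) \
 *		DEFINE(sym, offsetof(struct str, mem))
 *
 * which is what lets entry.S below use e.g. PT_EPC as a literal
 * displacement: "lw r9, [r0, PT_EPC]".
 */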
/*
* arch/score/kernel/entry.S
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
/*
 * disable interrupts: the srli/slli pair below clears cr0 bit 0,
 * the interrupt-enable bit.
 */
.macro disable_irq
mfcr r8, cr0
srli r8, r8, 1
slli r8, r8, 1
mtcr r8, cr0
nop
nop
nop
nop
nop
.endm
/*
 * enable interrupts: set cr0 bit 0, the interrupt-enable bit.
 */
.macro enable_irq
mfcr r8, cr0
ori r8, 1
mtcr r8, cr0
nop
nop
nop
nop
nop
.endm
__INIT
ENTRY(debug_exception_vector)
nop!
nop!
nop!
nop!
nop!
nop!
nop!
nop!
ENTRY(general_exception_vector) # should be placed at address 0x200
j general_exception
nop!
nop!
nop!
nop!
nop!
nop!
ENTRY(interrupt_exception_vector) # should be placed at address 0x210
j interrupt_exception
nop!
nop!
nop!
nop!
nop!
nop!
.section ".text", "ax"
.align 2;
general_exception:
mfcr r31, cr2
nop
la r30, exception_handlers
andi r31, 0x1f # get ecr.exc_code
slli r31, r31, 2
add r30, r30, r31
lw r30, [r30]
br r30
interrupt_exception:
SAVE_ALL
mfcr r4, cr2
nop
lw r16, [r28, TI_REGS]
sw r0, [r28, TI_REGS]
la r3, ret_from_irq
srli r4, r4, 18 # get ecr.ip[7:2], the interrupt number
mv r5, r0
j do_IRQ
ENTRY(handle_nmi) # NMI #1
SAVE_ALL
mv r4, r0
la r8, nmi_exception_handler
brl r8
j restore_all
ENTRY(handle_adelinsn) # AdEL-instruction #2
SAVE_ALL
mfcr r8, cr6
nop
nop
sw r8, [r0, PT_EMA]
mv r4, r0
la r8, do_adelinsn
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_ibe) # BusEL-instruction #5
SAVE_ALL
mv r4, r0
la r8, do_be
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_pel) # P-EL #6
SAVE_ALL
mv r4, r0
la r8, do_pel
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_ccu) # CCU #8
SAVE_ALL
mv r4, r0
la r8, do_ccu
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_ri) # RI #9
SAVE_ALL
mv r4, r0
la r8, do_ri
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_tr) # Trap #10
SAVE_ALL
mv r4, r0
la r8, do_tr
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_adedata) # AdES-instruction #12
SAVE_ALL
mfcr r8, cr6
nop
nop
sw r8, [r0, PT_EMA]
mv r4, r0
la r8, do_adedata
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_cee) # CeE #16
SAVE_ALL
mv r4, r0
la r8, do_cee
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_cpe) # CpE #17
SAVE_ALL
mv r4, r0
la r8, do_cpe
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_dbe) # BusEL-data #18
SAVE_ALL
mv r4, r0
la r8, do_be
brl r8
mv r4, r0
j ret_from_exception
nop
ENTRY(handle_reserved) # others
SAVE_ALL
mv r4, r0
la r8, do_reserved
brl r8
mv r4, r0
j ret_from_exception
nop
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
#else
#define __ret_from_irq ret_from_exception
#endif
.align 2
#ifndef CONFIG_PREEMPT
ENTRY(ret_from_exception)
disable_irq # preempt stop
nop
j __ret_from_irq
nop
#endif
ENTRY(ret_from_irq)
sw r16, [r28, TI_REGS]
ENTRY(__ret_from_irq)
lw r8, [r0, PT_PSR] # returning to kernel mode?
andri.c r8, r8, KU_USER
beq resume_kernel
resume_userspace:
disable_irq
lw r6, [r28, TI_FLAGS] # current->work
li r8, _TIF_WORK_MASK
and.c r8, r8, r6 # ignoring syscall_trace
bne work_pending
nop
j restore_all
nop
#ifdef CONFIG_PREEMPT
resume_kernel:
disable_irq
lw r8, [r28, TI_PRE_COUNT]
cmpz.c r8
bne r8, restore_all
need_resched:
lw r8, [r28, TI_FLAGS]
andri.c r9, r8, _TIF_NEED_RESCHED
beq restore_all
lw r8, [r28, PT_PSR] # Interrupts off?
andri.c r8, r8, 1
beq restore_all
bl preempt_schedule_irq
nop
j need_resched
nop
#endif
ENTRY(ret_from_fork)
bl schedule_tail # r4=struct task_struct *prev
ENTRY(syscall_exit)
nop
disable_irq
lw r6, [r28, TI_FLAGS] # current->work
li r8, _TIF_WORK_MASK
and.c r8, r6, r8
bne syscall_exit_work
ENTRY(restore_all) # restore full frame
RESTORE_ALL_AND_RET
work_pending:
andri.c r8, r6, _TIF_NEED_RESCHED # r6 is preloaded with TI_FLAGS
beq work_notifysig
work_resched:
bl schedule
nop
disable_irq
lw r6, [r28, TI_FLAGS]
li r8, _TIF_WORK_MASK
and.c r8, r6, r8 # is there any work to be done
# other than syscall tracing?
beq restore_all
andri.c r8, r6, _TIF_NEED_RESCHED
bne work_resched
work_notifysig:
mv r4, r0
li r5, 0
bl do_notify_resume # r6 already loaded
nop
j resume_userspace
nop
ENTRY(syscall_exit_work)
li r8, _TIF_SYSCALL_TRACE
and.c r8, r8, r6 # r6 is preloaded with TI_FLAGS
beq work_pending # trace bit set?
nop
enable_irq
mv r4, r0
li r5, 1
bl do_syscall_trace
nop
b resume_userspace
nop
.macro save_context reg
sw r12, [\reg, THREAD_REG12];
sw r13, [\reg, THREAD_REG13];
sw r14, [\reg, THREAD_REG14];
sw r15, [\reg, THREAD_REG15];
sw r16, [\reg, THREAD_REG16];
sw r17, [\reg, THREAD_REG17];
sw r18, [\reg, THREAD_REG18];
sw r19, [\reg, THREAD_REG19];
sw r20, [\reg, THREAD_REG20];
sw r21, [\reg, THREAD_REG21];
sw r29, [\reg, THREAD_REG29];
sw r2, [\reg, THREAD_REG2];
sw r0, [\reg, THREAD_REG0]
.endm
.macro restore_context reg
lw r12, [\reg, THREAD_REG12];
lw r13, [\reg, THREAD_REG13];
lw r14, [\reg, THREAD_REG14];
lw r15, [\reg, THREAD_REG15];
lw r16, [\reg, THREAD_REG16];
lw r17, [\reg, THREAD_REG17];
lw r18, [\reg, THREAD_REG18];
lw r19, [\reg, THREAD_REG19];
lw r20, [\reg, THREAD_REG20];
lw r21, [\reg, THREAD_REG21];
lw r29, [\reg, THREAD_REG29];
lw r0, [\reg, THREAD_REG0];
lw r2, [\reg, THREAD_REG2];
lw r3, [\reg, THREAD_REG3]
.endm
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti)
*/
ENTRY(resume)
mfcr r9, cr0
nop
nop
sw r9, [r4, THREAD_PSR]
save_context r4
sw r3, [r4, THREAD_REG3]
mv r28, r6
restore_context r5
mv r8, r6
addi r8, KERNEL_STACK_SIZE
subi r8, 32
la r9, kernelsp;
sw r8, [r9];
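# Descriptive note (ldis assumed to load its immediate into the upper
# halfword): keep the live cr0 bits under mask 0x00ff0000, take the
# remaining bits from the PSR saved in next's thread, then write cr0.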
mfcr r9, cr0
ldis r7, 0x00ff
nop
and r9, r9, r7
lw r6, [r5, THREAD_PSR]
not r7, r7
and r6, r6, r7
or r6, r6, r9
mtcr r6, cr0
nop; nop; nop; nop; nop
br r3
ENTRY(handle_sys)
SAVE_ALL
enable_irq
sw r4, [r0, PT_ORIG_R4] # for syscall restart
sw r7, [r0, PT_ORIG_R7] # for syscall restart
sw r27, [r0, PT_IS_SYSCALL] # mark frame as coming from a syscall
lw r9, [r0, PT_EPC] # skip syscall on return
addi r9, 4
sw r9, [r0, PT_EPC]
cmpi.c r27, __NR_syscalls # check syscall number
bgtu illegal_syscall
slli r8, r27, 3 # get syscall routine
la r11, sys_call_table
add r11, r11, r8
lw r10, [r11] # get syscall entry
lw r11, [r11, 4] # get number of args
cmpz.c r10
beq illegal_syscall
cmpi.c r11, 4 # more than 4 arguments?
bgtu stackargs
stack_done:
lw r8, [r28, TI_FLAGS]
li r9, _TIF_SYSCALL_TRACE
and.c r8, r8, r9
bne syscall_trace_entry
brl r10 # Do The Real system call
cmpi.c r4, 0
blt 1f
ldi r8, 0
sw r8, [r0, PT_R7]
b 2f
1:
cmpi.c r4, -EMAXERRNO-1 # -EMAXERRNO - 1 = -1134
ble 2f
ldi r8, 0x1;
sw r8, [r0, PT_R7]
neg r4, r4
2:
sw r4, [r0, PT_R4] # save result
syscall_return:
disable_irq
lw r6, [r28, TI_FLAGS] # current->work
li r8, _TIF_WORK_MASK
and.c r8, r6, r8
bne syscall_return_work
j restore_all
syscall_return_work:
j syscall_exit_work
syscall_trace_entry:
mv r16, r10
mv r4, r0
li r5, 0
bl do_syscall_trace
mv r8, r16
lw r4, [r0, PT_R4] # Restore argument registers
lw r5, [r0, PT_R5]
lw r6, [r0, PT_R6]
lw r7, [r0, PT_R7]
brl r8
li r8, -EMAXERRNO - 1 # error?
sw r8, [r0, PT_R7] # set error flag
neg r4, r4 # error
sw r4, [r0, PT_R0] # set flag for syscall
# restarting
1: sw r4, [r0, PT_R2] # result
j syscall_exit
stackargs:
lw r8, [r0, PT_R0]
andri.c r9, r8, 3 # test whether the user sp is word-aligned
bne bad_stack
subi r11, 5
slli r9, r11, 2
add.c r9, r9, r8
bmi bad_stack
la r9, 3f # calculate branch address
slli r11, r11, 3
sub r9, r9, r11
br r9
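# Descriptive note (4-byte instruction encodings assumed): each numbered
# lw/sw pair below is 8 bytes, so r9 = 3f - (nargs - 5) * 8 starts the
# copy at the highest stacked argument and falls through the rest.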
2: lw r9, [r8, 20] # argument 6 from usp
sw r9, [r0, 20]
3: lw r9, [r8, 16] # argument 5 from usp
sw r9, [r0, 16]
j stack_done
.section __ex_table,"a"
.word 2b, bad_stack
.word 3b, bad_stack
.previous
/*
 * The stack pointer for a call with more than 4 arguments is bad.
 * We should probably handle this case more drastically.
 */
bad_stack:
neg r27, r27 # error
sw r27, [r0, PT_ORIG_R4]
sw r27, [r0, PT_R4]
ldi r8, 1 # set error flag
sw r8, [r0, PT_R7]
j syscall_return
illegal_syscall:
ldi r4, -ENOSYS # error
sw r4, [r0, PT_ORIG_R4]
sw r4, [r0, PT_R4]
ldi r9, 1 # set error flag
sw r9, [r0, PT_R7]
j syscall_return
ENTRY(sys_execve)
mv r4, r0
la r8, score_execve
br r8
ENTRY(sys_clone)
mv r4, r0
la r8, score_clone
br r8
ENTRY(sys_rt_sigreturn)
mv r4, r0
la r8, score_rt_sigreturn
br r8
ENTRY(sys_sigaltstack)
mv r4, r0
la r8, score_sigaltstack
br r8
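# Descriptive note (inferred from the PT_* addressing throughout this
# file): after SAVE_ALL, r0 points at the saved struct pt_regs, so the
# "mv r4, r0" in the wrappers above passes the register frame as the
# first C argument.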
/*
* arch/score/kernel/head.S
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
.extern start_kernel
.global init_thread_union
.global kernelsp
__INIT
ENTRY(_stext)
la r30, __bss_start /* initialize BSS segment. */
la r31, _end
xor r8, r8, r8
1: cmp.c r31, r30
beq 2f
sw r8, [r30] /* clean memory. */
addi r30, 4
b 1b
2: la r28, init_thread_union /* set kernel stack. */
mv r0, r28
addi r0, KERNEL_STACK_SIZE - 32
la r30, kernelsp
sw r0, [r30]
subi r0, 4*4
xor r30, r30, r30
ori r30, 0x02 /* enable MMU. */
mtcr r30, cr4
nop
nop
nop
nop
nop
nop
nop
/* start_kernel takes no parameters; clear the argument registers. */
xor r4, r4, r4
xor r5, r5, r5
xor r6, r6, r6
xor r7, r7, r7
la r30, start_kernel /* jump to start_kernel */
br r30
/*
* arch/score/kernel/init_task.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init_task.h>
#include <linux/mqueue.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* arch/score/kernel/irq.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/io.h>
/*
* handles all normal device IRQs
*/
asmlinkage void do_IRQ(int irq)
{
irq_enter();
generic_handle_irq(irq);
irq_exit();
}
static void score_mask(unsigned int irq_nr)
{
unsigned int irq_source = 63 - irq_nr;
if (irq_source < 32)
__raw_writel((__raw_readl((void *)P_INT_MASKL) | \
(1 << irq_source)), (void *)P_INT_MASKL);
else
__raw_writel((__raw_readl((void *)P_INT_MASKH) | \
(1 << (irq_source - 32))), (void *)P_INT_MASKH);
}
static void score_unmask(unsigned int irq_nr)
{
unsigned int irq_source = 63 - irq_nr;
if (irq_source < 32)
__raw_writel((__raw_readl((void *)P_INT_MASKL) & \
~(1 << irq_source)), (void *)P_INT_MASKL);
else
__raw_writel((__raw_readl((void *)P_INT_MASKH) & \
~(1 << (irq_source - 32))), (void *)P_INT_MASKH);
}
struct irq_chip score_irq_chip = {
.name = "Score7-level",
.mask = score_mask,
.mask_ack = score_mask,
.unmask = score_unmask,
};
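/*
 * Illustrative only: the interrupt controller numbers its sources in
 * reverse order of the Linux IRQ numbers, hence "63 - irq_nr" above.
 * The helper name is ours. For example, Linux irq 60 maps to source 3
 * (bit 3 of P_INT_MASKL) and Linux irq 10 maps to source 53 (bit 21 of
 * P_INT_MASKH).
 */
static inline unsigned int score_irq_to_source(unsigned int irq_nr)
{
	return 63 - irq_nr;
}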
/*
* initialise the interrupt system
*/
void __init init_IRQ(void)
{
int index;
unsigned long target_addr;
for (index = 0; index < NR_IRQS; ++index)
set_irq_chip_and_handler(index, &score_irq_chip,
handle_level_irq);
for (target_addr = IRQ_VECTOR_BASE_ADDR;
target_addr <= IRQ_VECTOR_END_ADDR;
target_addr += IRQ_VECTOR_SIZE)
memcpy((void *)target_addr, \
interrupt_exception_vector, IRQ_VECTOR_SIZE);
__raw_writel(0xffffffff, (void *)P_INT_MASKL);
__raw_writel(0xffffffff, (void *)P_INT_MASKH);
__asm__ __volatile__(
"mtcr %0, cr3\n\t"
: : "r" (EXCEPTION_VECTOR_BASE_ADDR | \
VECTOR_ADDRESS_OFFSET_MODE16));
}
/*
* Generic, controller-independent functions:
*/
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *)v, cpu;
struct irqaction *action;
unsigned long flags;
if (i == 0) {
seq_puts(p, " ");
for_each_online_cpu(cpu)
seq_printf(p, "CPU%d ", cpu);
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
seq_printf(p, "%3d: ", i);
seq_printf(p, "%10u ", kstat_irqs(i));
seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
/*
* arch/score/kernel/module.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
void *module_alloc(unsigned long size)
{
return size ? vmalloc(size) : NULL;
}
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
}
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
return 0;
}
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relindex,
struct module *me)
{
Elf32_Shdr *symsec = sechdrs + symindex;
Elf32_Shdr *relsec = sechdrs + relindex;
Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
Elf32_Rel *rel = (void *)relsec->sh_addr;
unsigned int i;
for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
unsigned long loc;
Elf32_Sym *sym;
s32 offset;
offset = ELF32_R_SYM(rel->r_info);
if ((offset < 0) ||
(offset > (symsec->sh_size / sizeof(Elf32_Sym)))) {
printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
me->name, relindex, i);
return -ENOEXEC;
}
sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
if ((rel->r_offset < 0) ||
(rel->r_offset > dstsec->sh_size - sizeof(u32))) {
printk(KERN_ERR "%s: out of bounds relocation, "
"section %d reloc %d offset %d size %d\n",
me->name, relindex, i, rel->r_offset,
dstsec->sh_size);
return -ENOEXEC;
}
loc = dstsec->sh_addr + rel->r_offset;
switch (ELF32_R_TYPE(rel->r_info)) {
case R_SCORE_NONE:
break;
case R_SCORE_ABS32:
*(unsigned long *)loc += sym->st_value;
break;
case R_SCORE_HI16:
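/*
 * Descriptive note: nothing to do here because the matching
 * R_SCORE_LO16 case below also rewrites the preceding HI16
 * instruction word (see temp_hi there).
 */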
break;
case R_SCORE_LO16: {
unsigned long hi16_offset, offset;
unsigned long uvalue;
unsigned long temp, temp_hi;
temp_hi = *((unsigned long *)loc - 1);
temp = *(unsigned long *)loc;
hi16_offset = (((((temp_hi) >> 16) & 0x3) << 15) |
((temp_hi) & 0x7fff)) >> 1;
offset = ((temp >> 16 & 0x03) << 15) |
((temp & 0x7fff) >> 1);
offset = (hi16_offset << 16) | (offset & 0xffff);
uvalue = sym->st_value + offset;
hi16_offset = (uvalue >> 16) << 1;
temp_hi = ((temp_hi) & (~(0x37fff))) |
(hi16_offset & 0x7fff) |
((hi16_offset << 1) & 0x30000);
*((unsigned long *)loc - 1) = temp_hi;
offset = (uvalue & 0xffff) << 1;
temp = (temp & (~(0x37fff))) | (offset & 0x7fff) |
((offset << 1) & 0x30000);
*(unsigned long *)loc = temp;
break;
}
case R_SCORE_24: {
unsigned long hi16_offset, offset;
unsigned long uvalue;
unsigned long temp;
temp = *(unsigned long *)loc;
offset = (temp & 0x03FF7FFE);
hi16_offset = (offset & 0xFFFF0000);
offset = (hi16_offset | ((offset & 0xFFFF) << 1)) >> 2;
uvalue = (sym->st_value + offset) >> 1;
uvalue = uvalue & 0x00ffffff;
temp = (temp & 0xfc008001) |
((uvalue << 2) & 0x3ff0000) |
((uvalue & 0x3fff) << 1);
*(unsigned long *)loc = temp;
break;
}
default:
printk(KERN_ERR "%s: unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel->r_info));
return -ENOEXEC;
}
}
return 0;
}
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
return 0;
}
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_dbetables(unsigned long addr)
{
return NULL;
}
/* Put in dbe list if necessary. */
int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *me)
{
return 0;
}
void module_arch_cleanup(struct module *mod) {}