Commit d0bd31dc authored by Linus Torvalds

Merge tag 'xtensa-20180129' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

 - add SSP support

 - add KASAN support

 - improvements to xtensa-specific assembly:
    - use ENTRY and ENDPROC consistently
    - clean up and unify word alignment macros
    - clean up and unify fixup marking
    - use 'call' instead of 'callx' where possible

 - various cleanups:
    - consolidate kernel stack size related definitions
    - replace #ifdef'fed/commented out debug printk statements with
      pr_debug
    - use struct exc_table instead of flat array for exception handling
      data

 - build kernel with -mtext-section-literals; simplify xtensa linker
   script

 - fix futex_atomic_cmpxchg_inatomic()

* tag 'xtensa-20180129' of git://github.com/jcmvbkbc/linux-xtensa: (21 commits)
  xtensa: fix futex_atomic_cmpxchg_inatomic
  xtensa: shut up gcc-8 warnings
  xtensa: print kernel sections info in mem_init
  xtensa: use generic strncpy_from_user with KASAN
  xtensa: use __memset in __xtensa_clear_user
  xtensa: add support for KASAN
  xtensa: move fixmap and kmap just above the KSEG
  xtensa: don't clear swapper_pg_dir in paging_init
  xtensa: extract init_kio
  xtensa: implement early_trap_init
  xtensa: clean up exception handling structure
  xtensa: clean up custom-controlled debug output
  xtensa: enable stack protector
  xtensa: print hardware config ID on startup
  xtensa: consolidate kernel stack size related definitions
  xtensa: clean up functions in assembly code
  xtensa: clean up word alignment macros in assembly code
  xtensa: clean up fixups in assembly code
  xtensa: use call instead of callx in assembly code
  xtensa: build kernel with text-section-literals
  ...
parents aca21de2 ca474809
...@@ -35,5 +35,5 @@
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  | 64-bit only
-    |      xtensa: | TODO |
+    |      xtensa: |  ok  |
     -----------------------
...@@ -35,5 +35,5 @@
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  |
-    |      xtensa: | TODO |
+    |      xtensa: |  ok  |
     -----------------------
...@@ -69,19 +69,10 @@ Default MMUv2-compatible layout.
 | Userspace        |                            0x00000000  TASK_SIZE
 +------------------+                            0x40000000
 +------------------+
-| Page table       |                            0x80000000
-+------------------+                            0x80400000
+| Page table       |  XCHAL_PAGE_TABLE_VADDR    0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
-| KMAP area        |  PKMAP_BASE                            PTRS_PER_PTE *
-|                  |                                        DCACHE_N_COLORS *
-|                  |                                        PAGE_SIZE
-|                  |                                        (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area |  FIXADDR_START                         KM_TYPE_NR *
-|                  |                                        NR_CPUS *
-|                  |                                        DCACHE_N_COLORS *
-|                  |                                        PAGE_SIZE
-+------------------+  FIXADDR_TOP               0xbffff000
+| KASAN shadow map |  KASAN_SHADOW_START        0x80400000  KASAN_SHADOW_SIZE
++------------------+                            0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START             0xc0000000  128MB - 64KB
 +------------------+  VMALLOC_END
...@@ -92,6 +83,17 @@ Default MMUv2-compatible layout.
 | remap area 2     |
 +------------------+
 +------------------+
+| KMAP area        |  PKMAP_BASE                            PTRS_PER_PTE *
+|                  |                                        DCACHE_N_COLORS *
+|                  |                                        PAGE_SIZE
+|                  |                                        (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area |  FIXADDR_START                         KM_TYPE_NR *
+|                  |                                        NR_CPUS *
+|                  |                                        DCACHE_N_COLORS *
+|                  |                                        PAGE_SIZE
++------------------+  FIXADDR_TOP               0xcffff000
++------------------+
 | Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR   0xd0000000  128MB
 +------------------+
 | Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR   0xd8000000  128MB
...@@ -109,19 +111,10 @@ Default MMUv2-compatible layout.
 | Userspace        |                            0x00000000  TASK_SIZE
 +------------------+                            0x40000000
 +------------------+
-| Page table       |                            0x80000000
-+------------------+                            0x80400000
+| Page table       |  XCHAL_PAGE_TABLE_VADDR    0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
-| KMAP area        |  PKMAP_BASE                            PTRS_PER_PTE *
-|                  |                                        DCACHE_N_COLORS *
-|                  |                                        PAGE_SIZE
-|                  |                                        (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area |  FIXADDR_START                         KM_TYPE_NR *
-|                  |                                        NR_CPUS *
-|                  |                                        DCACHE_N_COLORS *
-|                  |                                        PAGE_SIZE
-+------------------+  FIXADDR_TOP               0x9ffff000
+| KASAN shadow map |  KASAN_SHADOW_START        0x80400000  KASAN_SHADOW_SIZE
++------------------+                            0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START             0xa0000000  128MB - 64KB
 +------------------+  VMALLOC_END
...@@ -132,6 +125,17 @@ Default MMUv2-compatible layout.
 | remap area 2     |
 +------------------+
 +------------------+
+| KMAP area        |  PKMAP_BASE                            PTRS_PER_PTE *
+|                  |                                        DCACHE_N_COLORS *
+|                  |                                        PAGE_SIZE
+|                  |                                        (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area |  FIXADDR_START                         KM_TYPE_NR *
+|                  |                                        NR_CPUS *
+|                  |                                        DCACHE_N_COLORS *
+|                  |                                        PAGE_SIZE
++------------------+  FIXADDR_TOP               0xaffff000
++------------------+
 | Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR   0xb0000000  256MB
 +------------------+
 | Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR   0xc0000000  256MB
...@@ -150,19 +154,10 @@ Default MMUv2-compatible layout.
 | Userspace        |                            0x00000000  TASK_SIZE
 +------------------+                            0x40000000
 +------------------+
-| Page table       |                            0x80000000
-+------------------+                            0x80400000
+| Page table       |  XCHAL_PAGE_TABLE_VADDR    0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
-| KMAP area        |  PKMAP_BASE                            PTRS_PER_PTE *
-|                  |                                        DCACHE_N_COLORS *
-|                  |                                        PAGE_SIZE
-|                  |                                        (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area |  FIXADDR_START                         KM_TYPE_NR *
-|                  |                                        NR_CPUS *
-|                  |                                        DCACHE_N_COLORS *
-|                  |                                        PAGE_SIZE
-+------------------+  FIXADDR_TOP               0x8ffff000
+| KASAN shadow map |  KASAN_SHADOW_START        0x80400000  KASAN_SHADOW_SIZE
++------------------+                            0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START             0x90000000  128MB - 64KB
 +------------------+  VMALLOC_END
...@@ -173,6 +168,17 @@ Default MMUv2-compatible layout.
 | remap area 2     |
 +------------------+
 +------------------+
+| KMAP area        |  PKMAP_BASE                            PTRS_PER_PTE *
+|                  |                                        DCACHE_N_COLORS *
+|                  |                                        PAGE_SIZE
+|                  |                                        (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area |  FIXADDR_START                         KM_TYPE_NR *
+|                  |                                        NR_CPUS *
+|                  |                                        DCACHE_N_COLORS *
+|                  |                                        PAGE_SIZE
++------------------+  FIXADDR_TOP               0x9ffff000
++------------------+
 | Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR   0xa0000000  512MB
 +------------------+
 | Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR   0xc0000000  512MB
......
...@@ -15,6 +15,9 @@ config XTENSA ...@@ -15,6 +15,9 @@ config XTENSA
select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK select GENERIC_SCHED_CLOCK
select GENERIC_STRNCPY_FROM_USER if KASAN
select HAVE_ARCH_KASAN if MMU
select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS select HAVE_DMA_CONTIGUOUS
...@@ -79,6 +82,10 @@ config VARIANT_IRQ_SWITCH ...@@ -79,6 +82,10 @@ config VARIANT_IRQ_SWITCH
config HAVE_XTENSA_GPIO32 config HAVE_XTENSA_GPIO32
def_bool n def_bool n
config KASAN_SHADOW_OFFSET
hex
default 0x6e400000
menu "Processor type and features" menu "Processor type and features"
choice choice
......
...@@ -42,10 +42,11 @@ export PLATFORM ...@@ -42,10 +42,11 @@ export PLATFORM
# temporarily until string.h is fixed # temporarily until string.h is fixed
KBUILD_CFLAGS += -ffreestanding -D__linux__ KBUILD_CFLAGS += -ffreestanding -D__linux__
KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals
KBUILD_CFLAGS += -pipe -mlongcalls
KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,) KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
ifneq ($(CONFIG_LD_NO_RELAX),) ifneq ($(CONFIG_LD_NO_RELAX),)
LDFLAGS := --no-relax LDFLAGS := --no-relax
......
...@@ -42,6 +42,7 @@ __start_a0: ...@@ -42,6 +42,7 @@ __start_a0:
.align 4 .align 4
.section .text, "ax" .section .text, "ax"
.literal_position
.begin literal_prefix .text .begin literal_prefix .text
/* put literals in here! */ /* put literals in here! */
......
...@@ -15,6 +15,12 @@ CFLAGS_REMOVE_inftrees.o = -pg ...@@ -15,6 +15,12 @@ CFLAGS_REMOVE_inftrees.o = -pg
CFLAGS_REMOVE_inffast.o = -pg CFLAGS_REMOVE_inffast.o = -pg
endif endif
KASAN_SANITIZE := n
CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong
CFLAGS_REMOVE_inffast.o += -fstack-protector -fstack-protector-strong
quiet_cmd_copy_zlib = COPY $@ quiet_cmd_copy_zlib = COPY $@
cmd_copy_zlib = cat $< > $@ cmd_copy_zlib = cat $< > $@
......
...@@ -150,5 +150,45 @@ ...@@ -150,5 +150,45 @@
__endl \ar \as __endl \ar \as
.endm .endm
/* Load or store instructions that may cause exceptions use the EX macro. */
#define EX(handler) \
.section __ex_table, "a"; \
.word 97f, handler; \
.previous \
97:
/*
* Extract unaligned word that is split between two registers w0 and w1
* into r regardless of machine endianness. SAR must be loaded with the
* starting bit of the word (see __ssa8).
*/
.macro __src_b r, w0, w1
#ifdef __XTENSA_EB__
src \r, \w0, \w1
#else
src \r, \w1, \w0
#endif
.endm
/*
* Load 2 lowest address bits of r into SAR for __src_b to extract unaligned
* word starting at r from two registers loaded from consecutive aligned
* addresses covering r regardless of machine endianness.
*
* r 0 1 2 3
* LE SAR 0 8 16 24
* BE SAR 32 24 16 8
*/
.macro __ssa8 r
#ifdef __XTENSA_EB__
ssa8b \r
#else
ssa8l \r
#endif
.endm
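As a rough illustration of what the __ssa8/__src_b pair computes, here is a standalone C sketch of the little-endian case only; the helper name is hypothetical and this is not kernel code:

    #include <stdint.h>

    /*
     * Little-endian sketch of __ssa8 (ssa8l) + __src_b (src): load the two
     * aligned words covering an unaligned address and funnel-shift them
     * together to recover the unaligned 32-bit value.
     */
    static uint32_t load_unaligned_le(uintptr_t addr)
    {
            const uint32_t *p = (const uint32_t *)(addr & ~(uintptr_t)3);
            unsigned int sar = 8 * (addr & 3);  /* value ssa8l puts in SAR */
            uint32_t w0 = p[0];                 /* aligned word holding the low bytes */

            if (!sar)                           /* address was already aligned */
                    return w0;
            /* "src r, w1, w0": right funnel shift of {w1,w0} by SAR bits */
            return (w0 >> sar) | (p[1] << (32 - sar));
    }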
#endif /* _XTENSA_ASMMACRO_H */ #endif /* _XTENSA_ASMMACRO_H */
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
#ifndef _XTENSA_CURRENT_H #ifndef _XTENSA_CURRENT_H
#define _XTENSA_CURRENT_H #define _XTENSA_CURRENT_H
#include <asm/thread_info.h>
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/thread_info.h> #include <linux/thread_info.h>
...@@ -26,8 +28,6 @@ static inline struct task_struct *get_current(void) ...@@ -26,8 +28,6 @@ static inline struct task_struct *get_current(void)
#else #else
#define CURRENT_SHIFT 13
#define GET_CURRENT(reg,sp) \ #define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \ GET_THREAD_INFO(reg,sp); \
l32i reg, reg, TI_TASK \ l32i reg, reg, TI_TASK \
......
...@@ -44,7 +44,7 @@ enum fixed_addresses { ...@@ -44,7 +44,7 @@ enum fixed_addresses {
__end_of_fixed_addresses __end_of_fixed_addresses
}; };
#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE) #define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
...@@ -63,7 +63,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) ...@@ -63,7 +63,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx)
* table. * table.
*/ */
BUILD_BUG_ON(FIXADDR_START < BUILD_BUG_ON(FIXADDR_START <
XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUILD_BUG_ON(idx >= __end_of_fixed_addresses); BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx); return __fix_to_virt(idx);
} }
......
...@@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval) u32 oldval, u32 newval)
{ {
int ret = 0; int ret = 0;
u32 prev;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
...@@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__asm__ __volatile__ ( __asm__ __volatile__ (
" # futex_atomic_cmpxchg_inatomic\n" " # futex_atomic_cmpxchg_inatomic\n"
"1: l32i %1, %3, 0\n" " wsr %5, scompare1\n"
" mov %0, %5\n" "1: s32c1i %1, %4, 0\n"
" wsr %1, scompare1\n" " s32i %1, %6, 0\n"
"2: s32c1i %0, %3, 0\n" "2:\n"
"3:\n"
" .section .fixup,\"ax\"\n" " .section .fixup,\"ax\"\n"
" .align 4\n" " .align 4\n"
"4: .long 3b\n" "3: .long 2b\n"
"5: l32r %1, 4b\n" "4: l32r %1, 3b\n"
" movi %0, %6\n" " movi %0, %7\n"
" jx %1\n" " jx %1\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
" .long 1b,5b,2b,5b\n" " .long 1b,4b\n"
" .previous\n" " .previous\n"
: "+r" (ret), "=&r" (prev), "+m" (*uaddr) : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
: "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
: "memory"); : "memory");
*uval = prev;
return ret; return ret;
} }
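In plain C, the semantics the fixed sequence implements look roughly like this; a sketch only, since the real routine operates on a user pointer with exception-table fixups, and the gcc builtin merely stands in for the s32c1i/scompare1 pair:

    /*
     * Sketch of futex_atomic_cmpxchg_inatomic semantics: atomically store
     * newval if *uaddr still holds oldval, report the value actually
     * observed through *uval, and return 0 on success.
     */
    static inline int cmpxchg_inatomic_sketch(unsigned int *uval, unsigned int *uaddr,
                                              unsigned int oldval, unsigned int newval)
    {
            *uval = __sync_val_compare_and_swap(uaddr, oldval, newval);
            return 0;
    }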
......
...@@ -72,7 +72,7 @@ static inline void *kmap(struct page *page) ...@@ -72,7 +72,7 @@ static inline void *kmap(struct page *page)
* page table. * page table.
*/ */
BUILD_BUG_ON(PKMAP_BASE < BUILD_BUG_ON(PKMAP_BASE <
XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE); TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUG_ON(in_interrupt()); BUG_ON(in_interrupt());
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
#ifndef __ASSEMBLY__
#ifdef CONFIG_KASAN
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <asm/kmem_layout.h>
/* Start of area covered by KASAN */
#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
/* Start of the shadow map */
#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
/* Size of the shadow map */
#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
/* Offset for mem to shadow address transformation */
#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
void __init kasan_early_init(void);
void __init kasan_init(void);
#else
static inline void kasan_early_init(void)
{
}
static inline void kasan_init(void)
{
}
#endif
#endif
#endif
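A quick arithmetic check of how the constants above fit together, as a standalone sketch assuming the generic KASAN shadow scale of 1/8 (KASAN_SHADOW_SCALE_SHIFT == 3) and the CONFIG_KASAN_SHADOW_OFFSET default of 0x6e400000 added by this series:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t kasan_start   = 0x90000000u;   /* KASAN_START_VADDR   */
            uint32_t shadow_offset = 0x6e400000u;   /* KASAN_SHADOW_OFFSET */
            /* shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET */
            uint32_t shadow_start  = (kasan_start >> 3) + shadow_offset;
            uint32_t shadow_size   = (uint32_t)-kasan_start >> 3;

            assert(shadow_start == 0x80400000u);               /* KASAN_SHADOW_START */
            assert(shadow_start + shadow_size == 0x8e400000u); /* end shown in the layout docs */
            return 0;
    }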
...@@ -71,4 +71,11 @@ ...@@ -71,4 +71,11 @@
#endif #endif
#ifndef CONFIG_KASAN
#define KERNEL_STACK_SHIFT 13
#else
#define KERNEL_STACK_SHIFT 15
#endif
#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
#endif #endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
#endif
...@@ -52,6 +52,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache); ...@@ -52,6 +52,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache);
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
void init_mmu(void); void init_mmu(void);
void init_kio(void);
static inline void set_rasid_register (unsigned long val) static inline void set_rasid_register (unsigned long val)
{ {
......
...@@ -3,6 +3,10 @@ static inline void init_mmu(void) ...@@ -3,6 +3,10 @@ static inline void init_mmu(void)
{ {
} }
static inline void init_kio(void)
{
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{ {
} }
......
...@@ -36,8 +36,6 @@ ...@@ -36,8 +36,6 @@
#define MAX_LOW_PFN PHYS_PFN(0xfffffffful) #define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
#endif #endif
#define PGTABLE_START 0x80000000
/* /*
* Cache aliasing: * Cache aliasing:
* *
......
...@@ -12,9 +12,9 @@ ...@@ -12,9 +12,9 @@
#define _XTENSA_PGTABLE_H #define _XTENSA_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK #define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/kmem_layout.h> #include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>
/* /*
* We only use two ring levels, user and kernel space. * We only use two ring levels, user and kernel space.
...@@ -170,6 +170,7 @@ ...@@ -170,6 +170,7 @@
#define PAGE_SHARED_EXEC \ #define PAGE_SHARED_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC) __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
#if (DCACHE_WAY_SIZE > PAGE_SIZE) #if (DCACHE_WAY_SIZE > PAGE_SIZE)
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#ifndef _XTENSA_PTRACE_H #ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H #define _XTENSA_PTRACE_H
#include <asm/kmem_layout.h>
#include <uapi/asm/ptrace.h> #include <uapi/asm/ptrace.h>
/* /*
...@@ -38,20 +39,6 @@ ...@@ -38,20 +39,6 @@
* +-----------------------+ -------- * +-----------------------+ --------
*/ */
#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
#define EXC_TABLE_SIZE 0x400
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/coprocessor.h> #include <asm/coprocessor.h>
......
...@@ -76,6 +76,7 @@ ...@@ -76,6 +76,7 @@
#define EXCCAUSE_COPROCESSOR5_DISABLED 37 #define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38 #define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39 #define EXCCAUSE_COPROCESSOR7_DISABLED 39
#define EXCCAUSE_N 64
/* PS register fields. */ /* PS register fields. */
......
/*
* GCC stack protector support.
*
* (This is directly adopted from the ARM implementation)
*
* Stack protector works by putting predefined pattern at the start of
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function. The pattern is called stack canary
* and gcc expects it to be defined by a global variable called
* "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP
* we cannot have a different canary value per task.
*/
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1
#include <linux/random.h>
#include <linux/version.h>
extern unsigned long __stack_chk_guard;
/*
* Initialize the stackprotector canary value.
*
* NOTE: this must only be called from functions that never return,
* and it must always be inlined.
*/
static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary;
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
}
#endif /* _ASM_STACKPROTECTOR_H */
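For reference, here is what a -fstack-protector-instrumented function effectively does with this guard variable; an illustrative userspace-style sketch, not kernel code, where __stack_chk_fail is the standard gcc failure hook:

    extern unsigned long __stack_chk_guard;
    extern void __stack_chk_fail(void);

    void instrumented_function(const char *src)
    {
            unsigned long canary = __stack_chk_guard;   /* compiler-inserted prologue */
            char buf[64];
            unsigned int i;

            for (i = 0; i < sizeof(buf) && src[i]; i++) /* body that writes to the frame */
                    buf[i] = src[i];

            if (canary != __stack_chk_guard)            /* compiler-inserted epilogue check */
                    __stack_chk_fail();
    }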
...@@ -53,7 +53,7 @@ static inline char *strncpy(char *__dest, const char *__src, size_t __n) ...@@ -53,7 +53,7 @@ static inline char *strncpy(char *__dest, const char *__src, size_t __n)
"bne %1, %5, 1b\n" "bne %1, %5, 1b\n"
"2:" "2:"
: "=r" (__dest), "=r" (__src), "=&r" (__dummy) : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
: "0" (__dest), "1" (__src), "r" (__src+__n) : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n)
: "memory"); : "memory");
return __xdest; return __xdest;
...@@ -101,21 +101,40 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n) ...@@ -101,21 +101,40 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
"2:\n\t" "2:\n\t"
"sub %2, %2, %3" "sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
: "0" (__cs), "1" (__ct), "r" (__cs+__n)); : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n));
return __res; return __res;
} }
#define __HAVE_ARCH_MEMSET #define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count); extern void *memset(void *__s, int __c, size_t __count);
extern void *__memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY #define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n); extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE #define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n); extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
/* Don't build bcopy at all ... */ /* Don't build bcopy at all ... */
#define __HAVE_ARCH_BCOPY #define __HAVE_ARCH_BCOPY
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
/*
* For files that are not instrumented (e.g. mm/slub.c) we
* should use not instrumented version of mem* functions.
*/
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif
#endif
#endif /* _XTENSA_STRING_H */ #endif /* _XTENSA_STRING_H */
...@@ -11,7 +11,9 @@ ...@@ -11,7 +11,9 @@
#ifndef _XTENSA_THREAD_INFO_H #ifndef _XTENSA_THREAD_INFO_H
#define _XTENSA_THREAD_INFO_H #define _XTENSA_THREAD_INFO_H
#ifdef __KERNEL__ #include <asm/kmem_layout.h>
#define CURRENT_SHIFT KERNEL_STACK_SHIFT
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
# include <asm/processor.h> # include <asm/processor.h>
...@@ -81,7 +83,7 @@ struct thread_info { ...@@ -81,7 +83,7 @@ struct thread_info {
static inline struct thread_info *current_thread_info(void) static inline struct thread_info *current_thread_info(void)
{ {
struct thread_info *ti; struct thread_info *ti;
__asm__("extui %0,a1,0,13\n\t" __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
"xor %0, a1, %0" : "=&r" (ti) : ); "xor %0, a1, %0" : "=&r" (ti) : );
return ti; return ti;
} }
...@@ -90,7 +92,7 @@ static inline struct thread_info *current_thread_info(void) ...@@ -90,7 +92,7 @@ static inline struct thread_info *current_thread_info(void)
/* how to get the thread information struct from ASM */ /* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg,sp) \ #define GET_THREAD_INFO(reg,sp) \
extui reg, sp, 0, 13; \ extui reg, sp, 0, CURRENT_SHIFT; \
xor reg, sp, reg xor reg, sp, reg
#endif #endif
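The extui/xor pair above simply clears the low CURRENT_SHIFT bits of the stack pointer; a minimal C sketch of the same computation, using the non-KASAN default of KERNEL_STACK_SHIFT == 13 (8 KiB stacks) and a hypothetical helper name:

    #include <stdint.h>

    #define CURRENT_SHIFT 13    /* KERNEL_STACK_SHIFT without KASAN; 15 with KASAN */

    static uintptr_t thread_info_base(uintptr_t sp)
    {
            /* same effect as "extui t, sp, 0, CURRENT_SHIFT; xor t, sp, t" */
            return sp & ~(((uintptr_t)1 << CURRENT_SHIFT) - 1);
    }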
...@@ -127,8 +129,7 @@ static inline struct thread_info *current_thread_info(void) ...@@ -127,8 +129,7 @@ static inline struct thread_info *current_thread_info(void)
*/ */
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
#define THREAD_SIZE 8192 //(2*PAGE_SIZE) #define THREAD_SIZE KERNEL_STACK_SIZE
#define THREAD_SIZE_ORDER 1 #define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
#endif /* __KERNEL__ */
#endif /* _XTENSA_THREAD_INFO */ #endif /* _XTENSA_THREAD_INFO */
...@@ -12,6 +12,29 @@ ...@@ -12,6 +12,29 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
/*
* Per-CPU exception handling data structure.
* EXCSAVE1 points to it.
*/
struct exc_table {
/* Kernel Stack */
void *kstk;
/* Double exception save area for a0 */
unsigned long double_save;
/* Fixup handler */
void *fixup;
/* For passing a parameter to fixup */
void *fixup_param;
/* For fast syscall handler */
unsigned long syscall_save;
/* Fast user exception handlers */
void *fast_user_handler[EXCCAUSE_N];
/* Fast kernel exception handlers */
void *fast_kernel_handler[EXCCAUSE_N];
/* Default C-Handlers */
void *default_handler[EXCCAUSE_N];
};
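Why the old hard-coded 0x100/0x200/0x300 offsets no longer apply, and why asm-offsets.c now generates them, follows from the struct layout; a host-independent sketch that models each 32-bit xtensa member as uint32_t (assuming no padding and EXCCAUSE_N == 64):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct exc_table_layout {               /* mirror of struct exc_table, 32-bit members */
            uint32_t kstk;
            uint32_t double_save;
            uint32_t fixup;
            uint32_t fixup_param;
            uint32_t syscall_save;
            uint32_t fast_user_handler[64];
            uint32_t fast_kernel_handler[64];
            uint32_t default_handler[64];
    };

    int main(void)
    {
            /* prints 0x14, 0x114 and 0x214 rather than the old 0x100/0x200/0x300 */
            printf("EXC_TABLE_FAST_USER   = 0x%zx\n",
                   offsetof(struct exc_table_layout, fast_user_handler));
            printf("EXC_TABLE_FAST_KERNEL = 0x%zx\n",
                   offsetof(struct exc_table_layout, fast_kernel_handler));
            printf("EXC_TABLE_DEFAULT     = 0x%zx\n",
                   offsetof(struct exc_table_layout, default_handler));
            return 0;
    }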
/* /*
* handler must be either of the following: * handler must be either of the following:
* void (*)(struct pt_regs *regs); * void (*)(struct pt_regs *regs);
...@@ -19,6 +42,18 @@ ...@@ -19,6 +42,18 @@
*/ */
extern void * __init trap_set_handler(int cause, void *handler); extern void * __init trap_set_handler(int cause, void *handler);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
void fast_second_level_miss(void);
/* Initialize minimal exc_table structure sufficient for basic paging */
static inline void __init early_trap_init(void)
{
static struct exc_table exc_table __initdata = {
.fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
fast_second_level_miss,
};
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
}
void secondary_trap_init(void); void secondary_trap_init(void);
static inline void spill_registers(void) static inline void spill_registers(void)
......
...@@ -44,6 +44,8 @@ ...@@ -44,6 +44,8 @@
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
/* /*
* These are the main single-value transfer routines. They * These are the main single-value transfer routines. They
* automatically use the right size if we just have the right pointer * automatically use the right size if we just have the right pointer
...@@ -261,7 +263,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) ...@@ -261,7 +263,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size) __xtensa_clear_user(void *addr, unsigned long size)
{ {
if ( ! memset(addr, 0, size) ) if (!__memset(addr, 0, size))
return size; return size;
return 0; return 0;
} }
...@@ -277,6 +279,8 @@ clear_user(void *addr, unsigned long size) ...@@ -277,6 +279,8 @@ clear_user(void *addr, unsigned long size)
#define __clear_user __xtensa_clear_user #define __clear_user __xtensa_clear_user
#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
extern long __strncpy_user(char *, const char *, long); extern long __strncpy_user(char *, const char *, long);
static inline long static inline long
...@@ -286,6 +290,9 @@ strncpy_from_user(char *dst, const char *src, long count) ...@@ -286,6 +290,9 @@ strncpy_from_user(char *dst, const char *src, long count)
return __strncpy_user(dst, src, count); return __strncpy_user(dst, src, count);
return -EFAULT; return -EFAULT;
} }
#else
long strncpy_from_user(char *dst, const char *src, long count);
#endif
/* /*
* Return the size of a string (including the ending 0!) * Return the size of a string (including the ending 0!)
......
...@@ -17,9 +17,6 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o ...@@ -17,9 +17,6 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
AFLAGS_head.o += -mtext-section-literals
AFLAGS_mxhead.o += -mtext-section-literals
# In the Xtensa architecture, assembly generates literals which must always # In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB. # precede the L32R instruction with a relative offset less than 256 kB.
# Therefore, the .text and .literal section must be combined in parenthesis # Therefore, the .text and .literal section must be combined in parenthesis
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h> #include <asm/processor.h>
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
...@@ -66,8 +67,6 @@ ...@@ -66,8 +67,6 @@
#define INSN_T 24 #define INSN_T 24
#define INSN_OP1 16 #define INSN_OP1 16
.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
.macro __ssa8 r; ssa8b \r; .endm
.macro __ssa8r r; ssa8l \r; .endm .macro __ssa8r r; ssa8l \r; .endm
.macro __sh r, s; srl \r, \s; .endm .macro __sh r, s; srl \r, \s; .endm
.macro __sl r, s; sll \r, \s; .endm .macro __sl r, s; sll \r, \s; .endm
...@@ -81,8 +80,6 @@ ...@@ -81,8 +80,6 @@
#define INSN_T 4 #define INSN_T 4
#define INSN_OP1 12 #define INSN_OP1 12
.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
.macro __ssa8 r; ssa8l \r; .endm
.macro __ssa8r r; ssa8b \r; .endm .macro __ssa8r r; ssa8b \r; .endm
.macro __sh r, s; sll \r, \s; .endm .macro __sh r, s; sll \r, \s; .endm
.macro __sl r, s; srl \r, \s; .endm .macro __sl r, s; srl \r, \s; .endm
...@@ -155,7 +152,7 @@ ...@@ -155,7 +152,7 @@
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/ */
.literal_position
ENTRY(fast_unaligned) ENTRY(fast_unaligned)
/* Note: We don't expect the address to be aligned on a word /* Note: We don't expect the address to be aligned on a word
......
...@@ -76,6 +76,9 @@ int main(void) ...@@ -76,6 +76,9 @@ int main(void)
DEFINE(TASK_PID, offsetof (struct task_struct, pid)); DEFINE(TASK_PID, offsetof (struct task_struct, pid));
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
#endif
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
/* offsets in thread_info struct */ /* offsets in thread_info struct */
...@@ -129,5 +132,18 @@ int main(void) ...@@ -129,5 +132,18 @@ int main(void)
offsetof(struct debug_table, icount_level_save)); offsetof(struct debug_table, icount_level_save));
#endif #endif
/* struct exc_table */
DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk));
DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
DEFINE(EXC_TABLE_SYSCALL_SAVE,
offsetof(struct exc_table, syscall_save));
DEFINE(EXC_TABLE_FAST_USER,
offsetof(struct exc_table, fast_user_handler));
DEFINE(EXC_TABLE_FAST_KERNEL,
offsetof(struct exc_table, fast_kernel_handler));
DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
return 0; return 0;
} }
...@@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore) ...@@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore)
ENTRY(fast_coprocessor_double) ENTRY(fast_coprocessor_double)
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception call0 unrecoverable_exception
callx0 a0
ENDPROC(fast_coprocessor_double) ENDPROC(fast_coprocessor_double)
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/coprocessor.h> #include <asm/coprocessor.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
...@@ -125,6 +126,7 @@ ...@@ -125,6 +126,7 @@
* *
* Note: _user_exception might be at an odd address. Don't use call0..call12 * Note: _user_exception might be at an odd address. Don't use call0..call12
*/ */
.literal_position
ENTRY(user_exception) ENTRY(user_exception)
...@@ -475,8 +477,7 @@ common_exception_return: ...@@ -475,8 +477,7 @@ common_exception_return:
1: 1:
irq_save a2, a3 irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_off call4 trace_hardirqs_off
callx4 a4
#endif #endif
/* Jump if we are returning from kernel exceptions. */ /* Jump if we are returning from kernel exceptions. */
...@@ -503,24 +504,20 @@ common_exception_return: ...@@ -503,24 +504,20 @@ common_exception_return:
/* Call do_signal() */ /* Call do_signal() */
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_on call4 trace_hardirqs_on
callx4 a4
#endif #endif
rsil a2, 0 rsil a2, 0
movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
mov a6, a1 mov a6, a1
callx4 a4 call4 do_notify_resume # int do_notify_resume(struct pt_regs*)
j 1b j 1b
3: /* Reschedule */ 3: /* Reschedule */
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_on call4 trace_hardirqs_on
callx4 a4
#endif #endif
rsil a2, 0 rsil a2, 0
movi a4, schedule # void schedule (void) call4 schedule # void schedule (void)
callx4 a4
j 1b j 1b
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
...@@ -531,8 +528,7 @@ common_exception_return: ...@@ -531,8 +528,7 @@ common_exception_return:
l32i a4, a2, TI_PRE_COUNT l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f bnez a4, 4f
movi a4, preempt_schedule_irq call4 preempt_schedule_irq
callx4 a4
j 1b j 1b
#endif #endif
...@@ -545,23 +541,20 @@ common_exception_return: ...@@ -545,23 +541,20 @@ common_exception_return:
5: 5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT #ifdef CONFIG_HAVE_HW_BREAKPOINT
_bbci.l a4, TIF_DB_DISABLED, 7f _bbci.l a4, TIF_DB_DISABLED, 7f
movi a4, restore_dbreak call4 restore_dbreak
callx4 a4
7: 7:
#endif #endif
#ifdef CONFIG_DEBUG_TLB_SANITY #ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
movi a4, check_tlb_sanity call4 check_tlb_sanity
callx4 a4
#endif #endif
6: 6:
4: 4:
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
bgei a4, LOCKLEVEL, 1f bgei a4, LOCKLEVEL, 1f
movi a4, trace_hardirqs_on call4 trace_hardirqs_on
callx4 a4
1: 1:
#endif #endif
/* Restore optional registers. */ /* Restore optional registers. */
...@@ -777,6 +770,8 @@ ENDPROC(kernel_exception) ...@@ -777,6 +770,8 @@ ENDPROC(kernel_exception)
* When we get here, a0 is trashed and saved to excsave[debuglevel] * When we get here, a0 is trashed and saved to excsave[debuglevel]
*/ */
.literal_position
ENTRY(debug_exception) ENTRY(debug_exception)
rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
...@@ -916,6 +911,8 @@ ENDPROC(debug_exception) ...@@ -916,6 +911,8 @@ ENDPROC(debug_exception)
unrecoverable_text: unrecoverable_text:
.ascii "Unrecoverable error in exception handler\0" .ascii "Unrecoverable error in exception handler\0"
.literal_position
ENTRY(unrecoverable_exception) ENTRY(unrecoverable_exception)
movi a0, 1 movi a0, 1
...@@ -933,10 +930,8 @@ ENTRY(unrecoverable_exception) ...@@ -933,10 +930,8 @@ ENTRY(unrecoverable_exception)
movi a0, 0 movi a0, 0
addi a1, a1, PT_REGS_OFFSET addi a1, a1, PT_REGS_OFFSET
movi a4, panic
movi a6, unrecoverable_text movi a6, unrecoverable_text
call4 panic
callx4 a4
1: j 1b 1: j 1b
...@@ -1073,8 +1068,7 @@ ENTRY(fast_syscall_unrecoverable) ...@@ -1073,8 +1068,7 @@ ENTRY(fast_syscall_unrecoverable)
xsr a2, depc # restore a2, depc xsr a2, depc # restore a2, depc
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception call0 unrecoverable_exception
callx0 a0
ENDPROC(fast_syscall_unrecoverable) ENDPROC(fast_syscall_unrecoverable)
...@@ -1101,32 +1095,11 @@ ENDPROC(fast_syscall_unrecoverable) ...@@ -1101,32 +1095,11 @@ ENDPROC(fast_syscall_unrecoverable)
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
* *
* Note: we don't have to save a2; a2 holds the return value * Note: we don't have to save a2; a2 holds the return value
*
* We use the two macros TRY and CATCH:
*
* TRY adds an entry to the __ex_table fixup table for the immediately
* following instruction.
*
* CATCH catches any exception that occurred at one of the preceding TRY
* statements and continues from there
*
* Usage TRY l32i a0, a1, 0
* <other code>
* done: rfe
* CATCH <set return code>
* j done
*/ */
#ifdef CONFIG_FAST_SYSCALL_XTENSA .literal_position
#define TRY \
.section __ex_table, "a"; \
.word 66f, 67f; \
.text; \
66:
#define CATCH \ #ifdef CONFIG_FAST_SYSCALL_XTENSA
67:
ENTRY(fast_syscall_xtensa) ENTRY(fast_syscall_xtensa)
...@@ -1141,9 +1114,9 @@ ENTRY(fast_syscall_xtensa) ...@@ -1141,9 +1114,9 @@ ENTRY(fast_syscall_xtensa)
.Lswp: /* Atomic compare and swap */ .Lswp: /* Atomic compare and swap */
TRY l32i a0, a3, 0 # read old value EX(.Leac) l32i a0, a3, 0 # read old value
bne a0, a4, 1f # same as old value? jump bne a0, a4, 1f # same as old value? jump
TRY s32i a5, a3, 0 # different, modify value EX(.Leac) s32i a5, a3, 0 # different, modify value
l32i a7, a2, PT_AREG7 # restore a7 l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0 l32i a0, a2, PT_AREG0 # restore a0
movi a2, 1 # and return 1 movi a2, 1 # and return 1
...@@ -1156,12 +1129,12 @@ TRY s32i a5, a3, 0 # different, modify value ...@@ -1156,12 +1129,12 @@ TRY s32i a5, a3, 0 # different, modify value
.Lnswp: /* Atomic set, add, and exg_add. */ .Lnswp: /* Atomic set, add, and exg_add. */
TRY l32i a7, a3, 0 # orig EX(.Leac) l32i a7, a3, 0 # orig
addi a6, a6, -SYS_XTENSA_ATOMIC_SET addi a6, a6, -SYS_XTENSA_ATOMIC_SET
add a0, a4, a7 # + arg add a0, a4, a7 # + arg
moveqz a0, a4, a6 # set moveqz a0, a4, a6 # set
addi a6, a6, SYS_XTENSA_ATOMIC_SET addi a6, a6, SYS_XTENSA_ATOMIC_SET
TRY s32i a0, a3, 0 # write new value EX(.Leac) s32i a0, a3, 0 # write new value
mov a0, a2 mov a0, a2
mov a2, a7 mov a2, a7
...@@ -1169,7 +1142,6 @@ TRY s32i a0, a3, 0 # write new value ...@@ -1169,7 +1142,6 @@ TRY s32i a0, a3, 0 # write new value
l32i a0, a0, PT_AREG0 # restore a0 l32i a0, a0, PT_AREG0 # restore a0
rfe rfe
CATCH
.Leac: l32i a7, a2, PT_AREG7 # restore a7 .Leac: l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0 l32i a0, a2, PT_AREG0 # restore a0
movi a2, -EFAULT movi a2, -EFAULT
...@@ -1411,14 +1383,12 @@ ENTRY(fast_syscall_spill_registers) ...@@ -1411,14 +1383,12 @@ ENTRY(fast_syscall_spill_registers)
rsync rsync
movi a6, SIGSEGV movi a6, SIGSEGV
movi a4, do_exit call4 do_exit
callx4 a4
/* shouldn't return, so panic */ /* shouldn't return, so panic */
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception call0 unrecoverable_exception # should not return
callx0 a0 # should not return
1: j 1b 1: j 1b
...@@ -1564,8 +1534,8 @@ ENDPROC(fast_syscall_spill_registers) ...@@ -1564,8 +1534,8 @@ ENDPROC(fast_syscall_spill_registers)
ENTRY(fast_second_level_miss_double_kernel) ENTRY(fast_second_level_miss_double_kernel)
1: movi a0, unrecoverable_exception 1:
callx0 a0 # should not return call0 unrecoverable_exception # should not return
1: j 1b 1: j 1b
ENDPROC(fast_second_level_miss_double_kernel) ENDPROC(fast_second_level_miss_double_kernel)
...@@ -1887,6 +1857,7 @@ ENDPROC(fast_store_prohibited) ...@@ -1887,6 +1857,7 @@ ENDPROC(fast_store_prohibited)
* void system_call (struct pt_regs* regs, int exccause) * void system_call (struct pt_regs* regs, int exccause)
* a2 a3 * a2 a3
*/ */
.literal_position
ENTRY(system_call) ENTRY(system_call)
...@@ -1896,9 +1867,8 @@ ENTRY(system_call) ...@@ -1896,9 +1867,8 @@ ENTRY(system_call)
l32i a3, a2, PT_AREG2 l32i a3, a2, PT_AREG2
mov a6, a2 mov a6, a2
movi a4, do_syscall_trace_enter
s32i a3, a2, PT_SYSCALL s32i a3, a2, PT_SYSCALL
callx4 a4 call4 do_syscall_trace_enter
mov a3, a6 mov a3, a6
/* syscall = sys_call_table[syscall_nr] */ /* syscall = sys_call_table[syscall_nr] */
...@@ -1930,9 +1900,8 @@ ENTRY(system_call) ...@@ -1930,9 +1900,8 @@ ENTRY(system_call)
1: /* regs->areg[2] = return_value */ 1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2 s32i a6, a2, PT_AREG2
movi a4, do_syscall_trace_leave
mov a6, a2 mov a6, a2
callx4 a4 call4 do_syscall_trace_leave
retw retw
ENDPROC(system_call) ENDPROC(system_call)
...@@ -2002,6 +1971,12 @@ ENTRY(_switch_to) ...@@ -2002,6 +1971,12 @@ ENTRY(_switch_to)
s32i a1, a2, THREAD_SP # save stack pointer s32i a1, a2, THREAD_SP # save stack pointer
#endif #endif
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
movi a6, __stack_chk_guard
l32i a8, a3, TASK_STACK_CANARY
s32i a8, a6, 0
#endif
/* Disable ints while we manipulate the stack pointer. */ /* Disable ints while we manipulate the stack pointer. */
irq_save a14, a3 irq_save a14, a3
...@@ -2048,12 +2023,10 @@ ENTRY(ret_from_fork) ...@@ -2048,12 +2023,10 @@ ENTRY(ret_from_fork)
/* void schedule_tail (struct task_struct *prev) /* void schedule_tail (struct task_struct *prev)
* Note: prev is still in a6 (return value from fake call4 frame) * Note: prev is still in a6 (return value from fake call4 frame)
*/ */
movi a4, schedule_tail call4 schedule_tail
callx4 a4
movi a4, do_syscall_trace_leave
mov a6, a1 mov a6, a1
callx4 a4 call4 do_syscall_trace_leave
j common_exception_return j common_exception_return
......
...@@ -264,11 +264,8 @@ ENTRY(_startup) ...@@ -264,11 +264,8 @@ ENTRY(_startup)
/* init_arch kick-starts the linux kernel */ /* init_arch kick-starts the linux kernel */
movi a4, init_arch call4 init_arch
callx4 a4 call4 start_kernel
movi a4, start_kernel
callx4 a4
should_never_return: should_never_return:
j should_never_return j should_never_return
...@@ -294,8 +291,7 @@ should_never_return: ...@@ -294,8 +291,7 @@ should_never_return:
movi a6, 0 movi a6, 0
wsr a6, excsave1 wsr a6, excsave1
movi a4, secondary_start_kernel call4 secondary_start_kernel
callx4 a4
j should_never_return j should_never_return
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
......
...@@ -22,8 +22,6 @@ ...@@ -22,8 +22,6 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/cache.h> #include <linux/cache.h>
#undef DEBUG_RELOCATE
static int static int
decode_calln_opcode (unsigned char *location) decode_calln_opcode (unsigned char *location)
{ {
...@@ -58,10 +56,9 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, ...@@ -58,10 +56,9 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned char *location; unsigned char *location;
uint32_t value; uint32_t value;
#ifdef DEBUG_RELOCATE pr_debug("Applying relocate section %u to %u\n", relsec,
printk("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info);
sechdrs[relsec].sh_info);
#endif
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rela[i].r_offset; + rela[i].r_offset;
...@@ -87,7 +84,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, ...@@ -87,7 +84,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value -= ((unsigned long)location & -4) + 4; value -= ((unsigned long)location & -4) + 4;
if ((value & 3) != 0 || if ((value & 3) != 0 ||
((value + (1 << 19)) >> 20) != 0) { ((value + (1 << 19)) >> 20) != 0) {
printk("%s: relocation out of range, " pr_err("%s: relocation out of range, "
"section %d reloc %d " "section %d reloc %d "
"sym '%s'\n", "sym '%s'\n",
mod->name, relsec, i, mod->name, relsec, i,
...@@ -111,7 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, ...@@ -111,7 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value -= (((unsigned long)location + 3) & -4); value -= (((unsigned long)location + 3) & -4);
if ((value & 3) != 0 || if ((value & 3) != 0 ||
(signed int)value >> 18 != -1) { (signed int)value >> 18 != -1) {
printk("%s: relocation out of range, " pr_err("%s: relocation out of range, "
"section %d reloc %d " "section %d reloc %d "
"sym '%s'\n", "sym '%s'\n",
mod->name, relsec, i, mod->name, relsec, i,
...@@ -156,7 +153,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, ...@@ -156,7 +153,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
case R_XTENSA_SLOT12_OP: case R_XTENSA_SLOT12_OP:
case R_XTENSA_SLOT13_OP: case R_XTENSA_SLOT13_OP:
case R_XTENSA_SLOT14_OP: case R_XTENSA_SLOT14_OP:
printk("%s: unexpected FLIX relocation: %u\n", pr_err("%s: unexpected FLIX relocation: %u\n",
mod->name, mod->name,
ELF32_R_TYPE(rela[i].r_info)); ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC; return -ENOEXEC;
...@@ -176,13 +173,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, ...@@ -176,13 +173,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
case R_XTENSA_SLOT12_ALT: case R_XTENSA_SLOT12_ALT:
case R_XTENSA_SLOT13_ALT: case R_XTENSA_SLOT13_ALT:
case R_XTENSA_SLOT14_ALT: case R_XTENSA_SLOT14_ALT:
printk("%s: unexpected ALT relocation: %u\n", pr_err("%s: unexpected ALT relocation: %u\n",
mod->name, mod->name,
ELF32_R_TYPE(rela[i].r_info)); ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC; return -ENOEXEC;
default: default:
printk("%s: unexpected relocation: %u\n", pr_err("%s: unexpected relocation: %u\n",
mod->name, mod->name,
ELF32_R_TYPE(rela[i].r_info)); ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC; return -ENOEXEC;
......
...@@ -29,14 +29,6 @@ ...@@ -29,14 +29,6 @@
#include <asm/pci-bridge.h> #include <asm/pci-bridge.h>
#include <asm/platform.h> #include <asm/platform.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
/* PCI Controller */ /* PCI Controller */
...@@ -101,8 +93,8 @@ pcibios_enable_resources(struct pci_dev *dev, int mask) ...@@ -101,8 +93,8 @@ pcibios_enable_resources(struct pci_dev *dev, int mask)
for(idx=0; idx<6; idx++) { for(idx=0; idx<6; idx++) {
r = &dev->resource[idx]; r = &dev->resource[idx];
if (!r->start && r->end) { if (!r->start && r->end) {
printk (KERN_ERR "PCI: Device %s not available because " pr_err("PCI: Device %s not available because "
"of resource collisions\n", pci_name(dev)); "of resource collisions\n", pci_name(dev));
return -EINVAL; return -EINVAL;
} }
if (r->flags & IORESOURCE_IO) if (r->flags & IORESOURCE_IO)
...@@ -113,7 +105,7 @@ pcibios_enable_resources(struct pci_dev *dev, int mask) ...@@ -113,7 +105,7 @@ pcibios_enable_resources(struct pci_dev *dev, int mask)
if (dev->resource[PCI_ROM_RESOURCE].start) if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY; cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) { if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n", pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd); pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd);
} }
...@@ -144,8 +136,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, ...@@ -144,8 +136,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
res = &pci_ctrl->io_resource; res = &pci_ctrl->io_resource;
if (!res->flags) { if (!res->flags) {
if (io_offset) if (io_offset)
printk (KERN_ERR "I/O resource not set for host" pr_err("I/O resource not set for host bridge %d\n",
" bridge %d\n", pci_ctrl->index); pci_ctrl->index);
res->start = 0; res->start = 0;
res->end = IO_SPACE_LIMIT; res->end = IO_SPACE_LIMIT;
res->flags = IORESOURCE_IO; res->flags = IORESOURCE_IO;
...@@ -159,8 +151,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl, ...@@ -159,8 +151,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
if (!res->flags) { if (!res->flags) {
if (i > 0) if (i > 0)
continue; continue;
printk(KERN_ERR "Memory resource not set for " pr_err("Memory resource not set for host bridge %d\n",
"host bridge %d\n", pci_ctrl->index); pci_ctrl->index);
res->start = 0; res->start = 0;
res->end = ~0U; res->end = ~0U;
res->flags = IORESOURCE_MEM; res->flags = IORESOURCE_MEM;
...@@ -176,7 +168,7 @@ static int __init pcibios_init(void) ...@@ -176,7 +168,7 @@ static int __init pcibios_init(void)
struct pci_bus *bus; struct pci_bus *bus;
int next_busno = 0, ret; int next_busno = 0, ret;
printk("PCI: Probing PCI hardware\n"); pr_info("PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */ /* Scan all of the recorded PCI controllers. */
for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) { for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
...@@ -232,7 +224,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) ...@@ -232,7 +224,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
for (idx=0; idx<6; idx++) { for (idx=0; idx<6; idx++) {
r = &dev->resource[idx]; r = &dev->resource[idx];
if (!r->start && r->end) { if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because " pr_err("PCI: Device %s not available because "
"of resource collisions\n", pci_name(dev)); "of resource collisions\n", pci_name(dev));
return -EINVAL; return -EINVAL;
} }
...@@ -242,8 +234,8 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) ...@@ -242,8 +234,8 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY; cmd |= PCI_COMMAND_MEMORY;
} }
if (cmd != old_cmd) { if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n", pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd); pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd);
} }
......
...@@ -58,6 +58,12 @@ void (*pm_power_off)(void) = NULL; ...@@ -58,6 +58,12 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off); EXPORT_SYMBOL(pm_power_off);
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
#if XTENSA_HAVE_COPROCESSORS #if XTENSA_HAVE_COPROCESSORS
void coprocessor_release_all(struct thread_info *ti) void coprocessor_release_all(struct thread_info *ti)
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#endif #endif
#include <asm/bootparam.h> #include <asm/bootparam.h>
#include <asm/kasan.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -156,7 +157,7 @@ static int __init parse_bootparam(const bp_tag_t* tag) ...@@ -156,7 +157,7 @@ static int __init parse_bootparam(const bp_tag_t* tag)
/* Boot parameters must start with a BP_TAG_FIRST tag. */ /* Boot parameters must start with a BP_TAG_FIRST tag. */
if (tag->id != BP_TAG_FIRST) { if (tag->id != BP_TAG_FIRST) {
printk(KERN_WARNING "Invalid boot parameters!\n"); pr_warn("Invalid boot parameters!\n");
return 0; return 0;
} }
...@@ -165,15 +166,14 @@ static int __init parse_bootparam(const bp_tag_t* tag) ...@@ -165,15 +166,14 @@ static int __init parse_bootparam(const bp_tag_t* tag)
/* Parse all tags. */ /* Parse all tags. */
while (tag != NULL && tag->id != BP_TAG_LAST) { while (tag != NULL && tag->id != BP_TAG_LAST) {
for (t = &__tagtable_begin; t < &__tagtable_end; t++) { for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
if (tag->id == t->tag) { if (tag->id == t->tag) {
t->parse(tag); t->parse(tag);
break; break;
} }
} }
if (t == &__tagtable_end) if (t == &__tagtable_end)
printk(KERN_WARNING "Ignoring tag " pr_warn("Ignoring tag 0x%08x\n", tag->id);
"0x%08x\n", tag->id);
tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size); tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
} }
...@@ -208,6 +208,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname, ...@@ -208,6 +208,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
/* round down to nearest 256MB boundary */ /* round down to nearest 256MB boundary */
xtensa_kio_paddr &= 0xf0000000; xtensa_kio_paddr &= 0xf0000000;
init_kio();
return 1; return 1;
} }
#else #else
...@@ -246,6 +248,14 @@ void __init early_init_devtree(void *params) ...@@ -246,6 +248,14 @@ void __init early_init_devtree(void *params)
void __init init_arch(bp_tag_t *bp_start) void __init init_arch(bp_tag_t *bp_start)
{ {
/* Initialize MMU. */
init_mmu();
/* Initialize initial KASAN shadow map */
kasan_early_init();
/* Parse boot parameters */ /* Parse boot parameters */
if (bp_start) if (bp_start)
...@@ -263,10 +273,6 @@ void __init init_arch(bp_tag_t *bp_start) ...@@ -263,10 +273,6 @@ void __init init_arch(bp_tag_t *bp_start)
/* Early hook for platforms */ /* Early hook for platforms */
platform_init(bp_start); platform_init(bp_start);
/* Initialize MMU. */
init_mmu();
} }
/* /*
...@@ -277,13 +283,13 @@ extern char _end[]; ...@@ -277,13 +283,13 @@ extern char _end[];
extern char _stext[]; extern char _stext[];
extern char _WindowVectors_text_start; extern char _WindowVectors_text_start;
extern char _WindowVectors_text_end; extern char _WindowVectors_text_end;
extern char _DebugInterruptVector_literal_start; extern char _DebugInterruptVector_text_start;
extern char _DebugInterruptVector_text_end; extern char _DebugInterruptVector_text_end;
extern char _KernelExceptionVector_literal_start; extern char _KernelExceptionVector_text_start;
extern char _KernelExceptionVector_text_end; extern char _KernelExceptionVector_text_end;
extern char _UserExceptionVector_literal_start; extern char _UserExceptionVector_text_start;
extern char _UserExceptionVector_text_end; extern char _UserExceptionVector_text_end;
extern char _DoubleExceptionVector_literal_start; extern char _DoubleExceptionVector_text_start;
extern char _DoubleExceptionVector_text_end; extern char _DoubleExceptionVector_text_end;
#if XCHAL_EXCM_LEVEL >= 2 #if XCHAL_EXCM_LEVEL >= 2
extern char _Level2InterruptVector_text_start; extern char _Level2InterruptVector_text_start;
...@@ -317,6 +323,13 @@ static inline int mem_reserve(unsigned long start, unsigned long end) ...@@ -317,6 +323,13 @@ static inline int mem_reserve(unsigned long start, unsigned long end)
void __init setup_arch(char **cmdline_p) void __init setup_arch(char **cmdline_p)
{ {
pr_info("config ID: %08x:%08x\n",
get_sr(SREG_EPC), get_sr(SREG_EXCSAVE));
if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
pr_info("built for config ID: %08x:%08x\n",
XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);
*cmdline_p = command_line; *cmdline_p = command_line;
platform_setup(cmdline_p); platform_setup(cmdline_p);
strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
...@@ -339,16 +352,16 @@ void __init setup_arch(char **cmdline_p) ...@@ -339,16 +352,16 @@ void __init setup_arch(char **cmdline_p)
mem_reserve(__pa(&_WindowVectors_text_start), mem_reserve(__pa(&_WindowVectors_text_start),
__pa(&_WindowVectors_text_end)); __pa(&_WindowVectors_text_end));
mem_reserve(__pa(&_DebugInterruptVector_literal_start), mem_reserve(__pa(&_DebugInterruptVector_text_start),
__pa(&_DebugInterruptVector_text_end)); __pa(&_DebugInterruptVector_text_end));
mem_reserve(__pa(&_KernelExceptionVector_literal_start), mem_reserve(__pa(&_KernelExceptionVector_text_start),
__pa(&_KernelExceptionVector_text_end)); __pa(&_KernelExceptionVector_text_end));
mem_reserve(__pa(&_UserExceptionVector_literal_start), mem_reserve(__pa(&_UserExceptionVector_text_start),
__pa(&_UserExceptionVector_text_end)); __pa(&_UserExceptionVector_text_end));
mem_reserve(__pa(&_DoubleExceptionVector_literal_start), mem_reserve(__pa(&_DoubleExceptionVector_text_start),
__pa(&_DoubleExceptionVector_text_end)); __pa(&_DoubleExceptionVector_text_end));
#if XCHAL_EXCM_LEVEL >= 2 #if XCHAL_EXCM_LEVEL >= 2
...@@ -380,7 +393,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -380,7 +393,7 @@ void __init setup_arch(char **cmdline_p)
#endif #endif
parse_early_param(); parse_early_param();
bootmem_init(); bootmem_init();
kasan_init();
unflatten_and_copy_device_tree(); unflatten_and_copy_device_tree();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
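kasan_init() sits right after bootmem_init() because the real shadow pages are allocated from the freshly initialized boot memory allocator, before the flattened device tree is unpacked. For reference, the generic KASAN address-to-shadow translation (one shadow byte per 8 bytes of memory) is sketched below; KASAN_SHADOW_OFFSET is a Kconfig-provided constant whose xtensa value is not part of this diff:

	/* generic KASAN shadow lookup, simplified from include/linux/kasan.h */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}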
...@@ -582,12 +595,14 @@ c_show(struct seq_file *f, void *slot) ...@@ -582,12 +595,14 @@ c_show(struct seq_file *f, void *slot)
"model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
"core ID\t\t: " XCHAL_CORE_ID "\n" "core ID\t\t: " XCHAL_CORE_ID "\n"
"build ID\t: 0x%x\n" "build ID\t: 0x%x\n"
"config ID\t: %08x:%08x\n"
"byte order\t: %s\n" "byte order\t: %s\n"
"cpu MHz\t\t: %lu.%02lu\n" "cpu MHz\t\t: %lu.%02lu\n"
"bogomips\t: %lu.%02lu\n", "bogomips\t: %lu.%02lu\n",
num_online_cpus(), num_online_cpus(),
cpumask_pr_args(cpu_online_mask), cpumask_pr_args(cpu_online_mask),
XCHAL_BUILD_UNIQUE_ID, XCHAL_BUILD_UNIQUE_ID,
get_sr(SREG_EPC), get_sr(SREG_EXCSAVE),
XCHAL_HAVE_BE ? "big" : "little", XCHAL_HAVE_BE ? "big" : "little",
ccount_freq/1000000, ccount_freq/1000000,
(ccount_freq/10000) % 100, (ccount_freq/10000) % 100,
......
...@@ -28,8 +28,6 @@ ...@@ -28,8 +28,6 @@
#include <asm/coprocessor.h> #include <asm/coprocessor.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#define DEBUG_SIG 0
extern struct task_struct *coproc_owners[]; extern struct task_struct *coproc_owners[];
struct rt_sigframe struct rt_sigframe
...@@ -399,10 +397,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, ...@@ -399,10 +397,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
regs->areg[8] = (unsigned long) &frame->uc; regs->areg[8] = (unsigned long) &frame->uc;
regs->threadptr = tp; regs->threadptr = tp;
#if DEBUG_SIG pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n", current->comm, current->pid, sig, frame, regs->pc);
current->comm, current->pid, sig, frame, regs->pc);
#endif
return 0; return 0;
} }
......
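Dropping the DEBUG_SIG switch in favour of pr_debug() also fixes the format string: regs->pc is unsigned long, so %08lx replaces %08x. pr_debug() costs nothing unless DEBUG is defined for the file, or CONFIG_DYNAMIC_DEBUG lets it be enabled per call site at run time; simplified (not the exact printk.h definitions), the two compile-time paths look like:

	#ifdef DEBUG
	#define pr_debug(fmt, ...) \
		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#else
	#define pr_debug(fmt, ...) \
		no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#endif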
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
...@@ -158,8 +159,7 @@ COPROCESSOR(7), ...@@ -158,8 +159,7 @@ COPROCESSOR(7),
* 2. it is a temporary memory buffer for the exception handlers. * 2. it is a temporary memory buffer for the exception handlers.
*/ */
DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]); DEFINE_PER_CPU(struct exc_table, exc_table);
DEFINE_PER_CPU(struct debug_table, debug_table); DEFINE_PER_CPU(struct debug_table, debug_table);
void die(const char*, struct pt_regs*, long); void die(const char*, struct pt_regs*, long);
...@@ -178,13 +178,14 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err) ...@@ -178,13 +178,14 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err)
void do_unhandled(struct pt_regs *regs, unsigned long exccause) void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{ {
__die_if_kernel("Caught unhandled exception - should not happen", __die_if_kernel("Caught unhandled exception - should not happen",
regs, SIGKILL); regs, SIGKILL);
/* If in user mode, send SIGILL signal to current process */ /* If in user mode, send SIGILL signal to current process */
printk("Caught unhandled exception in '%s' " pr_info_ratelimited("Caught unhandled exception in '%s' "
"(pid = %d, pc = %#010lx) - should not happen\n" "(pid = %d, pc = %#010lx) - should not happen\n"
"\tEXCCAUSE is %ld\n", "\tEXCCAUSE is %ld\n",
current->comm, task_pid_nr(current), regs->pc, exccause); current->comm, task_pid_nr(current), regs->pc,
exccause);
force_sig(SIGILL, current); force_sig(SIGILL, current);
} }
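The messages in this file are user-triggerable, hence the switch to the ratelimited printk variants (and the new <linux/ratelimit.h> include above): once the default burst/interval budget is spent, further messages are dropped instead of flooding the log. Usage is a drop-in replacement for the plain pr_info(), for example:

	pr_info_ratelimited("unhandled exception in '%s' (pid = %d)\n",
			    current->comm, task_pid_nr(current));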
...@@ -305,8 +306,8 @@ do_illegal_instruction(struct pt_regs *regs) ...@@ -305,8 +306,8 @@ do_illegal_instruction(struct pt_regs *regs)
/* If in user mode, send SIGILL signal to current process. */ /* If in user mode, send SIGILL signal to current process. */
printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
current->comm, task_pid_nr(current), regs->pc); current->comm, task_pid_nr(current), regs->pc);
force_sig(SIGILL, current); force_sig(SIGILL, current);
} }
...@@ -325,13 +326,14 @@ do_unaligned_user (struct pt_regs *regs) ...@@ -325,13 +326,14 @@ do_unaligned_user (struct pt_regs *regs)
siginfo_t info; siginfo_t info;
__die_if_kernel("Unhandled unaligned exception in kernel", __die_if_kernel("Unhandled unaligned exception in kernel",
regs, SIGKILL); regs, SIGKILL);
current->thread.bad_vaddr = regs->excvaddr; current->thread.bad_vaddr = regs->excvaddr;
current->thread.error_code = -3; current->thread.error_code = -3;
printk("Unaligned memory access to %08lx in '%s' " pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
"(pid = %d, pc = %#010lx)\n", "(pid = %d, pc = %#010lx)\n",
regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); regs->excvaddr, current->comm,
task_pid_nr(current), regs->pc);
info.si_signo = SIGBUS; info.si_signo = SIGBUS;
info.si_errno = 0; info.si_errno = 0;
info.si_code = BUS_ADRALN; info.si_code = BUS_ADRALN;
...@@ -365,28 +367,28 @@ do_debug(struct pt_regs *regs) ...@@ -365,28 +367,28 @@ do_debug(struct pt_regs *regs)
} }
static void set_handler(int idx, void *handler) #define set_handler(type, cause, handler) \
{ do { \
unsigned int cpu; unsigned int cpu; \
\
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu) \
per_cpu(exc_table, cpu)[idx] = (unsigned long)handler; per_cpu(exc_table, cpu).type[cause] = (handler);\
} } while (0)
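The per-CPU exception data moves from a flat array of longs indexed by byte offsets (EXC_TABLE_*/4) to a typed struct exc_table, and set_handler() becomes a macro so the target field can be named at the call site. A plausible shape of the structure, inferred only from the fields touched in these hunks (the real definition in asm/traps.h also carries the fixup and temporary state used by the assembly entry code, omitted here):

	struct exc_table {
		/* ... kernel stack pointer, double-exception save area, fixups ... */
		void *fast_user_handler[EXCCAUSE_N];	/* formerly EXC_TABLE_FAST_USER */
		void *fast_kernel_handler[EXCCAUSE_N];	/* formerly EXC_TABLE_FAST_KERNEL */
		void *default_handler[EXCCAUSE_N];	/* formerly EXC_TABLE_DEFAULT */
	};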
/* Set exception C handler - for temporary use when probing exceptions */ /* Set exception C handler - for temporary use when probing exceptions */
void * __init trap_set_handler(int cause, void *handler) void * __init trap_set_handler(int cause, void *handler)
{ {
void *previous = (void *)per_cpu(exc_table, 0)[ void *previous = per_cpu(exc_table, 0).default_handler[cause];
EXC_TABLE_DEFAULT / 4 + cause];
set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler); set_handler(default_handler, cause, handler);
return previous; return previous;
} }
static void trap_init_excsave(void) static void trap_init_excsave(void)
{ {
unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table); unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1)); __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
} }
...@@ -418,10 +420,10 @@ void __init trap_init(void) ...@@ -418,10 +420,10 @@ void __init trap_init(void)
/* Setup default vectors. */ /* Setup default vectors. */
for(i = 0; i < 64; i++) { for (i = 0; i < EXCCAUSE_N; i++) {
set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception); set_handler(fast_user_handler, i, user_exception);
set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception); set_handler(fast_kernel_handler, i, kernel_exception);
set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled); set_handler(default_handler, i, do_unhandled);
} }
/* Setup specific handlers. */ /* Setup specific handlers. */
...@@ -433,11 +435,11 @@ void __init trap_init(void) ...@@ -433,11 +435,11 @@ void __init trap_init(void)
void *handler = dispatch_init_table[i].handler; void *handler = dispatch_init_table[i].handler;
if (fast == 0) if (fast == 0)
set_handler (EXC_TABLE_DEFAULT/4 + cause, handler); set_handler(default_handler, cause, handler);
if (fast && fast & USER) if (fast && fast & USER)
set_handler (EXC_TABLE_FAST_USER/4 + cause, handler); set_handler(fast_user_handler, cause, handler);
if (fast && fast & KRNL) if (fast && fast & KRNL)
set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler); set_handler(fast_kernel_handler, cause, handler);
} }
/* Initialize EXCSAVE_1 to hold the address of the exception table. */ /* Initialize EXCSAVE_1 to hold the address of the exception table. */
......
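trap_set_handler() keeps its contract of returning the previous C handler, so probing code can install a temporary handler and put the old one back. A minimal sketch of that pattern (probe_handler and the chosen EXCCAUSE value are illustrative, not taken from this patch):

	static void probe_handler(struct pt_regs *regs, unsigned long exccause)
	{
		/* record that the exception fired, adjust regs->pc, etc. */
	}

	static void __init probe_feature(void)
	{
		void *old = trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION,
					     probe_handler);

		/* ... execute the instruction sequence being probed ... */

		trap_set_handler(EXCCAUSE_ILLEGAL_INSTRUCTION, old);
	}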
...@@ -205,9 +205,6 @@ ENDPROC(_KernelExceptionVector) ...@@ -205,9 +205,6 @@ ENDPROC(_KernelExceptionVector)
*/ */
.section .DoubleExceptionVector.text, "ax" .section .DoubleExceptionVector.text, "ax"
.begin literal_prefix .DoubleExceptionVector
.globl _DoubleExceptionVector_WindowUnderflow
.globl _DoubleExceptionVector_WindowOverflow
ENTRY(_DoubleExceptionVector) ENTRY(_DoubleExceptionVector)
...@@ -217,8 +214,12 @@ ENTRY(_DoubleExceptionVector) ...@@ -217,8 +214,12 @@ ENTRY(_DoubleExceptionVector)
/* Check for kernel double exception (usually fatal). */ /* Check for kernel double exception (usually fatal). */
rsr a2, ps rsr a2, ps
_bbci.l a2, PS_UM_BIT, .Lksp _bbsi.l a2, PS_UM_BIT, 1f
j .Lksp
.align 4
.literal_position
1:
/* Check if we are currently handling a window exception. */ /* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */ /* Note: We don't need to indicate that we enter a critical section. */
...@@ -304,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow: ...@@ -304,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow:
.Lunrecoverable: .Lunrecoverable:
rsr a3, excsave1 rsr a3, excsave1
wsr a0, excsave1 wsr a0, excsave1
movi a0, unrecoverable_exception call0 unrecoverable_exception
callx0 a0
.Lfixup:/* Check for a fixup handler or if we were in a critical section. */ .Lfixup:/* Check for a fixup handler or if we were in a critical section. */
...@@ -475,11 +475,8 @@ _DoubleExceptionVector_handle_exception: ...@@ -475,11 +475,8 @@ _DoubleExceptionVector_handle_exception:
rotw -3 rotw -3
j 1b j 1b
ENDPROC(_DoubleExceptionVector) ENDPROC(_DoubleExceptionVector)
.end literal_prefix
.text .text
/* /*
* Fixup handler for TLB miss in double exception handler for window overflow. * Fixup handler for TLB miss in double exception handler for window overflow.
...@@ -508,6 +505,8 @@ ENDPROC(_DoubleExceptionVector) ...@@ -508,6 +505,8 @@ ENDPROC(_DoubleExceptionVector)
* a3: exctable, original value in excsave1 * a3: exctable, original value in excsave1
*/ */
.literal_position
ENTRY(window_overflow_restore_a0_fixup) ENTRY(window_overflow_restore_a0_fixup)
rsr a0, ps rsr a0, ps
......
...@@ -45,24 +45,16 @@ jiffies = jiffies_64; ...@@ -45,24 +45,16 @@ jiffies = jiffies_64;
LONG(sym ## _end); \ LONG(sym ## _end); \
LONG(LOADADDR(section)) LONG(LOADADDR(section))
/* Macro to define a section for a vector. /*
* * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is
* Use of the MIN function catches the types of errors illustrated in * defined code for every vector is located with other init data. At startup
* the following example: * time head.S copies code for every vector to its final position according
* * to description recorded in the corresponding RELOCATE_ENTRY.
* Assume the section .DoubleExceptionVector.literal is completely
* full. Then a programmer adds code to .DoubleExceptionVector.text
* that produces another literal. The final literal position will
* overlay onto the first word of the adjacent code section
* .DoubleExceptionVector.text. (In practice, the literals will
* overwrite the code, and the first few instructions will be
* garbage.)
*/ */
#ifdef CONFIG_VECTORS_OFFSET #ifdef CONFIG_VECTORS_OFFSET
#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \ #define SECTION_VECTOR(sym, section, addr, prevsec) \
section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \ section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
{ \ { \
. = ALIGN(4); \ . = ALIGN(4); \
sym ## _start = ABSOLUTE(.); \ sym ## _start = ABSOLUTE(.); \
...@@ -112,26 +104,19 @@ SECTIONS ...@@ -112,26 +104,19 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 6 #if XCHAL_EXCM_LEVEL >= 6
SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR) SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
#endif #endif
SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4)
SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR) SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4)
SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
#endif #endif
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
ENTRY_TEXT
TEXT_TEXT TEXT_TEXT
VMLINUX_SYMBOL(__sched_text_start) = .; SCHED_TEXT
*(.sched.literal .sched.text) CPUIDLE_TEXT
VMLINUX_SYMBOL(__sched_text_end) = .; LOCK_TEXT
VMLINUX_SYMBOL(__cpuidle_text_start) = .;
*(.cpuidle.literal .cpuidle.text)
VMLINUX_SYMBOL(__cpuidle_text_end) = .;
VMLINUX_SYMBOL(__lock_text_start) = .;
*(.spinlock.literal .spinlock.text)
VMLINUX_SYMBOL(__lock_text_end) = .;
} }
_etext = .; _etext = .;
...@@ -196,8 +181,6 @@ SECTIONS ...@@ -196,8 +181,6 @@ SECTIONS
.KernelExceptionVector.text); .KernelExceptionVector.text);
RELOCATE_ENTRY(_UserExceptionVector_text, RELOCATE_ENTRY(_UserExceptionVector_text,
.UserExceptionVector.text); .UserExceptionVector.text);
RELOCATE_ENTRY(_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal);
RELOCATE_ENTRY(_DoubleExceptionVector_text, RELOCATE_ENTRY(_DoubleExceptionVector_text,
.DoubleExceptionVector.text); .DoubleExceptionVector.text);
RELOCATE_ENTRY(_DebugInterruptVector_text, RELOCATE_ENTRY(_DebugInterruptVector_text,
...@@ -230,25 +213,19 @@ SECTIONS ...@@ -230,25 +213,19 @@ SECTIONS
SECTION_VECTOR (_WindowVectors_text, SECTION_VECTOR (_WindowVectors_text,
.WindowVectors.text, .WindowVectors.text,
WINDOW_VECTORS_VADDR, 4, WINDOW_VECTORS_VADDR,
.dummy) .dummy)
SECTION_VECTOR (_DebugInterruptVector_literal,
.DebugInterruptVector.literal,
DEBUG_VECTOR_VADDR - 4,
SIZEOF(.WindowVectors.text),
.WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text, SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text, .DebugInterruptVector.text,
DEBUG_VECTOR_VADDR, DEBUG_VECTOR_VADDR,
4, .WindowVectors.text)
.DebugInterruptVector.literal)
#undef LAST #undef LAST
#define LAST .DebugInterruptVector.text #define LAST .DebugInterruptVector.text
#if XCHAL_EXCM_LEVEL >= 2 #if XCHAL_EXCM_LEVEL >= 2
SECTION_VECTOR (_Level2InterruptVector_text, SECTION_VECTOR (_Level2InterruptVector_text,
.Level2InterruptVector.text, .Level2InterruptVector.text,
INTLEVEL2_VECTOR_VADDR, INTLEVEL2_VECTOR_VADDR,
SIZEOF(LAST), LAST) LAST)
# undef LAST # undef LAST
# define LAST .Level2InterruptVector.text # define LAST .Level2InterruptVector.text
#endif #endif
...@@ -256,7 +233,7 @@ SECTIONS ...@@ -256,7 +233,7 @@ SECTIONS
SECTION_VECTOR (_Level3InterruptVector_text, SECTION_VECTOR (_Level3InterruptVector_text,
.Level3InterruptVector.text, .Level3InterruptVector.text,
INTLEVEL3_VECTOR_VADDR, INTLEVEL3_VECTOR_VADDR,
SIZEOF(LAST), LAST) LAST)
# undef LAST # undef LAST
# define LAST .Level3InterruptVector.text # define LAST .Level3InterruptVector.text
#endif #endif
...@@ -264,7 +241,7 @@ SECTIONS ...@@ -264,7 +241,7 @@ SECTIONS
SECTION_VECTOR (_Level4InterruptVector_text, SECTION_VECTOR (_Level4InterruptVector_text,
.Level4InterruptVector.text, .Level4InterruptVector.text,
INTLEVEL4_VECTOR_VADDR, INTLEVEL4_VECTOR_VADDR,
SIZEOF(LAST), LAST) LAST)
# undef LAST # undef LAST
# define LAST .Level4InterruptVector.text # define LAST .Level4InterruptVector.text
#endif #endif
...@@ -272,7 +249,7 @@ SECTIONS ...@@ -272,7 +249,7 @@ SECTIONS
SECTION_VECTOR (_Level5InterruptVector_text, SECTION_VECTOR (_Level5InterruptVector_text,
.Level5InterruptVector.text, .Level5InterruptVector.text,
INTLEVEL5_VECTOR_VADDR, INTLEVEL5_VECTOR_VADDR,
SIZEOF(LAST), LAST) LAST)
# undef LAST # undef LAST
# define LAST .Level5InterruptVector.text # define LAST .Level5InterruptVector.text
#endif #endif
...@@ -280,40 +257,23 @@ SECTIONS ...@@ -280,40 +257,23 @@ SECTIONS
SECTION_VECTOR (_Level6InterruptVector_text, SECTION_VECTOR (_Level6InterruptVector_text,
.Level6InterruptVector.text, .Level6InterruptVector.text,
INTLEVEL6_VECTOR_VADDR, INTLEVEL6_VECTOR_VADDR,
SIZEOF(LAST), LAST) LAST)
# undef LAST # undef LAST
# define LAST .Level6InterruptVector.text # define LAST .Level6InterruptVector.text
#endif #endif
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
KERNEL_VECTOR_VADDR - 4,
SIZEOF(LAST), LAST)
#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text, SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text, .KernelExceptionVector.text,
KERNEL_VECTOR_VADDR, KERNEL_VECTOR_VADDR,
4, LAST)
.KernelExceptionVector.literal) #undef LAST
SECTION_VECTOR (_UserExceptionVector_literal,
.UserExceptionVector.literal,
USER_VECTOR_VADDR - 4,
SIZEOF(.KernelExceptionVector.text),
.KernelExceptionVector.text)
SECTION_VECTOR (_UserExceptionVector_text, SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text, .UserExceptionVector.text,
USER_VECTOR_VADDR, USER_VECTOR_VADDR,
4, .KernelExceptionVector.text)
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
DOUBLEEXC_VECTOR_VADDR - 20,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text, SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text, .DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR, DOUBLEEXC_VECTOR_VADDR,
20, .UserExceptionVector.text)
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
...@@ -323,7 +283,6 @@ SECTIONS ...@@ -323,7 +283,6 @@ SECTIONS
SECTION_VECTOR (_SecondaryResetVector_text, SECTION_VECTOR (_SecondaryResetVector_text,
.SecondaryResetVector.text, .SecondaryResetVector.text,
RESET_VECTOR1_VADDR, RESET_VECTOR1_VADDR,
SIZEOF(.DoubleExceptionVector.text),
.DoubleExceptionVector.text) .DoubleExceptionVector.text)
. = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text); . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
...@@ -373,5 +332,4 @@ SECTIONS ...@@ -373,5 +332,4 @@ SECTIONS
/* Sections to be discarded */ /* Sections to be discarded */
DISCARDS DISCARDS
/DISCARD/ : { *(.exit.literal) }
} }
...@@ -41,7 +41,12 @@ ...@@ -41,7 +41,12 @@
EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
EXPORT_SYMBOL(__strncpy_user); EXPORT_SYMBOL(__strncpy_user);
#endif
EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(copy_page);
......
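The raw __memset/__memcpy/__memmove entry points are exported because, with KASAN, the unprefixed names are made weak (see the ENTRY(__memcpy)/WEAK(memcpy) change in memcpy.S further down) so an instrumented wrapper can replace them, and __strncpy_user is only exported when the generic, instrumentable strncpy_from_user is not in use. The interposition idea, as a rough sketch of the mm/kasan wrapper rather than a quote of it:

	#ifdef CONFIG_KASAN
	void *memcpy(void *dst, const void *src, size_t len)
	{
		/* validate the shadow for both ranges, then fall through to
		 * the uninstrumented assembly implementation */
		check_memory_region((unsigned long)src, len, false, _RET_IP_);
		check_memory_region((unsigned long)dst, len, true, _RET_IP_);
		return __memcpy(dst, src, len);
	}
	#endif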
...@@ -14,9 +14,10 @@ ...@@ -14,9 +14,10 @@
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*/ */
#include <asm/errno.h> #include <linux/errno.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <variant/core.h> #include <variant/core.h>
#include <asm/asmmacro.h>
/* /*
* computes a partial checksum, e.g. for TCP/UDP fragments * computes a partial checksum, e.g. for TCP/UDP fragments
...@@ -175,23 +176,8 @@ ENDPROC(csum_partial) ...@@ -175,23 +176,8 @@ ENDPROC(csum_partial)
/* /*
* Copy from ds while checksumming, otherwise like csum_partial * Copy from ds while checksumming, otherwise like csum_partial
*
* The macros SRC and DST specify the type of access for the instruction.
* thus we can call a custom exception handler for each access type.
*/ */
#define SRC(y...) \
9999: y; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
#define DST(y...) \
9999: y; \
.section __ex_table, "a"; \
.long 9999b, 6002f ; \
.previous
/* /*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
int sum, int *src_err_ptr, int *dst_err_ptr) int sum, int *src_err_ptr, int *dst_err_ptr)
...@@ -244,28 +230,28 @@ ENTRY(csum_partial_copy_generic) ...@@ -244,28 +230,28 @@ ENTRY(csum_partial_copy_generic)
add a10, a10, a2 /* a10 = end of last 32-byte src chunk */ add a10, a10, a2 /* a10 = end of last 32-byte src chunk */
.Loop5: .Loop5:
#endif #endif
SRC( l32i a9, a2, 0 ) EX(10f) l32i a9, a2, 0
SRC( l32i a8, a2, 4 ) EX(10f) l32i a8, a2, 4
DST( s32i a9, a3, 0 ) EX(11f) s32i a9, a3, 0
DST( s32i a8, a3, 4 ) EX(11f) s32i a8, a3, 4
ONES_ADD(a5, a9) ONES_ADD(a5, a9)
ONES_ADD(a5, a8) ONES_ADD(a5, a8)
SRC( l32i a9, a2, 8 ) EX(10f) l32i a9, a2, 8
SRC( l32i a8, a2, 12 ) EX(10f) l32i a8, a2, 12
DST( s32i a9, a3, 8 ) EX(11f) s32i a9, a3, 8
DST( s32i a8, a3, 12 ) EX(11f) s32i a8, a3, 12
ONES_ADD(a5, a9) ONES_ADD(a5, a9)
ONES_ADD(a5, a8) ONES_ADD(a5, a8)
SRC( l32i a9, a2, 16 ) EX(10f) l32i a9, a2, 16
SRC( l32i a8, a2, 20 ) EX(10f) l32i a8, a2, 20
DST( s32i a9, a3, 16 ) EX(11f) s32i a9, a3, 16
DST( s32i a8, a3, 20 ) EX(11f) s32i a8, a3, 20
ONES_ADD(a5, a9) ONES_ADD(a5, a9)
ONES_ADD(a5, a8) ONES_ADD(a5, a8)
SRC( l32i a9, a2, 24 ) EX(10f) l32i a9, a2, 24
SRC( l32i a8, a2, 28 ) EX(10f) l32i a8, a2, 28
DST( s32i a9, a3, 24 ) EX(11f) s32i a9, a3, 24
DST( s32i a8, a3, 28 ) EX(11f) s32i a8, a3, 28
ONES_ADD(a5, a9) ONES_ADD(a5, a9)
ONES_ADD(a5, a8) ONES_ADD(a5, a8)
addi a2, a2, 32 addi a2, a2, 32
...@@ -284,8 +270,8 @@ DST( s32i a8, a3, 28 ) ...@@ -284,8 +270,8 @@ DST( s32i a8, a3, 28 )
add a10, a10, a2 /* a10 = end of last 4-byte src chunk */ add a10, a10, a2 /* a10 = end of last 4-byte src chunk */
.Loop6: .Loop6:
#endif #endif
SRC( l32i a9, a2, 0 ) EX(10f) l32i a9, a2, 0
DST( s32i a9, a3, 0 ) EX(11f) s32i a9, a3, 0
ONES_ADD(a5, a9) ONES_ADD(a5, a9)
addi a2, a2, 4 addi a2, a2, 4
addi a3, a3, 4 addi a3, a3, 4
...@@ -315,8 +301,8 @@ DST( s32i a9, a3, 0 ) ...@@ -315,8 +301,8 @@ DST( s32i a9, a3, 0 )
add a10, a10, a2 /* a10 = end of last 2-byte src chunk */ add a10, a10, a2 /* a10 = end of last 2-byte src chunk */
.Loop7: .Loop7:
#endif #endif
SRC( l16ui a9, a2, 0 ) EX(10f) l16ui a9, a2, 0
DST( s16i a9, a3, 0 ) EX(11f) s16i a9, a3, 0
ONES_ADD(a5, a9) ONES_ADD(a5, a9)
addi a2, a2, 2 addi a2, a2, 2
addi a3, a3, 2 addi a3, a3, 2
...@@ -326,8 +312,8 @@ DST( s16i a9, a3, 0 ) ...@@ -326,8 +312,8 @@ DST( s16i a9, a3, 0 )
4: 4:
/* This section processes a possible trailing odd byte. */ /* This section processes a possible trailing odd byte. */
_bbci.l a4, 0, 8f /* 1-byte chunk */ _bbci.l a4, 0, 8f /* 1-byte chunk */
SRC( l8ui a9, a2, 0 ) EX(10f) l8ui a9, a2, 0
DST( s8i a9, a3, 0 ) EX(11f) s8i a9, a3, 0
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
slli a9, a9, 8 /* shift byte to bits 8..15 */ slli a9, a9, 8 /* shift byte to bits 8..15 */
#endif #endif
...@@ -350,10 +336,10 @@ DST( s8i a9, a3, 0 ) ...@@ -350,10 +336,10 @@ DST( s8i a9, a3, 0 )
add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */ add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */
.Loop8: .Loop8:
#endif #endif
SRC( l8ui a9, a2, 0 ) EX(10f) l8ui a9, a2, 0
SRC( l8ui a8, a2, 1 ) EX(10f) l8ui a8, a2, 1
DST( s8i a9, a3, 0 ) EX(11f) s8i a9, a3, 0
DST( s8i a8, a3, 1 ) EX(11f) s8i a8, a3, 1
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
slli a9, a9, 8 /* combine into a single 16-bit value */ slli a9, a9, 8 /* combine into a single 16-bit value */
#else /* for checksum computation */ #else /* for checksum computation */
...@@ -381,7 +367,7 @@ ENDPROC(csum_partial_copy_generic) ...@@ -381,7 +367,7 @@ ENDPROC(csum_partial_copy_generic)
a12 = original dst for exception handling a12 = original dst for exception handling
*/ */
6001: 10:
_movi a2, -EFAULT _movi a2, -EFAULT
s32i a2, a6, 0 /* src_err_ptr */ s32i a2, a6, 0 /* src_err_ptr */
...@@ -403,7 +389,7 @@ ENDPROC(csum_partial_copy_generic) ...@@ -403,7 +389,7 @@ ENDPROC(csum_partial_copy_generic)
2: 2:
retw retw
6002: 11:
movi a2, -EFAULT movi a2, -EFAULT
s32i a2, a7, 0 /* dst_err_ptr */ s32i a2, a7, 0 /* dst_err_ptr */
movi a2, 0 movi a2, 0
......
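The file-local SRC()/DST() wrappers removed above (and the per-file EX() copies removed from memset.S, strncpy_user.S, strnlen_user.S and usercopy.S below) all did the same thing: record a (faulting instruction, fixup label) pair in __ex_table. They are replaced by one EX() macro pulled in through <asm/asmmacro.h>; its definition is not part of this diff, but reconstructed from the removed copies it amounts to roughly:

	/* sketch only -- the real macro lives in arch/xtensa/include/asm/asmmacro.h */
	#define EX(handler)				\
		.section __ex_table, "a";		\
		.word	97f, handler;			\
		.previous;				\
	97:

	/* used as a prefix on the access that may fault, e.g.
	 *	EX(10f)	l32i	a9, a2, 0
	 * so a fault in that l32i resumes at local label "10" in .fixup */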
...@@ -9,23 +9,9 @@ ...@@ -9,23 +9,9 @@
* Copyright (C) 2002 - 2012 Tensilica Inc. * Copyright (C) 2002 - 2012 Tensilica Inc.
*/ */
#include <linux/linkage.h>
#include <variant/core.h> #include <variant/core.h>
#include <asm/asmmacro.h>
.macro src_b r, w0, w1
#ifdef __XTENSA_EB__
src \r, \w0, \w1
#else
src \r, \w1, \w0
#endif
.endm
.macro ssa8 r
#ifdef __XTENSA_EB__
ssa8b \r
#else
ssa8l \r
#endif
.endm
/* /*
* void *memcpy(void *dst, const void *src, size_t len); * void *memcpy(void *dst, const void *src, size_t len);
...@@ -123,10 +109,8 @@ ...@@ -123,10 +109,8 @@
addi a5, a5, 2 addi a5, a5, 2
j .Ldstaligned # dst is now aligned, return to main algorithm j .Ldstaligned # dst is now aligned, return to main algorithm
.align 4 ENTRY(__memcpy)
.global memcpy WEAK(memcpy)
.type memcpy,@function
memcpy:
entry sp, 16 # minimal stack frame entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
...@@ -209,7 +193,7 @@ memcpy: ...@@ -209,7 +193,7 @@ memcpy:
.Lsrcunaligned: .Lsrcunaligned:
_beqz a4, .Ldone # avoid loading anything for zero-length copies _beqz a4, .Ldone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src # copy 16 bytes per iteration for word-aligned dst and unaligned src
ssa8 a3 # set shift amount from byte offset __ssa8 a3 # set shift amount from byte offset
/* set to 1 when running on ISS (simulator) with the /* set to 1 when running on ISS (simulator) with the
lint or ferret client, or 0 to save a few cycles */ lint or ferret client, or 0 to save a few cycles */
...@@ -229,16 +213,16 @@ memcpy: ...@@ -229,16 +213,16 @@ memcpy:
.Loop2: .Loop2:
l32i a7, a3, 4 l32i a7, a3, 4
l32i a8, a3, 8 l32i a8, a3, 8
src_b a6, a6, a7 __src_b a6, a6, a7
s32i a6, a5, 0 s32i a6, a5, 0
l32i a9, a3, 12 l32i a9, a3, 12
src_b a7, a7, a8 __src_b a7, a7, a8
s32i a7, a5, 4 s32i a7, a5, 4
l32i a6, a3, 16 l32i a6, a3, 16
src_b a8, a8, a9 __src_b a8, a8, a9
s32i a8, a5, 8 s32i a8, a5, 8
addi a3, a3, 16 addi a3, a3, 16
src_b a9, a9, a6 __src_b a9, a9, a6
s32i a9, a5, 12 s32i a9, a5, 12
addi a5, a5, 16 addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
...@@ -249,10 +233,10 @@ memcpy: ...@@ -249,10 +233,10 @@ memcpy:
# copy 8 bytes # copy 8 bytes
l32i a7, a3, 4 l32i a7, a3, 4
l32i a8, a3, 8 l32i a8, a3, 8
src_b a6, a6, a7 __src_b a6, a6, a7
s32i a6, a5, 0 s32i a6, a5, 0
addi a3, a3, 8 addi a3, a3, 8
src_b a7, a7, a8 __src_b a7, a7, a8
s32i a7, a5, 4 s32i a7, a5, 4
addi a5, a5, 8 addi a5, a5, 8
mov a6, a8 mov a6, a8
...@@ -261,7 +245,7 @@ memcpy: ...@@ -261,7 +245,7 @@ memcpy:
# copy 4 bytes # copy 4 bytes
l32i a7, a3, 4 l32i a7, a3, 4
addi a3, a3, 4 addi a3, a3, 4
src_b a6, a6, a7 __src_b a6, a6, a7
s32i a6, a5, 0 s32i a6, a5, 0
addi a5, a5, 4 addi a5, a5, 4
mov a6, a7 mov a6, a7
...@@ -288,14 +272,14 @@ memcpy: ...@@ -288,14 +272,14 @@ memcpy:
s8i a6, a5, 0 s8i a6, a5, 0
retw retw
ENDPROC(__memcpy)
/* /*
* void bcopy(const void *src, void *dest, size_t n); * void bcopy(const void *src, void *dest, size_t n);
*/ */
.align 4
.global bcopy ENTRY(bcopy)
.type bcopy,@function
bcopy:
entry sp, 16 # minimal stack frame entry sp, 16 # minimal stack frame
# a2=src, a3=dst, a4=len # a2=src, a3=dst, a4=len
mov a5, a3 mov a5, a3
...@@ -303,6 +287,8 @@ bcopy: ...@@ -303,6 +287,8 @@ bcopy:
mov a2, a5 mov a2, a5
j .Lmovecommon # go to common code for memmove+bcopy j .Lmovecommon # go to common code for memmove+bcopy
ENDPROC(bcopy)
/* /*
* void *memmove(void *dst, const void *src, size_t len); * void *memmove(void *dst, const void *src, size_t len);
* *
...@@ -391,10 +377,8 @@ bcopy: ...@@ -391,10 +377,8 @@ bcopy:
j .Lbackdstaligned # dst is now aligned, j .Lbackdstaligned # dst is now aligned,
# return to main algorithm # return to main algorithm
.align 4 ENTRY(__memmove)
.global memmove WEAK(memmove)
.type memmove,@function
memmove:
entry sp, 16 # minimal stack frame entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
...@@ -485,7 +469,7 @@ memmove: ...@@ -485,7 +469,7 @@ memmove:
.Lbacksrcunaligned: .Lbacksrcunaligned:
_beqz a4, .Lbackdone # avoid loading anything for zero-length copies _beqz a4, .Lbackdone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src # copy 16 bytes per iteration for word-aligned dst and unaligned src
ssa8 a3 # set shift amount from byte offset __ssa8 a3 # set shift amount from byte offset
#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with
* the lint or ferret client, or 0 * the lint or ferret client, or 0
* to save a few cycles */ * to save a few cycles */
...@@ -506,15 +490,15 @@ memmove: ...@@ -506,15 +490,15 @@ memmove:
l32i a7, a3, 12 l32i a7, a3, 12
l32i a8, a3, 8 l32i a8, a3, 8
addi a5, a5, -16 addi a5, a5, -16
src_b a6, a7, a6 __src_b a6, a7, a6
s32i a6, a5, 12 s32i a6, a5, 12
l32i a9, a3, 4 l32i a9, a3, 4
src_b a7, a8, a7 __src_b a7, a8, a7
s32i a7, a5, 8 s32i a7, a5, 8
l32i a6, a3, 0 l32i a6, a3, 0
src_b a8, a9, a8 __src_b a8, a9, a8
s32i a8, a5, 4 s32i a8, a5, 4
src_b a9, a6, a9 __src_b a9, a6, a9
s32i a9, a5, 0 s32i a9, a5, 0
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start
...@@ -526,9 +510,9 @@ memmove: ...@@ -526,9 +510,9 @@ memmove:
l32i a7, a3, 4 l32i a7, a3, 4
l32i a8, a3, 0 l32i a8, a3, 0
addi a5, a5, -8 addi a5, a5, -8
src_b a6, a7, a6 __src_b a6, a7, a6
s32i a6, a5, 4 s32i a6, a5, 4
src_b a7, a8, a7 __src_b a7, a8, a7
s32i a7, a5, 0 s32i a7, a5, 0
mov a6, a8 mov a6, a8
.Lback12: .Lback12:
...@@ -537,7 +521,7 @@ memmove: ...@@ -537,7 +521,7 @@ memmove:
addi a3, a3, -4 addi a3, a3, -4
l32i a7, a3, 0 l32i a7, a3, 0
addi a5, a5, -4 addi a5, a5, -4
src_b a6, a7, a6 __src_b a6, a7, a6
s32i a6, a5, 0 s32i a6, a5, 0
mov a6, a7 mov a6, a7
.Lback13: .Lback13:
...@@ -566,11 +550,4 @@ memmove: ...@@ -566,11 +550,4 @@ memmove:
s8i a6, a5, 0 s8i a6, a5, 0
retw retw
ENDPROC(__memmove)
/*
* Local Variables:
* mode:fundamental
* comment-start: "# "
* comment-start-skip: "# *"
* End:
*/
...@@ -11,7 +11,9 @@ ...@@ -11,7 +11,9 @@
* Copyright (C) 2002 Tensilica Inc. * Copyright (C) 2002 Tensilica Inc.
*/ */
#include <linux/linkage.h>
#include <variant/core.h> #include <variant/core.h>
#include <asm/asmmacro.h>
/* /*
* void *memset(void *dst, int c, size_t length) * void *memset(void *dst, int c, size_t length)
...@@ -28,20 +30,10 @@ ...@@ -28,20 +30,10 @@
* the alignment labels). * the alignment labels).
*/ */
/* Load or store instructions that may cause exceptions use the EX macro. */
#define EX(insn,reg1,reg2,offset,handler) \
9: insn reg1, reg2, offset; \
.section __ex_table, "a"; \
.word 9b, handler; \
.previous
.text .text
.align 4 ENTRY(__memset)
.global memset WEAK(memset)
.type memset,@function
memset:
entry sp, 16 # minimal stack frame entry sp, 16 # minimal stack frame
# a2/ dst, a3/ c, a4/ length # a2/ dst, a3/ c, a4/ length
extui a3, a3, 0, 8 # mask to just 8 bits extui a3, a3, 0, 8 # mask to just 8 bits
...@@ -73,10 +65,10 @@ memset: ...@@ -73,10 +65,10 @@ memset:
add a6, a6, a5 # a6 = end of last 16B chunk add a6, a6, a5 # a6 = end of last 16B chunk
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Loop1: .Loop1:
EX(s32i, a3, a5, 0, memset_fixup) EX(10f) s32i a3, a5, 0
EX(s32i, a3, a5, 4, memset_fixup) EX(10f) s32i a3, a5, 4
EX(s32i, a3, a5, 8, memset_fixup) EX(10f) s32i a3, a5, 8
EX(s32i, a3, a5, 12, memset_fixup) EX(10f) s32i a3, a5, 12
addi a5, a5, 16 addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a5, a6, .Loop1 blt a5, a6, .Loop1
...@@ -84,23 +76,23 @@ memset: ...@@ -84,23 +76,23 @@ memset:
.Loop1done: .Loop1done:
bbci.l a4, 3, .L2 bbci.l a4, 3, .L2
# set 8 bytes # set 8 bytes
EX(s32i, a3, a5, 0, memset_fixup) EX(10f) s32i a3, a5, 0
EX(s32i, a3, a5, 4, memset_fixup) EX(10f) s32i a3, a5, 4
addi a5, a5, 8 addi a5, a5, 8
.L2: .L2:
bbci.l a4, 2, .L3 bbci.l a4, 2, .L3
# set 4 bytes # set 4 bytes
EX(s32i, a3, a5, 0, memset_fixup) EX(10f) s32i a3, a5, 0
addi a5, a5, 4 addi a5, a5, 4
.L3: .L3:
bbci.l a4, 1, .L4 bbci.l a4, 1, .L4
# set 2 bytes # set 2 bytes
EX(s16i, a3, a5, 0, memset_fixup) EX(10f) s16i a3, a5, 0
addi a5, a5, 2 addi a5, a5, 2
.L4: .L4:
bbci.l a4, 0, .L5 bbci.l a4, 0, .L5
# set 1 byte # set 1 byte
EX(s8i, a3, a5, 0, memset_fixup) EX(10f) s8i a3, a5, 0
.L5: .L5:
.Lret1: .Lret1:
retw retw
...@@ -114,7 +106,7 @@ memset: ...@@ -114,7 +106,7 @@ memset:
bbci.l a5, 0, .L20 # branch if dst alignment half-aligned bbci.l a5, 0, .L20 # branch if dst alignment half-aligned
# dst is only byte aligned # dst is only byte aligned
# set 1 byte # set 1 byte
EX(s8i, a3, a5, 0, memset_fixup) EX(10f) s8i a3, a5, 0
addi a5, a5, 1 addi a5, a5, 1
addi a4, a4, -1 addi a4, a4, -1
# now retest if dst aligned # now retest if dst aligned
...@@ -122,7 +114,7 @@ memset: ...@@ -122,7 +114,7 @@ memset:
.L20: .L20:
# dst half-aligned # dst half-aligned
# set 2 bytes # set 2 bytes
EX(s16i, a3, a5, 0, memset_fixup) EX(10f) s16i a3, a5, 0
addi a5, a5, 2 addi a5, a5, 2
addi a4, a4, -2 addi a4, a4, -2
j .L0 # dst is now aligned, return to main algorithm j .L0 # dst is now aligned, return to main algorithm
...@@ -141,7 +133,7 @@ memset: ...@@ -141,7 +133,7 @@ memset:
add a6, a5, a4 # a6 = ending address add a6, a5, a4 # a6 = ending address
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Lbyteloop: .Lbyteloop:
EX(s8i, a3, a5, 0, memset_fixup) EX(10f) s8i a3, a5, 0
addi a5, a5, 1 addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a5, a6, .Lbyteloop blt a5, a6, .Lbyteloop
...@@ -149,12 +141,13 @@ memset: ...@@ -149,12 +141,13 @@ memset:
.Lbytesetdone: .Lbytesetdone:
retw retw
ENDPROC(__memset)
.section .fixup, "ax" .section .fixup, "ax"
.align 4 .align 4
/* We return zero if a failure occurred. */ /* We return zero if a failure occurred. */
memset_fixup: 10:
movi a2, 0 movi a2, 0
retw retw
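The fixup now uses the numeric-label scheme expected by EX() but keeps the old contract: __memset returns its destination on success and 0 if a store faulted. The companion user-space helper (a hedged reconstruction of the uaccess.h side, which is not shown in this diff) relies on exactly that:

	static inline unsigned long
	__xtensa_clear_user(void *addr, unsigned long size)
	{
		if (!__memset(addr, 0, size))	/* 0 => a store faulted */
			return size;		/* report everything as not cleared */
		return 0;
	}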
...@@ -49,17 +49,6 @@ ...@@ -49,17 +49,6 @@
* *
*/ */
/* define DEBUG to print some debugging messages. */
#undef DEBUG
#ifdef DEBUG
# define DBG(x...) printk(x)
#else
# define DBG(x...)
#endif
static int pciauto_upper_iospc; static int pciauto_upper_iospc;
static int pciauto_upper_memspc; static int pciauto_upper_memspc;
...@@ -97,7 +86,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) ...@@ -97,7 +86,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
{ {
bar_size &= PCI_BASE_ADDRESS_IO_MASK; bar_size &= PCI_BASE_ADDRESS_IO_MASK;
upper_limit = &pciauto_upper_iospc; upper_limit = &pciauto_upper_iospc;
DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr); pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
} }
else else
{ {
...@@ -107,7 +96,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) ...@@ -107,7 +96,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
bar_size &= PCI_BASE_ADDRESS_MEM_MASK; bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
upper_limit = &pciauto_upper_memspc; upper_limit = &pciauto_upper_memspc;
DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr); pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
} }
/* Allocate a base address (bar_size is negative!) */ /* Allocate a base address (bar_size is negative!) */
...@@ -125,7 +114,8 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit) ...@@ -125,7 +114,8 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
if (found_mem64) if (found_mem64)
pci_write_config_dword(dev, (bar+=4), 0x00000000); pci_write_config_dword(dev, (bar+=4), 0x00000000);
DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit); pr_debug("size=0x%x, address=0x%x\n",
~bar_size + 1, *upper_limit);
} }
} }
...@@ -150,7 +140,7 @@ pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn) ...@@ -150,7 +140,7 @@ pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
if (irq == -1) if (irq == -1)
irq = 0; irq = 0;
DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin); pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
} }
...@@ -289,8 +279,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) ...@@ -289,8 +279,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
int iosave, memsave; int iosave, memsave;
DBG("PCI Autoconfig: Found P2P bridge, device %d\n", pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n",
PCI_SLOT(pci_devfn)); PCI_SLOT(pci_devfn));
/* Allocate PCI I/O and/or memory space */ /* Allocate PCI I/O and/or memory space */
pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1); pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);
...@@ -306,23 +296,6 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) ...@@ -306,23 +296,6 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
} }
#if 0
/* Skip legacy mode IDE controller */
if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {
unsigned char prg_iface;
pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);
if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
DBG("PCI Autoconfig: Skipping legacy mode "
"IDE controller\n");
continue;
}
}
#endif
/* /*
* Found a peripheral, enable some standard * Found a peripheral, enable some standard
* settings * settings
...@@ -337,8 +310,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) ...@@ -337,8 +310,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
/* Allocate PCI I/O and/or memory space */ /* Allocate PCI I/O and/or memory space */
DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) ); current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn));
pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5); pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
pciauto_setup_irq(pci_ctrl, dev, pci_devfn); pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
......
...@@ -11,16 +11,10 @@ ...@@ -11,16 +11,10 @@
* Copyright (C) 2002 Tensilica Inc. * Copyright (C) 2002 Tensilica Inc.
*/ */
#include <variant/core.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/linkage.h>
/* Load or store instructions that may cause exceptions use the EX macro. */ #include <variant/core.h>
#include <asm/asmmacro.h>
#define EX(insn,reg1,reg2,offset,handler) \
9: insn reg1, reg2, offset; \
.section __ex_table, "a"; \
.word 9b, handler; \
.previous
/* /*
* char *__strncpy_user(char *dst, const char *src, size_t len) * char *__strncpy_user(char *dst, const char *src, size_t len)
...@@ -54,10 +48,8 @@ ...@@ -54,10 +48,8 @@
# a12/ tmp # a12/ tmp
.text .text
.align 4 ENTRY(__strncpy_user)
.global __strncpy_user
.type __strncpy_user,@function
__strncpy_user:
entry sp, 16 # minimal stack frame entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
mov a11, a2 # leave dst in return value register mov a11, a2 # leave dst in return value register
...@@ -75,9 +67,9 @@ __strncpy_user: ...@@ -75,9 +67,9 @@ __strncpy_user:
j .Ldstunaligned j .Ldstunaligned
.Lsrc1mod2: # src address is odd .Lsrc1mod2: # src address is odd
EX(l8ui, a9, a3, 0, fixup_l) # get byte 0 EX(11f) l8ui a9, a3, 0 # get byte 0
addi a3, a3, 1 # advance src pointer addi a3, a3, 1 # advance src pointer
EX(s8i, a9, a11, 0, fixup_s) # store byte 0 EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len addi a4, a4, -1 # decrement len
...@@ -85,16 +77,16 @@ __strncpy_user: ...@@ -85,16 +77,16 @@ __strncpy_user:
bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned
.Lsrc2mod4: # src address is 2 mod 4 .Lsrc2mod4: # src address is 2 mod 4
EX(l8ui, a9, a3, 0, fixup_l) # get byte 0 EX(11f) l8ui a9, a3, 0 # get byte 0
/* 1-cycle interlock */ /* 1-cycle interlock */
EX(s8i, a9, a11, 0, fixup_s) # store byte 0 EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len addi a4, a4, -1 # decrement len
beqz a4, .Lret # if len is zero beqz a4, .Lret # if len is zero
EX(l8ui, a9, a3, 1, fixup_l) # get byte 0 EX(11f) l8ui a9, a3, 1 # get byte 0
addi a3, a3, 2 # advance src pointer addi a3, a3, 2 # advance src pointer
EX(s8i, a9, a11, 0, fixup_s) # store byte 0 EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len addi a4, a4, -1 # decrement len
...@@ -117,12 +109,12 @@ __strncpy_user: ...@@ -117,12 +109,12 @@ __strncpy_user:
add a12, a12, a11 # a12 = end of last 4B chunk add a12, a12, a11 # a12 = end of last 4B chunk
#endif #endif
.Loop1: .Loop1:
EX(l32i, a9, a3, 0, fixup_l) # get word from src EX(11f) l32i a9, a3, 0 # get word from src
addi a3, a3, 4 # advance src pointer addi a3, a3, 4 # advance src pointer
bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero bnone a9, a6, .Lz1 # if byte 1 is zero
bnone a9, a7, .Lz2 # if byte 2 is zero bnone a9, a7, .Lz2 # if byte 2 is zero
EX(s32i, a9, a11, 0, fixup_s) # store word to dst EX(10f) s32i a9, a11, 0 # store word to dst
bnone a9, a8, .Lz3 # if byte 3 is zero bnone a9, a8, .Lz3 # if byte 3 is zero
addi a11, a11, 4 # advance dst pointer addi a11, a11, 4 # advance dst pointer
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
...@@ -132,7 +124,7 @@ __strncpy_user: ...@@ -132,7 +124,7 @@ __strncpy_user:
.Loop1done: .Loop1done:
bbci.l a4, 1, .L100 bbci.l a4, 1, .L100
# copy 2 bytes # copy 2 bytes
EX(l16ui, a9, a3, 0, fixup_l) EX(11f) l16ui a9, a3, 0
addi a3, a3, 2 # advance src pointer addi a3, a3, 2 # advance src pointer
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
bnone a9, a7, .Lz0 # if byte 2 is zero bnone a9, a7, .Lz0 # if byte 2 is zero
...@@ -141,13 +133,13 @@ __strncpy_user: ...@@ -141,13 +133,13 @@ __strncpy_user:
bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero bnone a9, a6, .Lz1 # if byte 1 is zero
#endif #endif
EX(s16i, a9, a11, 0, fixup_s) EX(10f) s16i a9, a11, 0
addi a11, a11, 2 # advance dst pointer addi a11, a11, 2 # advance dst pointer
.L100: .L100:
bbci.l a4, 0, .Lret bbci.l a4, 0, .Lret
EX(l8ui, a9, a3, 0, fixup_l) EX(11f) l8ui a9, a3, 0
/* slot */ /* slot */
EX(s8i, a9, a11, 0, fixup_s) EX(10f) s8i a9, a11, 0
beqz a9, .Lret # if byte is zero beqz a9, .Lret # if byte is zero
addi a11, a11, 1-3 # advance dst ptr 1, but also cancel addi a11, a11, 1-3 # advance dst ptr 1, but also cancel
# the effect of adding 3 in .Lz3 code # the effect of adding 3 in .Lz3 code
...@@ -161,14 +153,14 @@ __strncpy_user: ...@@ -161,14 +153,14 @@ __strncpy_user:
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
movi a9, 0 movi a9, 0
#endif /* __XTENSA_EB__ */ #endif /* __XTENSA_EB__ */
EX(s8i, a9, a11, 0, fixup_s) EX(10f) s8i a9, a11, 0
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw retw
.Lz1: # byte 1 is zero .Lz1: # byte 1 is zero
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
extui a9, a9, 16, 16 extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */ #endif /* __XTENSA_EB__ */
EX(s16i, a9, a11, 0, fixup_s) EX(10f) s16i a9, a11, 0
addi a11, a11, 1 # advance dst pointer addi a11, a11, 1 # advance dst pointer
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw retw
...@@ -176,9 +168,9 @@ __strncpy_user: ...@@ -176,9 +168,9 @@ __strncpy_user:
#ifdef __XTENSA_EB__ #ifdef __XTENSA_EB__
extui a9, a9, 16, 16 extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */ #endif /* __XTENSA_EB__ */
EX(s16i, a9, a11, 0, fixup_s) EX(10f) s16i a9, a11, 0
movi a9, 0 movi a9, 0
EX(s8i, a9, a11, 2, fixup_s) EX(10f) s8i a9, a11, 2
addi a11, a11, 2 # advance dst pointer addi a11, a11, 2 # advance dst pointer
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw retw
...@@ -196,9 +188,9 @@ __strncpy_user: ...@@ -196,9 +188,9 @@ __strncpy_user:
add a12, a11, a4 # a12 = ending address add a12, a11, a4 # a12 = ending address
#endif /* XCHAL_HAVE_LOOPS */ #endif /* XCHAL_HAVE_LOOPS */
.Lnextbyte: .Lnextbyte:
EX(l8ui, a9, a3, 0, fixup_l) EX(11f) l8ui a9, a3, 0
addi a3, a3, 1 addi a3, a3, 1
EX(s8i, a9, a11, 0, fixup_s) EX(10f) s8i a9, a11, 0
beqz a9, .Lunalignedend beqz a9, .Lunalignedend
addi a11, a11, 1 addi a11, a11, 1
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
...@@ -209,6 +201,7 @@ __strncpy_user: ...@@ -209,6 +201,7 @@ __strncpy_user:
sub a2, a11, a2 # compute strlen sub a2, a11, a2 # compute strlen
retw retw
ENDPROC(__strncpy_user)
.section .fixup, "ax" .section .fixup, "ax"
.align 4 .align 4
...@@ -218,8 +211,7 @@ __strncpy_user: ...@@ -218,8 +211,7 @@ __strncpy_user:
* implementation in memset(). Thus, we differentiate between * implementation in memset(). Thus, we differentiate between
* load/store fixups. */ * load/store fixups. */
fixup_s: 10:
fixup_l: 11:
movi a2, -EFAULT movi a2, -EFAULT
retw retw
...@@ -11,15 +11,9 @@ ...@@ -11,15 +11,9 @@
* Copyright (C) 2002 Tensilica Inc. * Copyright (C) 2002 Tensilica Inc.
*/ */
#include <linux/linkage.h>
#include <variant/core.h> #include <variant/core.h>
#include <asm/asmmacro.h>
/* Load or store instructions that may cause exceptions use the EX macro. */
#define EX(insn,reg1,reg2,offset,handler) \
9: insn reg1, reg2, offset; \
.section __ex_table, "a"; \
.word 9b, handler; \
.previous
/* /*
* size_t __strnlen_user(const char *s, size_t len) * size_t __strnlen_user(const char *s, size_t len)
...@@ -49,10 +43,8 @@ ...@@ -49,10 +43,8 @@
# a10/ tmp # a10/ tmp
.text .text
.align 4 ENTRY(__strnlen_user)
.global __strnlen_user
.type __strnlen_user,@function
__strnlen_user:
entry sp, 16 # minimal stack frame entry sp, 16 # minimal stack frame
# a2/ s, a3/ len # a2/ s, a3/ len
addi a4, a2, -4 # because we overincrement at the end; addi a4, a2, -4 # because we overincrement at the end;
...@@ -77,7 +69,7 @@ __strnlen_user: ...@@ -77,7 +69,7 @@ __strnlen_user:
add a10, a10, a4 # a10 = end of last 4B chunk add a10, a10, a4 # a10 = end of last 4B chunk
#endif /* XCHAL_HAVE_LOOPS */ #endif /* XCHAL_HAVE_LOOPS */
.Loop: .Loop:
EX(l32i, a9, a4, 4, lenfixup) # get next word of string EX(10f) l32i a9, a4, 4 # get next word of string
addi a4, a4, 4 # advance string pointer addi a4, a4, 4 # advance string pointer
bnone a9, a5, .Lz0 # if byte 0 is zero bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero bnone a9, a6, .Lz1 # if byte 1 is zero
...@@ -88,7 +80,7 @@ __strnlen_user: ...@@ -88,7 +80,7 @@ __strnlen_user:
#endif #endif
.Ldone: .Ldone:
EX(l32i, a9, a4, 4, lenfixup) # load 4 bytes for remaining checks EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks
bbci.l a3, 1, .L100 bbci.l a3, 1, .L100
# check two more bytes (bytes 0, 1 of word) # check two more bytes (bytes 0, 1 of word)
...@@ -125,14 +117,14 @@ __strnlen_user: ...@@ -125,14 +117,14 @@ __strnlen_user:
retw retw
.L1mod2: # address is odd .L1mod2: # address is odd
EX(l8ui, a9, a4, 4, lenfixup) # get byte 0 EX(10f) l8ui a9, a4, 4 # get byte 0
addi a4, a4, 1 # advance string pointer addi a4, a4, 1 # advance string pointer
beqz a9, .Lz3 # if byte 0 is zero beqz a9, .Lz3 # if byte 0 is zero
bbci.l a4, 1, .Laligned # if string pointer is now word-aligned bbci.l a4, 1, .Laligned # if string pointer is now word-aligned
.L2mod4: # address is 2 mod 4 .L2mod4: # address is 2 mod 4
addi a4, a4, 2 # advance ptr for aligned access addi a4, a4, 2 # advance ptr for aligned access
EX(l32i, a9, a4, 0, lenfixup) # get word with first two bytes of string EX(10f) l32i a9, a4, 0 # get word with first two bytes of string
bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero
bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero
# byte 3 is zero # byte 3 is zero
...@@ -140,8 +132,10 @@ __strnlen_user: ...@@ -140,8 +132,10 @@ __strnlen_user:
sub a2, a4, a2 # subtract to get length sub a2, a4, a2 # subtract to get length
retw retw
ENDPROC(__strnlen_user)
.section .fixup, "ax" .section .fixup, "ax"
.align 4 .align 4
lenfixup: 10:
movi a2, 0 movi a2, 0
retw retw
...@@ -53,30 +53,13 @@ ...@@ -53,30 +53,13 @@
* a11/ original length * a11/ original length
*/ */
#include <linux/linkage.h>
#include <variant/core.h> #include <variant/core.h>
#include <asm/asmmacro.h>
#ifdef __XTENSA_EB__
#define ALIGN(R, W0, W1) src R, W0, W1
#define SSA8(R) ssa8b R
#else
#define ALIGN(R, W0, W1) src R, W1, W0
#define SSA8(R) ssa8l R
#endif
/* Load or store instructions that may cause exceptions use the EX macro. */
#define EX(insn,reg1,reg2,offset,handler) \
9: insn reg1, reg2, offset; \
.section __ex_table, "a"; \
.word 9b, handler; \
.previous
.text .text
.align 4 ENTRY(__xtensa_copy_user)
.global __xtensa_copy_user
.type __xtensa_copy_user,@function
__xtensa_copy_user:
entry sp, 16 # minimal stack frame entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len # a2/ dst, a3/ src, a4/ len
mov a5, a2 # copy dst so that a2 is return value mov a5, a2 # copy dst so that a2 is return value
...@@ -89,7 +72,7 @@ __xtensa_copy_user: ...@@ -89,7 +72,7 @@ __xtensa_copy_user:
# per iteration # per iteration
movi a8, 3 # if source is also aligned, movi a8, 3 # if source is also aligned,
bnone a3, a8, .Laligned # then use word copy bnone a3, a8, .Laligned # then use word copy
SSA8( a3) # set shift amount from byte offset __ssa8 a3 # set shift amount from byte offset
bnez a4, .Lsrcunaligned bnez a4, .Lsrcunaligned
movi a2, 0 # return success for len==0 movi a2, 0 # return success for len==0
retw retw
...@@ -102,9 +85,9 @@ __xtensa_copy_user: ...@@ -102,9 +85,9 @@ __xtensa_copy_user:
bltui a4, 7, .Lbytecopy # do short copies byte by byte bltui a4, 7, .Lbytecopy # do short copies byte by byte
# copy 1 byte # copy 1 byte
EX(l8ui, a6, a3, 0, fixup) EX(10f) l8ui a6, a3, 0
addi a3, a3, 1 addi a3, a3, 1
EX(s8i, a6, a5, 0, fixup) EX(10f) s8i a6, a5, 0
addi a5, a5, 1 addi a5, a5, 1
addi a4, a4, -1 addi a4, a4, -1
bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
...@@ -112,11 +95,11 @@ __xtensa_copy_user: ...@@ -112,11 +95,11 @@ __xtensa_copy_user:
.Ldst2mod4: # dst 16-bit aligned .Ldst2mod4: # dst 16-bit aligned
# copy 2 bytes # copy 2 bytes
bltui a4, 6, .Lbytecopy # do short copies byte by byte bltui a4, 6, .Lbytecopy # do short copies byte by byte
EX(l8ui, a6, a3, 0, fixup) EX(10f) l8ui a6, a3, 0
EX(l8ui, a7, a3, 1, fixup) EX(10f) l8ui a7, a3, 1
addi a3, a3, 2 addi a3, a3, 2
EX(s8i, a6, a5, 0, fixup) EX(10f) s8i a6, a5, 0
EX(s8i, a7, a5, 1, fixup) EX(10f) s8i a7, a5, 1
addi a5, a5, 2 addi a5, a5, 2
addi a4, a4, -2 addi a4, a4, -2
j .Ldstaligned # dst is now aligned, return to main algorithm j .Ldstaligned # dst is now aligned, return to main algorithm
...@@ -135,9 +118,9 @@ __xtensa_copy_user: ...@@ -135,9 +118,9 @@ __xtensa_copy_user:
add a7, a3, a4 # a7 = end address for source add a7, a3, a4 # a7 = end address for source
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte: .Lnextbyte:
EX(l8ui, a6, a3, 0, fixup) EX(10f) l8ui a6, a3, 0
addi a3, a3, 1 addi a3, a3, 1
EX(s8i, a6, a5, 0, fixup) EX(10f) s8i a6, a5, 0
addi a5, a5, 1 addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a7, .Lnextbyte blt a3, a7, .Lnextbyte
...@@ -161,15 +144,15 @@ __xtensa_copy_user: ...@@ -161,15 +144,15 @@ __xtensa_copy_user:
add a8, a8, a3 # a8 = end of last 16B source chunk add a8, a8, a3 # a8 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Loop1: .Loop1:
EX(l32i, a6, a3, 0, fixup) EX(10f) l32i a6, a3, 0
EX(l32i, a7, a3, 4, fixup) EX(10f) l32i a7, a3, 4
EX(s32i, a6, a5, 0, fixup) EX(10f) s32i a6, a5, 0
EX(l32i, a6, a3, 8, fixup) EX(10f) l32i a6, a3, 8
EX(s32i, a7, a5, 4, fixup) EX(10f) s32i a7, a5, 4
EX(l32i, a7, a3, 12, fixup) EX(10f) l32i a7, a3, 12
EX(s32i, a6, a5, 8, fixup) EX(10f) s32i a6, a5, 8
addi a3, a3, 16 addi a3, a3, 16
EX(s32i, a7, a5, 12, fixup) EX(10f) s32i a7, a5, 12
addi a5, a5, 16 addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a8, .Loop1 blt a3, a8, .Loop1
...@@ -177,31 +160,31 @@ __xtensa_copy_user: ...@@ -177,31 +160,31 @@ __xtensa_copy_user:
.Loop1done: .Loop1done:
bbci.l a4, 3, .L2 bbci.l a4, 3, .L2
# copy 8 bytes # copy 8 bytes
EX(l32i, a6, a3, 0, fixup) EX(10f) l32i a6, a3, 0
EX(l32i, a7, a3, 4, fixup) EX(10f) l32i a7, a3, 4
addi a3, a3, 8 addi a3, a3, 8
EX(s32i, a6, a5, 0, fixup) EX(10f) s32i a6, a5, 0
EX(s32i, a7, a5, 4, fixup) EX(10f) s32i a7, a5, 4
addi a5, a5, 8 addi a5, a5, 8
.L2: .L2:
bbci.l a4, 2, .L3 bbci.l a4, 2, .L3
# copy 4 bytes # copy 4 bytes
EX(l32i, a6, a3, 0, fixup) EX(10f) l32i a6, a3, 0
addi a3, a3, 4 addi a3, a3, 4
EX(s32i, a6, a5, 0, fixup) EX(10f) s32i a6, a5, 0
addi a5, a5, 4 addi a5, a5, 4
.L3: .L3:
bbci.l a4, 1, .L4 bbci.l a4, 1, .L4
# copy 2 bytes # copy 2 bytes
EX(l16ui, a6, a3, 0, fixup) EX(10f) l16ui a6, a3, 0
addi a3, a3, 2 addi a3, a3, 2
EX(s16i, a6, a5, 0, fixup) EX(10f) s16i a6, a5, 0
addi a5, a5, 2 addi a5, a5, 2
.L4: .L4:
bbci.l a4, 0, .L5 bbci.l a4, 0, .L5
# copy 1 byte # copy 1 byte
EX(l8ui, a6, a3, 0, fixup) EX(10f) l8ui a6, a3, 0
EX(s8i, a6, a5, 0, fixup) EX(10f) s8i a6, a5, 0
.L5: .L5:
movi a2, 0 # return success for len bytes copied movi a2, 0 # return success for len bytes copied
retw retw
...@@ -217,7 +200,7 @@ __xtensa_copy_user: ...@@ -217,7 +200,7 @@ __xtensa_copy_user:
# copy 16 bytes per iteration for word-aligned dst and unaligned src # copy 16 bytes per iteration for word-aligned dst and unaligned src
and a10, a3, a8 # save unalignment offset for below and a10, a3, a8 # save unalignment offset for below
sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
EX(l32i, a6, a3, 0, fixup) # load first word EX(10f) l32i a6, a3, 0 # load first word
#if XCHAL_HAVE_LOOPS #if XCHAL_HAVE_LOOPS
loopnez a7, .Loop2done loopnez a7, .Loop2done
#else /* !XCHAL_HAVE_LOOPS */ #else /* !XCHAL_HAVE_LOOPS */
...@@ -226,19 +209,19 @@ __xtensa_copy_user: ...@@ -226,19 +209,19 @@ __xtensa_copy_user:
add a12, a12, a3 # a12 = end of last 16B source chunk add a12, a12, a3 # a12 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */ #endif /* !XCHAL_HAVE_LOOPS */
.Loop2: .Loop2:
EX(l32i, a7, a3, 4, fixup) EX(10f) l32i a7, a3, 4
EX(l32i, a8, a3, 8, fixup) EX(10f) l32i a8, a3, 8
ALIGN( a6, a6, a7) __src_b a6, a6, a7
EX(s32i, a6, a5, 0, fixup) EX(10f) s32i a6, a5, 0
EX(l32i, a9, a3, 12, fixup) EX(10f) l32i a9, a3, 12
ALIGN( a7, a7, a8) __src_b a7, a7, a8
EX(s32i, a7, a5, 4, fixup) EX(10f) s32i a7, a5, 4
EX(l32i, a6, a3, 16, fixup) EX(10f) l32i a6, a3, 16
ALIGN( a8, a8, a9) __src_b a8, a8, a9
EX(s32i, a8, a5, 8, fixup) EX(10f) s32i a8, a5, 8
addi a3, a3, 16 addi a3, a3, 16
ALIGN( a9, a9, a6) __src_b a9, a9, a6
EX(s32i, a9, a5, 12, fixup) EX(10f) s32i a9, a5, 12
addi a5, a5, 16 addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS #if !XCHAL_HAVE_LOOPS
blt a3, a12, .Loop2 blt a3, a12, .Loop2
...@@ -246,43 +229,44 @@ __xtensa_copy_user: ...@@ -246,43 +229,44 @@ __xtensa_copy_user:
.Loop2done: .Loop2done:
bbci.l a4, 3, .L12 bbci.l a4, 3, .L12
# copy 8 bytes # copy 8 bytes
EX(l32i, a7, a3, 4, fixup) EX(10f) l32i a7, a3, 4
EX(l32i, a8, a3, 8, fixup) EX(10f) l32i a8, a3, 8
ALIGN( a6, a6, a7) __src_b a6, a6, a7
EX(s32i, a6, a5, 0, fixup) EX(10f) s32i a6, a5, 0
addi a3, a3, 8 addi a3, a3, 8
ALIGN( a7, a7, a8) __src_b a7, a7, a8
EX(s32i, a7, a5, 4, fixup) EX(10f) s32i a7, a5, 4
addi a5, a5, 8 addi a5, a5, 8
mov a6, a8 mov a6, a8
.L12: .L12:
bbci.l a4, 2, .L13 bbci.l a4, 2, .L13
# copy 4 bytes # copy 4 bytes
EX(l32i, a7, a3, 4, fixup) EX(10f) l32i a7, a3, 4
addi a3, a3, 4 addi a3, a3, 4
ALIGN( a6, a6, a7) __src_b a6, a6, a7
EX(s32i, a6, a5, 0, fixup) EX(10f) s32i a6, a5, 0
addi a5, a5, 4 addi a5, a5, 4
mov a6, a7 mov a6, a7
.L13: .L13:
add a3, a3, a10 # readjust a3 with correct misalignment add a3, a3, a10 # readjust a3 with correct misalignment
bbci.l a4, 1, .L14 bbci.l a4, 1, .L14
# copy 2 bytes # copy 2 bytes
EX(l8ui, a6, a3, 0, fixup) EX(10f) l8ui a6, a3, 0
EX(l8ui, a7, a3, 1, fixup) EX(10f) l8ui a7, a3, 1
addi a3, a3, 2 addi a3, a3, 2
EX(s8i, a6, a5, 0, fixup) EX(10f) s8i a6, a5, 0
EX(s8i, a7, a5, 1, fixup) EX(10f) s8i a7, a5, 1
addi a5, a5, 2 addi a5, a5, 2
.L14: .L14:
bbci.l a4, 0, .L15 bbci.l a4, 0, .L15
# copy 1 byte # copy 1 byte
EX(l8ui, a6, a3, 0, fixup) EX(10f) l8ui a6, a3, 0
EX(s8i, a6, a5, 0, fixup) EX(10f) s8i a6, a5, 0
.L15: .L15:
movi a2, 0 # return success for len bytes copied movi a2, 0 # return success for len bytes copied
retw retw
ENDPROC(__xtensa_copy_user)
.section .fixup, "ax" .section .fixup, "ax"
.align 4 .align 4
...@@ -294,7 +278,7 @@ __xtensa_copy_user: ...@@ -294,7 +278,7 @@ __xtensa_copy_user:
*/ */
fixup: 10:
sub a2, a5, a2 /* a2 <-- bytes copied */ sub a2, a5, a2 /* a2 <-- bytes copied */
sub a2, a11, a2 /* a2 <-- bytes not copied */ sub a2, a11, a2 /* a2 <-- bytes not copied */
retw retw
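The rewritten EX(10f) annotations record the address of the following load/store in the kernel exception table, so a fault inside the copy branches to the numeric label 10 above, and the fixup then returns the number of bytes that were not copied (0 meaning complete success). A minimal sketch of a caller relying on that contract (the struct and function names here are illustrative, not part of this patch):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct pkt {			/* illustrative layout */
	u32 len;
	u8 data[60];
};

static int read_pkt_from_user(struct pkt *dst, const void __user *src)
{
	/* copy_from_user() (backed on xtensa by __xtensa_copy_user) returns
	 * 0 on success, or the number of bytes left uncopied when a fault
	 * was taken part way through the copy. */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}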
...@@ -5,3 +5,8 @@ ...@@ -5,3 +5,8 @@
obj-y := init.o misc.o obj-y := init.o misc.o
obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_KASAN) += kasan_init.o
KASAN_SANITIZE_fault.o := n
KASAN_SANITIZE_kasan_init.o := n
KASAN_SANITIZE_mmu.o := n
...@@ -33,9 +33,6 @@ ...@@ -33,9 +33,6 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
//#define printd(x...) printk(x)
#define printd(x...) do { } while(0)
/* /*
* Note: * Note:
* The kernel provides one architecture bit PG_arch_1 in the page flags that * The kernel provides one architecture bit PG_arch_1 in the page flags that
......
...@@ -25,8 +25,6 @@ ...@@ -25,8 +25,6 @@
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int); void bad_page_fault(struct pt_regs*, unsigned long, int);
#undef DEBUG_PAGE_FAULT
/* /*
* This routine handles page faults. It determines the address, * This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate * and the problem, and then passes it off to one of the appropriate
...@@ -68,10 +66,10 @@ void do_page_fault(struct pt_regs *regs) ...@@ -68,10 +66,10 @@ void do_page_fault(struct pt_regs *regs)
exccause == EXCCAUSE_ITLB_MISS || exccause == EXCCAUSE_ITLB_MISS ||
exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
#ifdef DEBUG_PAGE_FAULT pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid, current->comm, current->pid,
address, exccause, regs->pc, is_write? "w":"", is_exec? "x":""); address, exccause, regs->pc,
#endif is_write ? "w" : "", is_exec ? "x" : "");
if (user_mode(regs)) if (user_mode(regs))
flags |= FAULT_FLAG_USER; flags |= FAULT_FLAG_USER;
...@@ -247,10 +245,8 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) ...@@ -247,10 +245,8 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Are we prepared to handle this kernel fault? */ /* Are we prepared to handle this kernel fault? */
if ((entry = search_exception_tables(regs->pc)) != NULL) { if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n", current->comm, regs->pc, entry->fixup);
current->comm, regs->pc, entry->fixup);
#endif
current->thread.bad_uaddr = address; current->thread.bad_uaddr = address;
regs->pc = entry->fixup; regs->pc = entry->fixup;
return; return;
...@@ -259,9 +255,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) ...@@ -259,9 +255,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Oops. The kernel tried to access some bad page. We'll have to /* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice. * terminate things with extreme prejudice.
*/ */
printk(KERN_ALERT "Unable to handle kernel paging request at virtual " pr_alert("Unable to handle kernel paging request at virtual "
"address %08lx\n pc = %08lx, ra = %08lx\n", "address %08lx\n pc = %08lx, ra = %08lx\n",
address, regs->pc, regs->areg[0]); address, regs->pc, regs->areg[0]);
die("Oops", regs, sig); die("Oops", regs, sig);
do_exit(sig); do_exit(sig);
} }
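The #ifdef DEBUG_PAGE_FAULT blocks are replaced by pr_debug(), which compiles to nothing unless debugging is opted into. A minimal sketch of the two opt-in mechanisms (the file name in the dynamic-debug command is only an example):

/* Option 1: compile time, per file -- define DEBUG before including
 * kernel headers (or add CFLAGS_fault.o := -DDEBUG in the Makefile). */
#define DEBUG
#include <linux/printk.h>

/* Option 2: run time, with CONFIG_DYNAMIC_DEBUG=y --
 *   echo 'file fault.c +p' > /sys/kernel/debug/dynamic_debug/control
 * enables every pr_debug() call site in fault.c. */

static void example(unsigned long address)
{
	/* Emitted only when one of the opt-ins above is active. */
	pr_debug("page fault at %08lx\n", address);
}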
...@@ -100,29 +100,51 @@ void __init mem_init(void) ...@@ -100,29 +100,51 @@ void __init mem_init(void)
mem_init_print_info(NULL); mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n" pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
" kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
#endif
#ifdef CONFIG_MMU
" vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
#endif
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif #endif
#ifdef CONFIG_MMU " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n"
" vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" " .text : 0x%08lx - 0x%08lx (%5lu kB)\n"
" .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n"
" .data : 0x%08lx - 0x%08lx (%5lu kB)\n"
" .init : 0x%08lx - 0x%08lx (%5lu kB)\n"
" .bss : 0x%08lx - 0x%08lx (%5lu kB)\n",
#ifdef CONFIG_KASAN
KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
KASAN_SHADOW_SIZE >> 20,
#endif #endif
" lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", #ifdef CONFIG_MMU
VMALLOC_START, VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10, (LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP, FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10, (FIXADDR_TOP - FIXADDR_START) >> 10,
#endif #endif
#ifdef CONFIG_MMU
VMALLOC_START, VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
PAGE_OFFSET, PAGE_OFFSET + PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE, (max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else #else
min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE, min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif #endif
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20); ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
(unsigned long)_text, (unsigned long)_etext,
(unsigned long)(_etext - _text) >> 10,
(unsigned long)__start_rodata, (unsigned long)_sdata,
(unsigned long)(_sdata - __start_rodata) >> 10,
(unsigned long)_sdata, (unsigned long)_edata,
(unsigned long)(_edata - _sdata) >> 10,
(unsigned long)__init_begin, (unsigned long)__init_end,
(unsigned long)(__init_end - __init_begin) >> 10,
(unsigned long)__bss_start, (unsigned long)__bss_stop,
(unsigned long)(__bss_stop - __bss_start) >> 10);
} }
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
......
/*
* Xtensa KASAN shadow map initialization
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2017 Cadence Design Systems Inc.
*/
#include <linux/bootmem.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
int i;
for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_zero_pte + i,
mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
BUG_ON(!pmd_none(*pmd));
set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
}
early_trap_init();
}
static void __init populate(void *start, void *end)
{
unsigned long n_pages = (end - start) / PAGE_SIZE;
unsigned long n_pmds = n_pages / PTRS_PER_PTE;
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
pr_debug("%s: %p - %p\n", __func__, start, end);
for (i = j = 0; i < n_pmds; ++i) {
int k;
for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
phys_addr_t phys =
memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
MEMBLOCK_ALLOC_ANYWHERE);
set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
}
}
for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
set_pmd(pmd + i, __pmd((unsigned long)pte));
local_flush_tlb_all();
memset(start, 0, end - start);
}
void __init kasan_init(void)
{
int i;
BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
(KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
/*
* Replace shadow map pages that cover addresses from VMALLOC area
* start to the end of KSEG with clean writable pages.
*/
populate(kasan_mem_to_shadow((void *)VMALLOC_START),
kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
/* Write protect kasan_zero_page and zero-initialize it again. */
for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_zero_pte + i,
mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
local_flush_tlb_all();
memset(kasan_zero_page, 0, PAGE_SIZE);
/* At this point kasan is fully initialized. Enable error messages. */
current->kasan_depth = 0;
pr_info("KernelAddressSanitizer initialized\n");
}
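The BUILD_BUG_ON checks above pin the xtensa layout to the generic shadow mapping that populate() fills in. For reference, the common helper they are written against is roughly the definition from include/linux/kasan.h (not code added by this patch):

/* Each byte of shadow tracks 2^KASAN_SHADOW_SCALE_SHIFT (8) bytes of kernel
 * address space, so a shadow address is obtained by shifting the original
 * address and adding a constant offset. */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}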
...@@ -56,7 +56,6 @@ static void __init fixedrange_init(void) ...@@ -56,7 +56,6 @@ static void __init fixedrange_init(void)
void __init paging_init(void) void __init paging_init(void)
{ {
memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
fixedrange_init(); fixedrange_init();
pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
...@@ -82,6 +81,23 @@ void init_mmu(void) ...@@ -82,6 +81,23 @@ void init_mmu(void)
set_itlbcfg_register(0); set_itlbcfg_register(0);
set_dtlbcfg_register(0); set_dtlbcfg_register(0);
#endif #endif
init_kio();
local_flush_tlb_all();
/* Set rasid register to a known value. */
set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
/* Set PTEVADDR special register to the start of the page
* table, which is in kernel mappable space (ie. not
* statically mapped). This register's value is undefined on
* reset.
*/
set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
}
void init_kio(void)
{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
/* /*
* Update the IO area mapping in case xtensa_kio_paddr has changed * Update the IO area mapping in case xtensa_kio_paddr has changed
...@@ -95,17 +111,4 @@ void init_mmu(void) ...@@ -95,17 +111,4 @@ void init_mmu(void)
write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
XCHAL_KIO_BYPASS_VADDR + 6); XCHAL_KIO_BYPASS_VADDR + 6);
#endif #endif
local_flush_tlb_all();
/* Set rasid register to a known value. */
set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
/* Set PTEVADDR special register to the start of the page
* table, which is in kernel mappable space (ie. not
* statically mapped). This register's value is undefined on
* reset.
*/
set_ptevaddr_register(PGTABLE_START);
} }
...@@ -95,10 +95,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma, ...@@ -95,10 +95,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
if (mm->context.asid[cpu] == NO_CONTEXT) if (mm->context.asid[cpu] == NO_CONTEXT)
return; return;
#if 0 pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
printk("[tlbrange<%02lx,%08lx,%08lx>]\n", (unsigned long)mm->context.asid[cpu], start, end);
(unsigned long)mm->context.asid[cpu], start, end);
#endif
local_irq_save(flags); local_irq_save(flags);
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
......
...@@ -185,7 +185,7 @@ int __init rs_init(void) ...@@ -185,7 +185,7 @@ int __init rs_init(void)
serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
printk ("%s %s\n", serial_name, serial_version); pr_info("%s %s\n", serial_name, serial_version);
/* Initialize the tty_driver structure */ /* Initialize the tty_driver structure */
...@@ -214,7 +214,7 @@ static __exit void rs_exit(void) ...@@ -214,7 +214,7 @@ static __exit void rs_exit(void)
int error; int error;
if ((error = tty_unregister_driver(serial_driver))) if ((error = tty_unregister_driver(serial_driver)))
printk("ISS_SERIAL: failed to unregister serial driver (%d)\n", pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n",
error); error);
put_tty_driver(serial_driver); put_tty_driver(serial_driver);
tty_port_destroy(&serial_port); tty_port_destroy(&serial_port);
......
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* *
*/ */
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/list.h> #include <linux/list.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -606,8 +608,6 @@ struct iss_net_init { ...@@ -606,8 +608,6 @@ struct iss_net_init {
* those fields. They will be later initialized in iss_net_init. * those fields. They will be later initialized in iss_net_init.
*/ */
#define ERR KERN_ERR "iss_net_setup: "
static int __init iss_net_setup(char *str) static int __init iss_net_setup(char *str)
{ {
struct iss_net_private *device = NULL; struct iss_net_private *device = NULL;
...@@ -619,14 +619,14 @@ static int __init iss_net_setup(char *str) ...@@ -619,14 +619,14 @@ static int __init iss_net_setup(char *str)
end = strchr(str, '='); end = strchr(str, '=');
if (!end) { if (!end) {
printk(ERR "Expected '=' after device number\n"); pr_err("Expected '=' after device number\n");
return 1; return 1;
} }
*end = 0; *end = 0;
rc = kstrtouint(str, 0, &n); rc = kstrtouint(str, 0, &n);
*end = '='; *end = '=';
if (rc < 0) { if (rc < 0) {
printk(ERR "Failed to parse '%s'\n", str); pr_err("Failed to parse '%s'\n", str);
return 1; return 1;
} }
str = end; str = end;
...@@ -642,13 +642,13 @@ static int __init iss_net_setup(char *str) ...@@ -642,13 +642,13 @@ static int __init iss_net_setup(char *str)
spin_unlock(&devices_lock); spin_unlock(&devices_lock);
if (device && device->index == n) { if (device && device->index == n) {
printk(ERR "Device %u already configured\n", n); pr_err("Device %u already configured\n", n);
return 1; return 1;
} }
new = alloc_bootmem(sizeof(*new)); new = alloc_bootmem(sizeof(*new));
if (new == NULL) { if (new == NULL) {
printk(ERR "Alloc_bootmem failed\n"); pr_err("Alloc_bootmem failed\n");
return 1; return 1;
} }
...@@ -660,8 +660,6 @@ static int __init iss_net_setup(char *str) ...@@ -660,8 +660,6 @@ static int __init iss_net_setup(char *str)
return 1; return 1;
} }
#undef ERR
__setup("eth", iss_net_setup); __setup("eth", iss_net_setup);
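The removed ERR macro is superseded by the pr_fmt() definition added at the top of the file, so every pr_err() in iss_net_setup() is prefixed with the function name automatically. A rough illustration of the expansion (message text taken from the call sites above):

/* #define pr_fmt(fmt) "%s: " fmt, __func__
 *
 * turns
 *     pr_err("Device %u already configured\n", n);
 * into (approximately)
 *     printk(KERN_ERR "%s: Device %u already configured\n", __func__, n);
 * which logs, e.g., "iss_net_setup: Device 3 already configured".
 */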
/* /*
......