Commit 368dd5ac authored by Akira Takeuchi's avatar Akira Takeuchi Committed by David Howells

MN10300: Add Panasonic AM34 subarch and implement SMP

Implement the Panasonic MN10300 AM34 CPU subarch and implement SMP support for
MN10300.  Also implement support for the MN2WS0060 processor and the ASB2364
evaluation board which are AM34 based.
Signed-off-by: default avatarAkira Takeuchi <takeuchi.akr@jp.panasonic.com>
Signed-off-by: default avatarKiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
Signed-off-by: default avatarDavid Howells <dhowells@redhat.com>
parent 04157a6e
......@@ -4448,7 +4448,7 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/panasonic-laptop.c
PANASONIC MN10300/AM33 PORT
PANASONIC MN10300/AM33/AM34 PORT
M: David Howells <dhowells@redhat.com>
M: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
L: linux-am33-list@redhat.com (moderated for non-subscribers)
......
......@@ -48,7 +48,7 @@ config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_CMOS_UPDATE
def_bool y
def_bool n
config GENERIC_FIND_NEXT_BIT
def_bool y
......@@ -72,10 +72,6 @@ config GENERIC_HARDIRQS
config HOTPLUG_CPU
def_bool n
config HZ
int
default 1000
mainmenu "Matsushita MN10300/AM33 Kernel Configuration"
source "init/Kconfig"
......@@ -98,6 +94,9 @@ config MN10300_UNIT_ASB2303
config MN10300_UNIT_ASB2305
bool "ASB2305"
config MN10300_UNIT_ASB2364
bool "ASB2364"
endchoice
choice
......@@ -115,17 +114,13 @@ config MN10300_PROC_MN103E010
select MN10300_PROC_HAS_TTYSM1
select MN10300_PROC_HAS_TTYSM2
endchoice
choice
prompt "Processor core support"
default MN10300_CPU_AM33V2
help
This option specifies the processor core for which the kernel will be
compiled. It affects the instruction set used.
config MN10300_CPU_AM33V2
bool "AM33v2"
config MN10300_PROC_MN2WS0050
bool "MN2WS0050"
depends on MN10300_UNIT_ASB2364
select AM34_2
select MN10300_PROC_HAS_TTYSM0
select MN10300_PROC_HAS_TTYSM1
select MN10300_PROC_HAS_TTYSM2
endchoice
......@@ -138,7 +133,7 @@ config MN10300_HAS_ATOMIC_OPS_UNIT
config FPU
bool "FPU present"
default y
depends on MN10300_PROC_MN103E010
depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
config LAZY_SAVE_FPU
bool "Save FPU state lazily"
......@@ -179,24 +174,55 @@ config KERNEL_TEXT_ADDRESS
config KERNEL_ZIMAGE_BASE_ADDRESS
hex "Base address of compressed vmlinux image"
default "0x90700000"
default "0x50700000"
config BOOT_STACK_OFFSET
hex
default "0xF00" if SMP
default "0xFF0" if !SMP
config BOOT_STACK_SIZE
hex
depends on SMP
default "0x100"
endmenu
config PREEMPT
bool "Preemptible Kernel"
help
This option reduces the latency of the kernel when reacting to
real-time or interactive events by allowing a low priority process to
be preempted even if it is in kernel mode executing a system call.
This allows applications to run more reliably even when the system is
under load.
config SMP
bool "Symmetric multi-processing support"
default y
depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
you have a system with more than one CPU, say Y.
If you say N here, the kernel will run on single and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on many, but not all,
singleprocessor machines. On a singleprocessor machine, the kernel
will run faster if you say N here.
See also <file:Documentation/i386/IO-APIC.txt>,
<file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
<http://www.tldp.org/docs.html#howto>.
Say Y here if you are building a kernel for a desktop, embedded
or real-time system. Say N if you are unsure.
If you don't know what to do here, say N.
config NR_CPUS
int
depends on SMP
default "2"
config USE_GENERIC_SMP_HELPERS
bool
depends on SMP
default y
source "kernel/Kconfig.preempt"
config MN10300_CURRENT_IN_E2
bool "Hold current task address in E2 register"
depends on !SMP
default y
help
This option removes the E2/R2 register from the set available to gcc
......@@ -218,12 +244,14 @@ config MN10300_USING_JTAG
suppresses the use of certain hardware debugging features, such as
single-stepping, which are taken over completely by the JTAG unit.
source "kernel/Kconfig.hz"
config MN10300_RTC
bool "Using MN10300 RTC"
depends on MN10300_PROC_MN103E010
depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
select GENERIC_CMOS_UPDATE
default n
help
This option enables support for the RTC, thus enabling time to be
tracked, even when system is powered down. This is available on-chip
on the MN103E010.
......@@ -315,14 +343,23 @@ config MN10300_TTYSM1
choice
prompt "Select the timer to supply the clock for SIF1"
default MN10300_TTYSM0_TIMER9
default MN10300_TTYSM1_TIMER12 \
if !(AM33_2 || AM33_3)
default MN10300_TTYSM1_TIMER9 \
if AM33_2 || AM33_3
depends on MN10300_TTYSM1
config MN10300_TTYSM1_TIMER12
bool "Use timer 12 (16-bit)"
depends on !(AM33_2 || AM33_3)
config MN10300_TTYSM1_TIMER9
bool "Use timer 9 (16-bit)"
depends on AM33_2 || AM33_3
config MN10300_TTYSM1_TIMER3
bool "Use timer 3 (8-bit)"
depends on AM33_2 || AM33_3
endchoice
......@@ -337,17 +374,33 @@ config MN10300_TTYSM2
choice
prompt "Select the timer to supply the clock for SIF2"
default MN10300_TTYSM0_TIMER10
default MN10300_TTYSM2_TIMER3 \
if !(AM33_2 || AM33_3)
default MN10300_TTYSM2_TIMER10 \
if AM33_2 || AM33_3
depends on MN10300_TTYSM2
config MN10300_TTYSM2_TIMER9
bool "Use timer 9 (16-bit)"
depends on !(AM33_2 || AM33_3)
config MN10300_TTYSM2_TIMER1
bool "Use timer 1 (8-bit)"
depends on !(AM33_2 || AM33_3)
config MN10300_TTYSM2_TIMER3
bool "Use timer 3 (8-bit)"
depends on !(AM33_2 || AM33_3)
config MN10300_TTYSM2_TIMER10
bool "Use timer 10 (16-bit)"
depends on AM33_2 || AM33_3
endchoice
config MN10300_TTYSM2_CTS
bool "Enable the use of the CTS line /dev/ttySM2"
depends on MN10300_TTYSM2
depends on MN10300_TTYSM2 && AM33_2
endmenu
......
......@@ -36,6 +36,9 @@ endif
ifeq ($(CONFIG_MN10300_PROC_MN103E010),y)
PROCESSOR := mn103e010
endif
ifeq ($(CONFIG_MN10300_PROC_MN2WS0050),y)
PROCESSOR := mn2ws0050
endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2303),y)
UNIT := asb2303
......@@ -43,6 +46,9 @@ endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2305),y)
UNIT := asb2305
endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2364),y)
UNIT := asb2364
endif
head-y := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
......
......@@ -15,10 +15,28 @@
#include <linux/linkage.h>
#include <asm/cpu-regs.h>
#include <asm/cache.h>
#ifdef CONFIG_SMP
#include <proc/smp-regs.h>
#endif
.globl startup_32
startup_32:
# first save off parameters from bootloader
#ifdef CONFIG_SMP
#
# Secondary CPUs jump directly to the kernel entry point
#
# Must save primary CPU's D0-D2 registers as they hold boot parameters
#
mov (CPUID), d3
and CPUID_MASK,d3
beq startup_primary
mov CONFIG_KERNEL_TEXT_ADDRESS,a0
jmp (a0)
startup_primary:
#endif /* CONFIG_SMP */
# first save parameters from bootloader
mov param_save_area,a0
mov d0,(a0)
mov d1,(4,a0)
......
......@@ -114,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
#define NUM2EXCEP_IRQ_LEVEL(num) (EXCEP_IRQ_LEVEL0 + (num) * 8)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_EXCEPTIONS_H */
......@@ -18,6 +18,9 @@
#ifndef __ASM_OFFSETS_H__
#include <asm/asm-offsets.h>
#endif
#ifdef CONFIG_SMP
#include <proc/smp-regs.h>
#endif
#define pi break
......@@ -37,9 +40,25 @@
movm [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
mov sp,fp # FRAME pointer in A3
add -12,sp # allow for calls to be made
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT /* FIXME */
mov epsw,d2
and ~EPSW_IE,epsw
#endif
mov (CPUID),a0
add a0,a0
add a0,a0
mov (___frame,a0),a1
mov a1,(REG_NEXT,fp)
mov fp,(___frame,a0)
#ifdef CONFIG_PREEMPT /* FIXME */
mov d2,epsw
#endif
#else /* CONFIG_SMP */
mov (__frame),a1
mov a1,(REG_NEXT,fp)
mov fp,(__frame)
#endif /* CONFIG_SMP */
and ~EPSW_FE,epsw # disable the FPU inside the kernel
......@@ -57,10 +76,27 @@
.macro RESTORE_ALL
# peel back the stack to the calling frame
# - this permits execve() to discard extra frames due to kernel syscalls
#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT /* FIXME */
mov epsw,d2
and ~EPSW_IE,epsw
#endif
mov (CPUID),a0
add a0,a0
add a0,a0
mov (___frame,a0),fp
mov fp,sp
mov (REG_NEXT,fp),d0 # userspace has regs->next == 0
mov d0,(___frame,a0)
#ifdef CONFIG_PREEMPT /* FIXME */
mov d2,epsw
#endif
#else /* CONFIG_SMP */
mov (__frame),fp
mov fp,sp
mov (REG_NEXT,fp),d0 # userspace has regs->next == 0
mov d0,(__frame)
#endif /* CONFIG_SMP */
#ifndef CONFIG_MN10300_USING_JTAG
mov (REG_EPSW,fp),d0
......
......@@ -19,8 +19,10 @@
/* assembly code in softirq.h is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
#ifdef CONFIG_MN10300_WD_TIMER
unsigned int __nmi_count; /* arch dependent */
unsigned int __irq_count; /* arch dependent */
#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
......
......@@ -15,24 +15,19 @@
#ifdef __KERNEL__
/* interrupt controller registers */
#define GxICR(X) __SYSREG(0xd4000000 + (X) * 4, u16) /* group irq ctrl regs */
#define IAGR __SYSREG(0xd4000100, u16) /* intr acceptance group reg */
#define IAGR_GN 0x00fc /* group number register
* (documentation _has_ to be wrong)
*/
/*
* Interrupt controller registers
* - Registers 64-191 are at addresses offset from the main array
*/
#define GxICR(X) \
__SYSREG(0xd4000000 + (X) * 4 + \
(((X) >= 64) && ((X) < 192)) * 0xf00, u16)
#define EXTMD __SYSREG(0xd4000200, u16) /* external pin intr spec reg */
#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
#define GxICR_u8(X) \
__SYSREG(0xd4000000 + (X) * 4 + \
(((X) >= 64) && ((X) < 192)) * 0xf00, u8)
#define SET_XIRQ_TRIGGER(X,Y) \
do { \
u16 x = EXTMD; \
x &= ~(3 << ((X) * 2)); \
x |= ((Y) & 3) << ((X) * 2); \
EXTMD = x; \
} while (0)
#include <proc/intctl-regs.h>
#define XIRQ_TRIGGER_LOWLEVEL 0
#define XIRQ_TRIGGER_HILEVEL 1
......@@ -59,10 +54,18 @@ do { \
#define GxICR_LEVEL_5 0x5000 /* - level 5 */
#define GxICR_LEVEL_6 0x6000 /* - level 6 */
#define GxICR_LEVEL_SHIFT 12
#define GxICR_NMI 0x8000 /* nmi request flag */
#define NUM2GxICR_LEVEL(num) ((num) << GxICR_LEVEL_SHIFT)
#ifndef __ASSEMBLY__
extern void set_intr_level(int irq, u16 level);
extern void set_intr_postackable(int irq);
extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
extern void mn10300_intc_clear(unsigned int irq);
extern void mn10300_intc_set(unsigned int irq);
extern void mn10300_intc_enable(unsigned int irq);
extern void mn10300_intc_disable(unsigned int irq);
extern void mn10300_set_lateack_irq_type(int irq);
#endif
/* external interrupts */
......
......@@ -22,7 +22,11 @@
#define NO_IRQ INT_MAX
/* hardware irq numbers */
#ifdef CONFIG_SMP
#define NR_IRQS GxICR_NUM_EXT_IRQS
#else
#define NR_IRQS GxICR_NUM_IRQS
#endif
/* external hardware irq numbers */
#define NR_XIRQS GxICR_NUM_XIRQS
......
......@@ -13,6 +13,9 @@
#define _ASM_IRQFLAGS_H
#include <asm/cpu-regs.h>
#ifndef __ASSEMBLY__
#include <linux/smp.h>
#endif
/*
* interrupt control
......@@ -60,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
/*
* we make sure arch_irq_enable() doesn't cause priority inversion
*/
extern unsigned long __mn10300_irq_enabled_epsw;
extern unsigned long __mn10300_irq_enabled_epsw[];
static inline void arch_local_irq_enable(void)
{
unsigned long tmp;
int cpu = raw_smp_processor_id();
asm volatile(
" mov epsw,%0 \n"
......@@ -72,8 +76,8 @@ static inline void arch_local_irq_enable(void)
" or %2,%0 \n"
" mov %0,epsw \n"
: "=&d"(tmp)
: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
: "memory");
: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
: "memory", "cc");
}
static inline void arch_local_irq_restore(unsigned long flags)
......@@ -105,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
*/
static inline void arch_safe_halt(void)
{
#ifdef CONFIG_SMP
arch_local_irq_enable();
#else
asm volatile(
" or %0,epsw \n"
" nop \n"
......@@ -113,8 +120,21 @@ static inline void arch_safe_halt(void)
:
: "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
: "cc");
#endif
}
/*
 * __sleep_cpu() - put this CPU to sleep.
 * Sets the CPUM_SLEEP bit in the CPUM register, then polls until the bit
 * reads back clear again, i.e. until the CPU has been woken and resumed.
 * NOTE(review): the wake/clear semantics of CPUM_SLEEP are inferred from
 * the operand names and the poll loop - confirm against the CPU manual.
 */
#define __sleep_cpu() \
do { \
	asm volatile( \
		" bset %1,(%0)\n" /* request sleep */ \
		"1: btst %1,(%0)\n" /* wait for the bit to clear on wakeup */ \
		" bne 1b\n" \
		: \
		: "i"(&CPUM), "i"(CPUM_SLEEP) \
		: "cc" \
		); \
} while (0)
static inline void arch_local_cli(void)
{
asm volatile(
......
......@@ -90,9 +90,15 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
* The vmalloc() routines also leaves a hole of 4kB between each vmalloced
* area to catch addressing errors.
*/
#ifndef __ASSEMBLY__
#define VMALLOC_OFFSET (8UL * 1024 * 1024)
#define VMALLOC_START (0x70000000UL)
#define VMALLOC_END (0x7C000000UL)
#else
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START (0x70000000)
#define VMALLOC_END (0x7C000000)
#endif
#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
......@@ -329,11 +335,7 @@ static inline int pte_exec_kernel(pte_t pte)
return 1;
}
/*
* Bits 0 and 1 are taken, split up the 29 bits of offset
* into this range:
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
......@@ -379,8 +381,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
* Macro to mark a page protection value as "uncacheable". On processors which
* do not support it, this is a no-op.
*/
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE)
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
/*
* Macro to mark a page protection value as "Write-Through".
* On processors which do not support it, this is a no-op.
*/
#define pgprot_through(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
/*
* Conversion functions: convert a page and protection to a page entry,
......
......@@ -33,6 +33,8 @@ struct mm_struct;
__pc; \
})
extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
extern void show_registers(struct pt_regs *regs);
/*
......@@ -43,17 +45,22 @@ extern void show_registers(struct pt_regs *regs);
struct mn10300_cpuinfo {
int type;
unsigned long loops_per_sec;
unsigned long loops_per_jiffy;
char hard_math;
unsigned long *pgd_quick;
unsigned long *pte_quick;
unsigned long pgtable_cache_sz;
};
extern struct mn10300_cpuinfo boot_cpu_data;
#ifdef CONFIG_SMP
#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
# error Sorry, NR_CPUS should be 2 to 8
#endif
extern struct mn10300_cpuinfo cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else /* CONFIG_SMP */
#define cpu_data &boot_cpu_data
#define current_cpu_data boot_cpu_data
#endif /* CONFIG_SMP */
extern void identify_cpu(struct mn10300_cpuinfo *);
extern void print_cpu_info(struct mn10300_cpuinfo *);
......@@ -92,21 +99,21 @@ struct thread_struct {
unsigned long a3; /* kernel FP */
unsigned long wchan;
unsigned long usp;
struct pt_regs *__frame;
struct pt_regs *frame;
unsigned long fpu_flags;
#define THREAD_USING_FPU 0x00000001 /* T if this task is using the FPU */
#define THREAD_HAS_FPU 0x00000002 /* T if this task owns the FPU right now */
struct fpu_state_struct fpu_state;
};
#define INIT_THREAD \
{ \
.uregs = init_uregs, \
.pc = 0, \
.sp = 0, \
.a3 = 0, \
.wchan = 0, \
.__frame = NULL, \
#define INIT_THREAD \
{ \
.uregs = init_uregs, \
.pc = 0, \
.sp = 0, \
.a3 = 0, \
.wchan = 0, \
.frame = NULL, \
}
#define INIT_MMAP \
......@@ -118,6 +125,19 @@ struct thread_struct {
* - need to discard the frame stacked by the kernel thread invoking the execve
* syscall (see RESTORE_ALL macro)
*/
#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) /* FIXME */
#define start_thread(regs, new_pc, new_sp) do { \
int cpu; \
preempt_disable(); \
cpu = CPUID; \
set_fs(USER_DS); \
___frame[cpu] = current->thread.uregs; \
___frame[cpu]->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;\
___frame[cpu]->pc = new_pc; \
___frame[cpu]->sp = new_sp; \
preempt_enable(); \
} while (0)
#else /* CONFIG_SMP && CONFIG_PREEMPT */
#define start_thread(regs, new_pc, new_sp) do { \
set_fs(USER_DS); \
__frame = current->thread.uregs; \
......@@ -125,6 +145,7 @@ struct thread_struct {
__frame->pc = new_pc; \
__frame->sp = new_sp; \
} while (0)
#endif /* CONFIG_SMP && CONFIG_PREEMPT */
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
......
......@@ -40,7 +40,6 @@
#define PT_PC 26
#define NR_PTREGS 27
#ifndef __ASSEMBLY__
/*
* This defines the way registers are stored in the event of an exception
* - the strange order is due to the MOVM instruction
......@@ -75,7 +74,6 @@ struct pt_regs {
unsigned long epsw;
unsigned long pc;
};
#endif
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
......@@ -86,12 +84,13 @@ struct pt_regs {
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#if defined(__KERNEL__)
#ifdef __KERNEL__
#ifdef CONFIG_SMP
extern struct pt_regs *___frame[]; /* current frame pointer */
#else
extern struct pt_regs *__frame; /* current frame pointer */
#if !defined(__ASSEMBLY__)
struct task_struct;
#endif
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
#define instruction_pointer(regs) ((regs)->pc)
......@@ -100,9 +99,7 @@ extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
#endif /* !__ASSEMBLY */
#define profile_pc(regs) ((regs)->pc)
#endif /* __KERNEL__ */
#endif /* __KERNEL__ */
#endif /* _ASM_PTRACE_H */
......@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
RSTCTR |= RSTCTR_CHIPRST;
}
extern unsigned int watchdog_alert_counter;
extern unsigned int watchdog_alert_counter[];
extern void watchdog_go(void);
extern asmlinkage void watchdog_handler(void);
......
......@@ -15,25 +15,14 @@
#include <linux/init.h>
extern void check_rtc_time(void);
extern void __init calibrate_clock(void);
extern unsigned long __init get_initial_rtc_time(void);
#else /* !CONFIG_MN10300_RTC */
static inline void check_rtc_time(void)
{
}
static inline void calibrate_clock(void)
{
}
static inline unsigned long get_initial_rtc_time(void)
{
return 0;
}
#endif /* !CONFIG_MN10300_RTC */
#include <asm-generic/rtc.h>
......
/*
* Helpers used by both rw spinlocks and rw semaphores.
*
* Based in part on code from semaphore.h and
* spinlock.h Copyright 1996 Linus Torvalds.
*
* Copyright 1999 Red Hat, Inc.
*
* Written by Benjamin LaHaise.
*
* Modified by Matsushita Electric Industrial Co., Ltd.
* Modifications:
* 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#ifndef _ASM_RWLOCK_H
#define _ASM_RWLOCK_H
#define RW_LOCK_BIAS 0x01000000
#ifndef CONFIG_SMP

/*
 * On UP builds the rwlock is plain memory; this oversized dummy type lets
 * the helper macros hand the whole lock word to the asm as a single lvalue.
 * NOTE(review): per the modification history above, the real SMP lock
 * functions were "temporarily deleted" - these helpers look like the
 * remaining pre-SMP scaffolding.
 */
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))

#define RW_LOCK_BIAS_STR "0x01000000"

/*
 * Decrement the reader counter at *(rw).  If the result goes negative
 * (blt - a writer holds the lock), jump to an out-of-line stub placed in
 * .text.lock which calls @helper and then retries by branching back to 2b.
 * The fast (uncontended) path falls straight through at label 2.
 */
#define __build_read_lock_ptr(rw, helper) \
do { \
	asm volatile( \
		" mov (%0),d3 \n" /* d3 = counter */ \
		" sub 1,d3 \n" \
		" mov d3,(%0) \n" /* store decremented counter */ \
		" blt 1f \n" /* negative -> contended */ \
		" bra 2f \n" \
		"1: jmp 3f \n" \
		"2: \n" \
		" .section .text.lock,\"ax\" \n" \
		"3: call "helper"[],0 \n" /* out-of-line slow path */ \
		" jmp 2b \n" \
		" .previous" \
		: \
		: "d" (rw) \
		: "memory", "d3", "cc"); \
} while (0)

/*
 * Constant-operand variant of __build_read_lock_ptr.
 * NOTE(review): the body is currently identical to the _ptr variant.
 */
#define __build_read_lock_const(rw, helper) \
do { \
	asm volatile( \
		" mov (%0),d3 \n" \
		" sub 1,d3 \n" \
		" mov d3,(%0) \n" \
		" blt 1f \n" \
		" bra 2f \n" \
		"1: jmp 3f \n" \
		"2: \n" \
		" .section .text.lock,\"ax\" \n" \
		"3: call "helper"[],0 \n" \
		" jmp 2b \n" \
		" .previous" \
		: \
		: "d" (rw) \
		: "memory", "d3", "cc"); \
} while (0)

/* Dispatch to the constant or pointer variant of the read-lock helper. */
#define __build_read_lock(rw, helper) \
do { \
	if (__builtin_constant_p(rw)) \
		__build_read_lock_const(rw, helper); \
	else \
		__build_read_lock_ptr(rw, helper); \
} while (0)

/*
 * Writer-side helper.
 * NOTE(review): this subtracts 1 like the reader path rather than
 * RW_LOCK_BIAS as the bias scheme would suggest - possibly a placeholder
 * left from the lock-function removal noted above; confirm before use.
 */
#define __build_write_lock_ptr(rw, helper) \
do { \
	asm volatile( \
		" mov (%0),d3 \n" \
		" sub 1,d3 \n" \
		" mov d3,(%0) \n" \
		" blt 1f \n" \
		" bra 2f \n" \
		"1: jmp 3f \n" \
		"2: \n" \
		" .section .text.lock,\"ax\" \n" \
		"3: call "helper"[],0 \n" \
		" jmp 2b \n" \
		" .previous" \
		: \
		: "d" (rw) \
		: "memory", "d3", "cc"); \
} while (0)

/*
 * Constant-operand variant of __build_write_lock_ptr (currently identical;
 * see the NOTE(review) above about the subtraction amount).
 */
#define __build_write_lock_const(rw, helper) \
do { \
	asm volatile( \
		" mov (%0),d3 \n" \
		" sub 1,d3 \n" \
		" mov d3,(%0) \n" \
		" blt 1f \n" \
		" bra 2f \n" \
		"1: jmp 3f \n" \
		"2: \n" \
		" .section .text.lock,\"ax\" \n" \
		"3: call "helper"[],0 \n" \
		" jmp 2b \n" \
		" .previous" \
		: \
		: "d" (rw) \
		: "memory", "d3", "cc"); \
} while (0)

/* Dispatch to the constant or pointer variant of the write-lock helper. */
#define __build_write_lock(rw, helper) \
do { \
	if (__builtin_constant_p(rw)) \
		__build_write_lock_const(rw, helper); \
	else \
		__build_write_lock_ptr(rw, helper); \
} while (0)

#endif /* CONFIG_SMP */
#endif /* _ASM_RWLOCK_H */
......@@ -20,18 +20,25 @@
/* serial port 0 */
#define SC0CTR __SYSREG(0xd4002000, u16) /* control reg */
#define SC01CTR_CK 0x0007 /* clock source select */
#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
#define SC01CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
#define SC01CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
#define SC01CTR_CK_EXTERN 0x0007 /* - external clock */
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_2 0x0003 /* - 1/2 timer 2 underflow (serial port 0 only) */
#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 0 only) */
#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 2 underflow (serial port 1 only) */
#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 1 only) */
#define SC1CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow (serial port 1 only) */
#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
#define SC01CTR_CK_EXTERN 0x0007 /* - external clock */
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
#define SC1CTR_CK_TM12UFLOW_8 0x0000 /* - 1/8 timer 12 underflow (serial port 1 only) */
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
#define SC01CTR_STB 0x0008 /* stop bit select */
#define SC01CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC01CTR_STB_2BIT 0x0008 /* - 2 stop bits */
......@@ -100,11 +107,23 @@
/* serial port 2 */
#define SC2CTR __SYSREG(0xd4002020, u16) /* control reg */
#ifdef CONFIG_AM33_2
#define SC2CTR_CK 0x0003 /* clock source select */
#define SC2CTR_CK_TM10UFLOW 0x0000 /* - timer 10 underflow */
#define SC2CTR_CK_TM2UFLOW 0x0001 /* - timer 2 underflow */
#define SC2CTR_CK_EXTERN 0x0002 /* - external clock */
#define SC2CTR_CK_TM3UFLOW 0x0003 /* - timer 3 underflow */
#else /* CONFIG_AM33_2 */
#define SC2CTR_CK 0x0007 /* clock source select */
#define SC2CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow */
#define SC2CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
#define SC2CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
#define SC2CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow */
#define SC2CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow */
#define SC2CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow */
#define SC2CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
#define SC2CTR_CK_EXTERN 0x0007 /* - external clock */
#endif /* CONFIG_AM33_2 */
#define SC2CTR_STB 0x0008 /* stop bit select */
#define SC2CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC2CTR_STB_2BIT 0x0008 /* - 2 stop bits */
......@@ -134,9 +153,14 @@
#define SC2ICR_RES 0x04 /* receive error select */
#define SC2ICR_RI 0x01 /* receive interrupt cause */
#define SC2TXB __SYSREG(0xd4002018, u8) /* transmit buffer reg */
#define SC2RXB __SYSREG(0xd4002019, u8) /* receive buffer reg */
#define SC2STR __SYSREG(0xd400201c, u8) /* status reg */
#define SC2TXB __SYSREG(0xd4002028, u8) /* transmit buffer reg */
#define SC2RXB __SYSREG(0xd4002029, u8) /* receive buffer reg */
#ifdef CONFIG_AM33_2
#define SC2STR __SYSREG(0xd400202c, u8) /* status reg */
#else /* CONFIG_AM33_2 */
#define SC2STR __SYSREG(0xd400202c, u16) /* status reg */
#endif /* CONFIG_AM33_2 */
#define SC2STR_OEF 0x0001 /* overrun error found */
#define SC2STR_PEF 0x0002 /* parity error found */
#define SC2STR_FEF 0x0004 /* framing error found */
......@@ -146,10 +170,17 @@
#define SC2STR_RXF 0x0040 /* receive status */
#define SC2STR_TXF 0x0080 /* transmit status */
#ifdef CONFIG_AM33_2
#define SC2TIM __SYSREG(0xd400202d, u8) /* status reg */
#endif
#ifdef CONFIG_AM33_2
#define SC2RXIRQ 24 /* serial 2 Receive IRQ */
#define SC2TXIRQ 25 /* serial 2 Transmit IRQ */
#else /* CONFIG_AM33_2 */
#define SC2RXIRQ 68 /* serial 2 Receive IRQ */
#define SC2TXIRQ 69 /* serial 2 Transmit IRQ */
#endif /* CONFIG_AM33_2 */
#define SC2RXICR GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
#define SC2TXICR GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
......
......@@ -9,10 +9,8 @@
* 2 of the Licence, or (at your option) any later version.
*/
/*
* The ASB2305 has an 18.432 MHz clock the UART
*/
#define BASE_BAUD (18432000 / 16)
#ifndef _ASM_SERIAL_H
#define _ASM_SERIAL_H
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
......@@ -34,3 +32,5 @@
#endif
#include <unit/serial.h>
#endif /* _ASM_SERIAL_H */
......@@ -3,6 +3,16 @@
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* Modified by Matsushita Electric Industrial Co., Ltd.
* Modifications:
* 13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
* for SMP support.
* 22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
* 23-Feb-2007 MEI Add the define related to SMP icahce invalidate.
* 23-Jun-2008 MEI Delete INTC_IPI.
* 22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
* 04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
......@@ -11,8 +21,98 @@
#ifndef _ASM_SMP_H
#define _ASM_SMP_H
#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <linux/cpumask.h>
#endif
#ifdef CONFIG_SMP
#error SMP not yet supported for MN10300
#include <proc/smp-regs.h>
#define RESCHEDULE_IPI 63
#define CALL_FUNC_SINGLE_IPI 192
#define LOCAL_TIMER_IPI 193
#define FLUSH_CACHE_IPI 194
#define CALL_FUNCTION_NMI_IPI 195
#define GDB_NMI_IPI 196
#define SMP_BOOT_IRQ 195
#define RESCHEDULE_GxICR_LV GxICR_LEVEL_6
#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
#define LOCAL_TIMER_GxICR_LV GxICR_LEVEL_4
#define FLUSH_CACHE_GxICR_LV GxICR_LEVEL_0
#define SMP_BOOT_GxICR_LV GxICR_LEVEL_0
#define TIME_OUT_COUNT_BOOT_IPI 100
#define DELAY_TIME_BOOT_IPI 75000
#ifndef __ASSEMBLY__
/**
* raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
*
* What we really want to do is to use the CPUID hardware CPU register to get
* this information, but accesses to that aren't cached, and run at system bus
* speed, not CPU speed. A copy of this value is, however, stored in the
* thread_info struct, and that can be cached.
*
* An alternate way of dealing with this could be to use the EPSW.S bits to
* cache this information for systems with up to four CPUs.
*/
#if 0
#define raw_smp_processor_id() (CPUID)
#else
#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif
/*
 * cpu_logical_map - map between logical CPU number and hardware CPU ID.
 * On this platform the two numbering schemes coincide, so this is the
 * identity mapping.
 */
static inline int cpu_logical_map(int cpu)
{
	return cpu;
}
/*
 * cpu_number_map - inverse of cpu_logical_map.
 * Also the identity here, since logical numbers and hardware IDs agree.
 */
static inline int cpu_number_map(int cpu)
{
	return cpu;
}
extern cpumask_t cpu_boot_map;
extern void smp_init_cpus(void);
extern void smp_cache_interrupt(void);
extern void send_IPI_allbutself(int irq);
extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#ifdef CONFIG_HOTPLUG_CPU
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PREEMPT /* FIXME */
/*
 * Fetch this CPU's saved exception-frame pointer from the per-CPU
 * ___frame[] array.  Preemption is disabled around the CPUID read so the
 * index and the fetched slot belong to the same CPU.  NOTE(review): the
 * value can go stale as soon as preempt_enable() runs - presumably why
 * this is marked FIXME.
 */
#define __frame \
({ \
	struct pt_regs *f; \
	preempt_disable(); \
	f = ___frame[CPUID]; \
	preempt_enable(); \
	f; \
})
#else
/* Without preemption the CPU cannot change under us; index directly. */
#define __frame ___frame[CPUID]
#endif
#endif /* __ASSEMBLY__ */
#else /* CONFIG_SMP */
#ifndef __ASSEMBLY__
static inline void smp_init_cpus(void) {}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_SMP_H */
......@@ -11,6 +11,183 @@
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
#error SMP spinlocks not implemented for MN10300
#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* We make no fairness assumptions. They have a cost.
*/
#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
/*
 * arch_spin_unlock - release a held spinlock.
 * A single bclr clears the lock bit in ->slock; the "memory" clobber stops
 * the compiler from sinking critical-section accesses past the release.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
		" bclr 1,(0,%0) \n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
/*
 * arch_spin_trylock - attempt to take the lock exactly once, no spinning.
 * Returns 1 if the lock was acquired, 0 if it was already held.
 *
 * Judging by the bne that follows it, bset leaves the condition flags
 * reflecting whether the lock bit was already set; the clr/xor sequence
 * then converts that into the 1/0 return value.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int ret;

	asm volatile(
		" mov 1,%0 \n" /* ret = 1: bit mask, and provisional "failed" marker */
		" bset %0,(%1) \n" /* try to set the lock bit */
		" bne 1f \n" /* bit was already set -> leave ret = 1 */
		" clr %0 \n" /* acquired -> ret = 0 */
		"1: xor 1,%0 \n" /* invert: 1 = acquired, 0 = already held */
		: "=d"(ret)
		: "a"(&lock->slock)
		: "memory", "cc");

	return ret;
}
/*
 * arch_spin_lock - acquire the lock, busy-waiting until it is free.
 * Retries the bset until it succeeds (bne loops back while the lock bit
 * was already held by another owner).  Interrupts are left as-is; use
 * arch_spin_lock_flags() to service interrupts while waiting.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	asm volatile(
		"1: bset 1,(0,%0) \n"
		" bne 1b \n" /* already held -> retry */
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
/*
 * arch_spin_lock_flags - acquire the lock, restoring the caller's saved
 * EPSW (@flags) while spinning so interrupts can be serviced during
 * contention.
 *
 * Flow: try to set the lock bit (beq 3f = acquired); on failure restore
 * the caller's EPSW (%1), poll ->slock until it reads zero, then write
 * %3 (EPSW_IE | MN10300_CLI_LEVEL) to epsw to raise the interrupt mask
 * again before retrying the bset.  The two nops look like a pipeline
 * delay after the epsw write - NOTE(review): confirm against the CPU
 * manual's epsw update latency.
 */
static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
	int temp;

	asm volatile(
		"1: bset 1,(0,%2) \n" /* try to take the lock */
		" beq 3f \n" /* got it */
		" mov %1,epsw \n" /* restore caller's interrupt state */
		"2: mov (0,%2),%0 \n" /* poll the lock word */
		" or %0,%0 \n"
		" bne 2b \n" /* still held -> keep polling */
		" mov %3,%0 \n"
		" mov %0,epsw \n" /* mask interrupts again */
		" nop \n"
		" nop \n"
		" bra 1b\n" /* retry the acquisition */
		"3: \n"
		: "=&d" (temp)
		: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
		: "memory", "cc");
}
#ifdef __KERNEL__
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* NOTE! it is quite common to have readers in interrupts
* but no interrupt writers. For those circumstances we
* can "mix" irq-safe locks - any writer needs to get a
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*/
/**
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
#define arch_read_can_lock(x) ((int)(x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
/*
* On mn10300, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
*/
/*
 * arch_read_lock - take the rwlock for reading (may spin).
 * Readers decrement the counter; a negative result means a writer holds
 * (or is acquiring) the lock, so the decrement is backed out and retried.
 * The ATOMIC_OPS_UNIT fast path is compiled out with "#if 0" pending a
 * working implementation.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_lock(rw, "__read_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (atomic_dec_return(count) < 0)
			atomic_inc(count); /* writer present: undo and retry */
	}
#endif
}
/*
 * arch_write_lock - take the rwlock for writing (may spin).
 * A writer claims the lock by subtracting RW_LOCK_BIAS; the result is
 * zero only when no readers and no other writer were present
 * (atomic_sub_and_test).  On failure the bias is added back and the
 * attempt repeated.  The ATOMIC_OPS_UNIT fast path is disabled.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_lock(rw, "__write_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
			atomic_add(RW_LOCK_BIAS, count); /* contended: undo and retry */
	}
#endif
}
/*
 * arch_read_unlock - drop a read hold: give back the reader's single
 * count.  The ATOMIC_OPS_UNIT fast path is disabled.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_inc(count);
	}
#endif
}
/*
 * Release a write lock: restore the full RW_LOCK_BIAS that the writer
 * subtracted when it claimed the lock.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
__build_write_unlock(rw);
#else
{
atomic_t *count = (atomic_t *)rw;
atomic_add(RW_LOCK_BIAS, count);
}
#endif
}
/*
 * Try once to take the rwlock for reading.
 *
 * Returns 1 if the read lock was obtained, 0 if a writer holds it.
 *
 * The counter is decremented and tested in a single atomic operation
 * (atomic_dec_return, as arch_read_lock() above already does) instead of
 * the previous separate atomic_dec()/atomic_read() pair: with the pair,
 * a concurrent update from another CPU between the two operations could
 * change the value observed by the test, so the decision was not based
 * on the result of this CPU's own decrement.
 */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_dec_return(count) >= 0)
		return 1;	/* counter stayed non-negative: no writer */
	atomic_inc(count);	/* writer present - undo our reservation */
	return 0;
}
/*
 * Try once to take the rwlock for writing.
 *
 * Subtracting RW_LOCK_BIAS claims the whole counter; the result is zero
 * only if there were no readers and no writer.  On contention the bias
 * is handed back and failure reported.
 *
 * Returns 1 on success, 0 if the lock was held.
 */
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *counter = (atomic_t *)lock;

	if (!atomic_sub_and_test(RW_LOCK_BIAS, counter)) {
		/* somebody else holds it - undo our claim */
		atomic_add(RW_LOCK_BIAS, counter);
		return 0;
	}
	return 1;
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */
#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
/* architecture spinlock: a single word; arch_spin_lock() sets bit 1 to lock */
typedef struct arch_spinlock {
unsigned int slock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
/* rwlock: a 32-bit counter biased by RW_LOCK_BIAS (see asm/spinlock.h) */
typedef struct {
unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_SPINLOCK_TYPES_H */
......@@ -12,6 +12,7 @@
#define _ASM_SYSTEM_H
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
......@@ -57,8 +58,6 @@ do { \
#define nop() asm volatile ("nop")
#endif /* !__ASSEMBLY__ */
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
......@@ -85,17 +84,19 @@ do { \
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else /* CONFIG_SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif
#define set_mb(var, value) do { var = value; mb(); } while (0)
#endif /* CONFIG_SMP */
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */
This diff is collapsed.
......@@ -16,8 +16,7 @@
#define TICK_SIZE (tick_nsec / 1000)
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
* to something appropriate, but what? */
#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
extern cycles_t cacheflush_time;
......
......@@ -377,7 +377,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
#if 0
#error don't use - these macros don't increment to & from pointers
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
......
......@@ -10,8 +10,9 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
obj-$(CONFIG_SMP) += smp.o smp-low.o
obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
mn10300-debug.o
......
......@@ -66,7 +66,7 @@ void foo(void)
OFFSET(THREAD_SP, thread_struct, sp);
OFFSET(THREAD_A3, thread_struct, a3);
OFFSET(THREAD_USP, thread_struct, usp);
OFFSET(THREAD_FRAME, thread_struct, __frame);
OFFSET(THREAD_FRAME, thread_struct, frame);
#ifdef CONFIG_FPU
OFFSET(THREAD_FPU_FLAGS, thread_struct, fpu_flags);
OFFSET(THREAD_FPU_STATE, thread_struct, fpu_state);
......
......@@ -28,25 +28,17 @@
#include <asm/asm-offsets.h>
#include <asm/frame.inc>
#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
#include <asm/gdb-stub.h>
#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
#ifdef CONFIG_PREEMPT
#define preempt_stop __cli
#define preempt_stop LOCAL_IRQ_DISABLE
#else
#define preempt_stop
#define resume_kernel restore_all
#endif
.macro __cli
and ~EPSW_IM,epsw
or EPSW_IE|MN10300_CLI_LEVEL,epsw
nop
nop
nop
.endm
.macro __sti
or EPSW_IE|EPSW_IM_7,epsw
.endm
.am33_2
###############################################################################
......@@ -88,7 +80,7 @@ syscall_call:
syscall_exit:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
__cli
LOCAL_IRQ_DISABLE
mov (TI_flags,a2),d2
btst _TIF_ALLWORK_MASK,d2
bne syscall_exit_work
......@@ -105,7 +97,7 @@ restore_all:
syscall_exit_work:
btst _TIF_SYSCALL_TRACE,d2
beq work_pending
__sti # could let syscall_trace_exit() call
LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call
# schedule() instead
mov fp,d0
call syscall_trace_exit[],0 # do_syscall_trace(regs)
......@@ -121,7 +113,7 @@ work_resched:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
__cli
LOCAL_IRQ_DISABLE
# is there any work to be done other than syscall tracing?
mov (TI_flags,a2),d2
......@@ -168,7 +160,7 @@ ret_from_intr:
ENTRY(resume_userspace)
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
__cli
LOCAL_IRQ_DISABLE
# is there any work to be done on int/exception return?
mov (TI_flags,a2),d2
......@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
__cli
LOCAL_IRQ_DISABLE
mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
cmp 0,d0
bne restore_all
......@@ -281,6 +273,79 @@ ENTRY(nmi_handler)
add -4,sp
mov d0,(sp)
mov (TBR),d0
#ifdef CONFIG_SMP
add -4,sp
mov d0,(sp) # save d0(TBR)
movhu (NMIAGR),d0
and NMIAGR_GN,d0
lsr 0x2,d0
cmp CALL_FUNCTION_NMI_IPI,d0
bne 5f # if not call function, jump
# function call nmi ipi
add 4,sp # no need to store TBR
mov GxICR_DETECT,d0 # clear NMI request
movbu d0,(GxICR(CALL_FUNCTION_NMI_IPI))
movhu (GxICR(CALL_FUNCTION_NMI_IPI)),d0
and ~EPSW_NMID,epsw # enable NMI
mov (sp),d0 # restore d0
SAVE_ALL
call smp_nmi_call_function_interrupt[],0
RESTORE_ALL
5:
#ifdef CONFIG_GDBSTUB
cmp GDB_NMI_IPI,d0
bne 3f # if not gdb nmi ipi, jump
# gdb nmi ipi
add 4,sp # no need to store TBR
mov GxICR_DETECT,d0 # clear NMI
movbu d0,(GxICR(GDB_NMI_IPI))
movhu (GxICR(GDB_NMI_IPI)),d0
and ~EPSW_NMID,epsw # enable NMI
#ifdef CONFIG_MN10300_CACHE_ENABLED
mov (gdbstub_nmi_opr_type),d0
cmp GDBSTUB_NMI_CACHE_PURGE,d0
bne 4f # if not gdb cache purge, jump
# gdb cache purge nmi ipi
add -20,sp
mov d1,(4,sp)
mov a0,(8,sp)
mov a1,(12,sp)
mov mdr,d0
mov d0,(16,sp)
call gdbstub_local_purge_cache[],0
mov 0x1,d0
mov (CPUID),d1
asl d1,d0
mov gdbstub_nmi_cpumask,a0
bclr d0,(a0)
mov (4,sp),d1
mov (8,sp),a0
mov (12,sp),a1
mov (16,sp),d0
mov d0,mdr
add 20,sp
mov (sp),d0
add 4,sp
rti
4:
#endif /* CONFIG_MN10300_CACHE_ENABLED */
# gdb wait nmi ipi
mov (sp),d0
SAVE_ALL
call gdbstub_nmi_wait[],0
RESTORE_ALL
3:
#endif /* CONFIG_GDBSTUB */
mov (sp),d0 # restore TBR to d0
add 4,sp
#endif /* CONFIG_SMP */
bra __common_exception_nonmi
ENTRY(__common_exception)
......@@ -314,15 +379,21 @@ __common_exception_nonmi:
mov d0,(REG_ORIG_D0,fp)
#ifdef CONFIG_GDBSTUB
#ifdef CONFIG_SMP
call gdbstub_busy_check[],0
and d0,d0 # check return value
beq 2f
#else /* CONFIG_SMP */
btst 0x01,(gdbstub_busy)
beq 2f
#endif /* CONFIG_SMP */
and ~EPSW_IE,epsw
mov fp,d0
mov a2,d1
call gdbstub_exception[],0 # gdbstub itself caused an exception
bra restore_all
2:
#endif
#endif /* CONFIG_GDBSTUB */
mov fp,d0 # arg 0: stacked register file
mov a2,d1 # arg 1: exception number
......@@ -357,11 +428,7 @@ ENTRY(set_excp_vector)
add exception_table,d0
mov d1,(d0)
mov 4,d1
#if defined(CONFIG_MN10300_CACHE_WBACK)
jmp mn10300_dcache_flush_inv_range2
#else
ret [],0
#endif
###############################################################################
#
......
......@@ -18,6 +18,7 @@
#include <asm/thread_info.h>
#include <asm/frame.inc>
#include <asm/intctl-regs.h>
#include <asm/irqflags.h>
#include <unit/serial.h>
.text
......@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
bra gdbstub_io_rx_done
gdbstub_io_rx_enter:
or EPSW_IE|EPSW_IM_1,epsw
LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
add -4,sp
SAVE_ALL
......@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
mov fp,d0
call gdbstub_rx_irq[],0 # gdbstub_rx_irq(regs,excep)
and ~EPSW_IE,epsw
LOCAL_CLI
bclr 0x01,(gdbstub_busy)
.globl gdbstub_return
......
......@@ -23,6 +23,7 @@
#include <asm/exceptions.h>
#include <asm/serial-regs.h>
#include <unit/serial.h>
#include <asm/smp.h>
/*
* initialise the GDB stub
......@@ -45,22 +46,34 @@ void gdbstub_io_init(void)
XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
#if CONFIG_GDBSTUB_IRQ_LEVEL == 0
IVAR0 = EXCEP_IRQ_LEVEL0;
set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
IVAR1 = EXCEP_IRQ_LEVEL1;
#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
IVAR2 = EXCEP_IRQ_LEVEL2;
#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
IVAR3 = EXCEP_IRQ_LEVEL3;
#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
IVAR4 = EXCEP_IRQ_LEVEL4;
#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
IVAR5 = EXCEP_IRQ_LEVEL5;
#else
#error "Unknown irq level for gdbstub."
#endif
set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
gdbstub_io_rx_handler);
XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0;
XIRQxICR(GDBPORT_SERIAL_IRQ) =
GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
/* permit level 0 IRQs to take place */
asm volatile(
" and %0,epsw \n"
" or %1,epsw \n"
:
: "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
);
local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
}
/*
......@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
{
unsigned ix;
u8 ch, st;
#if defined(CONFIG_MN10300_WD_TIMER)
int cpu;
#endif
*_ch = 0xff;
......@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
if (nonblock)
return -EAGAIN;
#ifdef CONFIG_MN10300_WD_TIMER
watchdog_alert_counter = 0;
#endif /* CONFIG_MN10300_WD_TIMER */
for (cpu = 0; cpu < NR_CPUS; cpu++)
watchdog_alert_counter[cpu] = 0;
#endif
goto try_again;
}
......
......@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
gdbstub_io_set_baud(115200);
/* we want to get serial receive interrupts */
set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0);
set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0);
set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
set_intr_level(gdbstub_port->rx_irq,
NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
set_intr_level(gdbstub_port->tx_irq,
NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
gdbstub_io_rx_handler);
*gdbstub_port->rx_icr |= GxICR_ENABLE;
tmp = *gdbstub_port->rx_icr;
......@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void)
tmp = *gdbstub_port->_control;
/* permit level 0 IRQs only */
asm volatile(
" and %0,epsw \n"
" or %1,epsw \n"
:
: "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
);
local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
}
/*
......@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
{
unsigned ix;
u8 ch, st;
#if defined(CONFIG_MN10300_WD_TIMER)
int cpu;
#endif
*_ch = 0xff;
......@@ -201,8 +202,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
if (nonblock)
return -EAGAIN;
#ifdef CONFIG_MN10300_WD_TIMER
watchdog_alert_counter = 0;
#endif /* CONFIG_MN10300_WD_TIMER */
for (cpu = 0; cpu < NR_CPUS; cpu++)
watchdog_alert_counter[cpu] = 0;
#endif
goto try_again;
}
......
......@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
static int __gdbstub_mark_bp(u8 *addr, int ix)
{
if (addr < (u8 *) 0x70000000UL)
return 0;
/* 70000000-7fffffff: vmalloc area */
if (addr < (u8 *) 0x80000000UL)
/* vmalloc area */
if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
goto okay;
if (addr < (u8 *) 0x8c000000UL)
return 0;
/* 8c000000-93ffffff: SRAM, SDRAM */
if (addr < (u8 *) 0x94000000UL)
/* SRAM, SDRAM */
if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
goto okay;
return 0;
......@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
mn10300_set_gdbleds(1);
asm volatile("mov mdr,%0" : "=d"(mdr));
asm volatile("mov epsw,%0" : "=d"(epsw));
asm volatile("mov %0,epsw"
:: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
local_save_flags(epsw);
local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
gdbstub_store_fpu();
......
......@@ -19,6 +19,12 @@
#include <asm/frame.inc>
#include <asm/param.h>
#include <unit/serial.h>
#ifdef CONFIG_SMP
#include <asm/smp.h>
#include <asm/intctl-regs.h>
#include <asm/cpu-regs.h>
#include <proc/smp-regs.h>
#endif /* CONFIG_SMP */
__HEAD
......@@ -30,17 +36,51 @@
.globl _start
.type _start,@function
_start:
#ifdef CONFIG_SMP
#
# If this is a secondary CPU (AP), then deal with that elsewhere
#
mov (CPUID),d3
and CPUID_MASK,d3
bne startup_secondary
#
# We're dealing with the primary CPU (BP) here, then.
# Keep BP's D0,D1,D2 register for boot check.
#
# Set up the Boot IPI for each secondary CPU
mov 0x1,a0
loop_set_secondary_icr:
mov a0,a1
asl CROSS_ICR_CPU_SHIFT,a1
add CROSS_GxICR(SMP_BOOT_IRQ,0),a1
movhu (a1),d3
or GxICR_ENABLE|GxICR_LEVEL_0,d3
movhu d3,(a1)
movhu (a1),d3 # flush
inc a0
cmp NR_CPUS,a0
bne loop_set_secondary_icr
#endif /* CONFIG_SMP */
# save commandline pointer
mov d0,a3
# preload the PGD pointer register
mov swapper_pg_dir,d0
mov d0,(PTBR)
clr d0
movbu d0,(PIDR)
# turn on the TLBs
mov MMUCTR_IIV|MMUCTR_DIV,d0
mov d0,(MMUCTR)
#ifdef CONFIG_AM34_2
mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
#else
mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
#endif
mov d0,(MMUCTR)
# turn on AM33v2 exception handling mode and set the trap table base
......@@ -51,6 +91,11 @@ _start:
mov d0,(TBR)
# invalidate and enable both of the caches
#ifdef CONFIG_SMP
mov ECHCTR,a0
clr d0
mov d0,(a0)
#endif
mov CHCTR,a0
clr d0
movhu d0,(a0) # turn off first
......@@ -206,6 +251,44 @@ __no_parameters:
call processor_init[],0
call unit_init[],0
#ifdef CONFIG_SMP
# mark the primary CPU in cpu_boot_map
mov cpu_boot_map,a0
mov 0x1,d0
mov d0,(a0)
# signal each secondary CPU to begin booting
mov 0x1,d2 # CPU ID
loop_request_boot_secondary:
mov d2,a0
# send SMP_BOOT_IPI to secondary CPU
asl CROSS_ICR_CPU_SHIFT,a0
add CROSS_GxICR(SMP_BOOT_IRQ,0),a0
movhu (a0),d0
or GxICR_REQUEST|GxICR_DETECT,d0
movhu d0,(a0)
movhu (a0),d0 # flush
# wait up to 100ms for AP's IPI to be received
clr d3
wait_on_secondary_boot:
mov DELAY_TIME_BOOT_IPI,d0
call __delay[],0
inc d3
mov cpu_boot_map,a0
mov (a0),d0
lsr d2,d0
btst 0x1,d0
bne 1f
cmp TIME_OUT_COUNT_BOOT_IPI,d3
bne wait_on_secondary_boot
1:
inc d2
cmp NR_CPUS,d2
bne loop_request_boot_secondary
#endif /* CONFIG_SMP */
#ifdef CONFIG_GDBSTUB
call gdbstub_init[],0
......@@ -217,7 +300,118 @@ __gdbstub_pause:
#endif
jmp start_kernel
.size _start, _start-.
.size _start,.-_start
###############################################################################
#
# Secondary CPU boot point
#
###############################################################################
#ifdef CONFIG_SMP
# startup_secondary: boot entry point for a secondary CPU (AP)
# Reached from _start when this CPU's CPUID is non-zero.  It mirrors the
# primary CPU's early bring-up (MMU, AM33v2 exception mode, vector table,
# caches), acknowledges the boot IPI, derives a per-CPU boot stack from
# its CPUID, marks itself in cpu_boot_map for the BP to see, and then
# sleeps, waiting to be driven onward by further IPIs.
startup_secondary:
# preload the PGD pointer register
mov swapper_pg_dir,d0
mov d0,(PTBR)
clr d0
movbu d0,(PIDR)
# turn on the TLBs
mov MMUCTR_IIV|MMUCTR_DIV,d0
mov d0,(MMUCTR)
#ifdef CONFIG_AM34_2
mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
#else
mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
#endif
mov d0,(MMUCTR)
# turn on AM33v2 exception handling mode and set the trap table base
movhu (CPUP),d0
or CPUP_EXM_AM33V2,d0
movhu d0,(CPUP)
# set the interrupt vector table
mov CONFIG_INTERRUPT_VECTOR_BASE,d0
mov d0,(TBR)
# invalidate and enable both of the caches
mov ECHCTR,a0
clr d0
mov d0,(a0)
mov CHCTR,a0
clr d0
movhu d0,(a0) # turn off first
mov CHCTR_ICINV|CHCTR_DCINV,d0
movhu d0,(a0)
setlb
mov (a0),d0
btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
lne
#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_CACHE_WBACK
#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
#else
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
#endif /* !NOWRALLOC */
#else
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
#endif /* WBACK */
movhu d0,(a0) # enable
#endif /* ENABLED */
# Clear the boot IPI interrupt for this CPU
movhu (GxICR(SMP_BOOT_IRQ)),d0
and ~GxICR_REQUEST,d0
movhu d0,(GxICR(SMP_BOOT_IRQ))
movhu (GxICR(SMP_BOOT_IRQ)),d0 # flush
/* get stack */
# stack top = vector base + offset, minus CPUID * per-CPU stack size
mov CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
mov (CPUID),d0
and CPUID_MASK,d0
mulu CONFIG_BOOT_STACK_SIZE,d0
sub d0,a0
mov a0,sp
# init interrupt for AP
call smp_prepare_cpu_init[],0
# mark this secondary CPU in cpu_boot_map
mov (CPUID),d0
mov 0x1,d1
asl d0,d1
mov cpu_boot_map,a0
bset d1,(a0)
or EPSW_IE|EPSW_IM_1,epsw # permit level 0 interrupts
nop
nop
#ifdef CONFIG_MN10300_CACHE_WBACK
# flush the local cache if it's in writeback mode
call mn10300_local_dcache_flush_inv[],0
setlb
mov (CHCTR),d0
btst CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
lne
#endif
# now sleep waiting for further instructions
secondary_sleep:
mov CPUM_SLEEP,d0
movhu d0,(CPUM)
nop
nop
bra secondary_sleep
.size startup_secondary,.-startup_secondary
#endif /* CONFIG_SMP */
###############################################################################
#
#
#
###############################################################################
ENTRY(__head_end)
/*
......
......@@ -18,3 +18,15 @@ extern int kernel_thread_helper(int);
* entry.S
*/
extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
/*
* smp-low.S
*/
#ifdef CONFIG_SMP
extern void mn10300_low_ipi_handler(void);
#endif
/*
* time.c
*/
extern irqreturn_t local_timer_interrupt(void);
......@@ -12,11 +12,34 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>
unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
#ifdef CONFIG_SMP
#undef GxICR
#define GxICR(X) CROSS_GxICR(X, irq_affinity_online[X])
#undef GxICR_u8
#define GxICR_u8(X) CROSS_GxICR_u8(X, irq_affinity_online[X])
#endif /* CONFIG_SMP */
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
#ifdef CONFIG_SMP
static char irq_affinity_online[NR_IRQS] = {
[0 ... NR_IRQS - 1] = 0
};
#define NR_IRQ_WORDS ((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif /* CONFIG_SMP */
atomic_t irq_err_count;
/*
......@@ -24,30 +47,65 @@ atomic_t irq_err_count;
*/
/*
 * Acknowledge an interrupt: write GxICR_DETECT to the detect byte of the
 * channel's control register to clear the pending request.  Done with
 * local IRQs off; the trailing 16-bit read flushes the write out to the
 * interrupt controller.
 */
static void mn10300_cpupic_ack(unsigned int irq)
{
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
GxICR_u8(irq) = GxICR_DETECT;
tmp = GxICR(irq); /* flush */
arch_local_irq_restore(flags);
}
/*
 * Atomically update an interrupt channel's control register:
 *	new = (old & mask) | set
 * performed with local IRQs disabled so the read-modify-write cannot be
 * torn by an interrupt on this CPU.  The final dummy read flushes the
 * write out to the interrupt controller.
 *
 * The stray "*(volatile u8 *) &GxICR(irq) = GxICR_DETECT;" that preceded
 * the cli here was removed: it duplicated the ack already performed by
 * mn10300_cpupic_ack(), and unconditionally clearing detection inside a
 * generic mask/set helper would discard pending edges on every
 * mask/unmask operation.
 */
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);	/* flush */
	arch_local_irq_restore(flags);
}
static void mn10300_cpupic_mask(unsigned int irq)
{
u16 tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL);
tmp = GxICR(irq);
__mask_and_set_icr(irq, GxICR_LEVEL, 0);
}
static void mn10300_cpupic_mask_ack(unsigned int irq)
{
u16 tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
tmp = GxICR(irq);
#ifdef CONFIG_SMP
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
if (!test_and_clear_bit(irq, irq_affinity_request)) {
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
tmp = GxICR(irq);
} else {
u16 tmp2;
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL);
tmp2 = GxICR(irq);
irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
GxICR(irq) = (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
tmp = GxICR(irq);
}
arch_local_irq_restore(flags);
#else /* CONFIG_SMP */
__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}
static void mn10300_cpupic_unmask(unsigned int irq)
{
u16 tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
tmp = GxICR(irq);
__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
}
static void mn10300_cpupic_unmask_clear(unsigned int irq)
......@@ -56,11 +114,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
* device has ceased to assert its interrupt line and the interrupt
* channel has been disabled in the PIC, so for level-triggered
* interrupts we need to clear the request bit when we re-enable */
u16 tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
tmp = GxICR(irq);
#ifdef CONFIG_SMP
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
if (!test_and_clear_bit(irq, irq_affinity_request)) {
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
tmp = GxICR(irq);
} else {
tmp = GxICR(irq);
irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
tmp = GxICR(irq);
}
arch_local_irq_restore(flags);
#else /* CONFIG_SMP */
__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SMP
/*
 * Set the CPU affinity of an interrupt (irq_chip::set_affinity).
 *
 * Per-CPU and subsystem-bound IRQs (jiffies timer, the various IPIs, and
 * the on-chip serial/timer channels) must not be migrated, so they are
 * rejected with -1.  For any other IRQ the request is only recorded in
 * irq_affinity_request; the actual retargeting of irq_affinity_online[]
 * happens lazily in the mask_ack/unmask handlers, where the ICR can be
 * switched safely.
 *
 * NOTE(review): the "mask" argument is not consulted here - the new
 * target CPU is later picked from irq_desc[irq].affinity; this assumes
 * the genirq core updates that field before calling in.
 */
static int
mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
{
unsigned long flags;
int err;
flags = arch_local_cli_save();
/* check irq no */
switch (irq) {
case TMJCIRQ:
case RESCHEDULE_IPI:
case CALL_FUNC_SINGLE_IPI:
case LOCAL_TIMER_IPI:
case FLUSH_CACHE_IPI:
case CALL_FUNCTION_NMI_IPI:
case GDB_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
case SC0RXIRQ:
case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
case TM8IRQ:
#elif CONFIG_MN10300_TTYSM0_TIMER2 /* NOTE(review): bare "#elif CONFIG_x"; "#elif defined()" would be clearer */
case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */
#ifdef CONFIG_MN10300_TTYSM1
case SC1RXIRQ:
case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
case TM12IRQ:
#elif CONFIG_MN10300_TTYSM1_TIMER9
case TM9IRQ:
#elif CONFIG_MN10300_TTYSM1_TIMER3
case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */
#ifdef CONFIG_MN10300_TTYSM2
case SC2RXIRQ:
case SC2TXIRQ:
case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
err = -1; /* not migratable */
break;
default:
set_bit(irq, irq_affinity_request); /* defer: applied at next (un)mask */
err = 0;
break;
}
arch_local_irq_restore(flags);
return err;
}
#endif /* CONFIG_SMP */
/*
* MN10300 PIC level-triggered IRQ handling.
*
......@@ -79,6 +215,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask,
.unmask = mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
.set_affinity = mn10300_cpupic_setaffinity,
#endif /* CONFIG_SMP */
};
/*
......@@ -94,6 +233,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask_ack,
.unmask = mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
.set_affinity = mn10300_cpupic_setaffinity,
#endif /* CONFIG_SMP */
};
/*
......@@ -111,14 +253,34 @@ void ack_bad_irq(int irq)
*/
void set_intr_level(int irq, u16 level)
{
u16 tmp;
BUG_ON(in_interrupt());
if (in_interrupt())
BUG();
__mask_and_set_icr(irq, GxICR_ENABLE, level);
}
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_ENABLE) | level;
tmp = GxICR(irq);
/*
 * Set the priority level of an interrupt channel, taking the level as a
 * plain number and converting it to GxICR format; the channel's enable
 * bit is preserved by set_intr_level().
 */
void mn10300_intc_set_level(unsigned int irq, unsigned int level)
{
set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
}
/*
 * Clear a pending interrupt request: keep the channel's level and enable
 * bits, write GxICR_DETECT to wipe the detection state.
 */
void mn10300_intc_clear(unsigned int irq)
{
__mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
}
/*
 * Assert an interrupt in software by writing GxICR_REQUEST|GxICR_DETECT
 * to the channel.
 *
 * NOTE(review): the mask of 0 means the previous level/enable bits are
 * not carried into the written value - verify this matches the GxICR
 * write semantics for a software-raised request.
 */
void mn10300_intc_set(unsigned int irq)
{
__mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
}
/* Enable an interrupt channel (wrapper over the PIC unmask operation). */
void mn10300_intc_enable(unsigned int irq)
{
mn10300_cpupic_unmask(irq);
}
/* Disable an interrupt channel (wrapper over the PIC mask operation). */
void mn10300_intc_disable(unsigned int irq)
{
mn10300_cpupic_mask(irq);
}
/*
......@@ -126,7 +288,7 @@ void set_intr_level(int irq, u16 level)
* than before
* - see Documentation/mn10300/features.txt
*/
void set_intr_postackable(int irq)
void mn10300_set_lateack_irq_type(int irq)
{
set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
handle_level_irq);
......@@ -147,6 +309,7 @@ void __init init_IRQ(void)
* interrupts */
set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
handle_level_irq);
unit_init_IRQ();
}
......@@ -156,6 +319,7 @@ void __init init_IRQ(void)
asmlinkage void do_IRQ(void)
{
unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
unsigned int cpu_id = smp_processor_id();
int irq;
sp = current_stack_pointer();
......@@ -163,12 +327,14 @@ asmlinkage void do_IRQ(void)
/* make sure local_irq_enable() doesn't muck up the interrupt priority
* setting in EPSW */
old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
local_save_flags(epsw);
__mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
__IRQ_STAT(smp_processor_id(), __irq_count)++;
#ifdef CONFIG_MN10300_WD_TIMER
__IRQ_STAT(cpu_id, __irq_count)++;
#endif
irq_enter();
......@@ -188,7 +354,7 @@ asmlinkage void do_IRQ(void)
local_irq_restore(epsw);
}
__mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
irq_exit();
}
......@@ -239,11 +405,13 @@ int show_interrupts(struct seq_file *p, void *v)
/* polish off with NMI and error counters */
case NR_IRQS:
#ifdef CONFIG_MN10300_WD_TIMER
seq_printf(p, "NMI: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
break;
......@@ -251,3 +419,51 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Migrate IRQs away from this CPU (called on CPU offline with hotplug).
 *
 * For every IRQ whose affinity no longer intersects the online map, a
 * fallback online CPU is added to its affinity.  Then, if the IRQ is
 * currently routed to this CPU (irq_affinity_online[irq] == self), its
 * ICR state is copied to the new target: the channel is masked on self,
 * the detect state primed on the new CPU, and finally level/enable (plus
 * any still-pending request) are transferred across.
 *
 * NOTE(review): "desc->status == IRQ_PER_CPU" compares the whole status
 * word rather than testing the flag bit (& IRQ_PER_CPU) - confirm.
 * NOTE(review): "irq_affinity[irq]" differs from the
 * "irq_desc[irq].affinity" used on the neighbouring lines - confirm it
 * refers to the same mask.
 * NOTE(review): arch_local_cli_save() is used here in statement form
 * taking "flags" as an argument, whereas the rest of this file assigns
 * its return value - confirm both spellings are supported.
 */
void migrate_irqs(void)
{
irq_desc_t *desc;
int irq;
unsigned int self, new;
unsigned long flags;
self = smp_processor_id();
for (irq = 0; irq < NR_IRQS; irq++) {
desc = irq_desc + irq;
if (desc->status == IRQ_PER_CPU)
continue;
if (cpu_isset(self, irq_desc[irq].affinity) &&
!cpus_intersects(irq_affinity[irq], cpu_online_map)) {
int cpu_id;
cpu_id = first_cpu(cpu_online_map);
cpu_set(cpu_id, irq_desc[irq].affinity);
}
/* We need to operate irq_affinity_online atomically. */
arch_local_cli_save(flags);
if (irq_affinity_online[irq] == self) {
u16 x, tmp;
x = CROSS_GxICR(irq, self);
CROSS_GxICR(irq, self) = x & GxICR_LEVEL; /* mask on the dying CPU */
tmp = CROSS_GxICR(irq, self); /* flush */
new = any_online_cpu(irq_desc[irq].affinity);
irq_affinity_online[irq] = new;
CROSS_GxICR(irq, new) =
(x & GxICR_LEVEL) | GxICR_DETECT;
tmp = CROSS_GxICR(irq, new); /* flush */
x &= GxICR_LEVEL | GxICR_ENABLE;
if (CROSS_GxICR(irq, self) & GxICR_REQUEST)
x |= GxICR_REQUEST | GxICR_DETECT; /* carry pending request over */
CROSS_GxICR(irq, new) = x;
tmp = CROSS_GxICR(irq, new); /* flush */
}
arch_local_irq_restore(flags);
}
}
#endif /* CONFIG_HOTPLUG_CPU */
......@@ -39,7 +39,7 @@
###############################################################################
.balign L1_CACHE_BYTES
ENTRY(mn10300_serial_vdma_interrupt)
or EPSW_IE,psw # permit overriding by
# or EPSW_IE,psw # permit overriding by
# debugging interrupts
movm [d2,d3,a2,a3,exreg0],(sp)
......@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
rti
mnsc_vdma_tx_empty:
mov +(GxICR_LEVEL_1|GxICR_DETECT),d2
mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
movhu d2,(e3) # disable the interrupt
movhu (e3),d2 # flush
......@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
movhu (SCxCTR,e2),d2 # turn on break mode
or SC01CTR_BKE,d2
movhu d2,(SCxCTR,e2)
mov +(GxICR_LEVEL_1|GxICR_DETECT),d2
mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
movhu d2,(e3) # disable transmit interrupts on this
# channel
movhu (e3),d2 # flush
......
This diff is collapsed.
......@@ -16,6 +16,7 @@
#include <asm/intctl-regs.h>
#include <asm/timer-regs.h>
#include <asm/frame.inc>
#include <linux/threads.h>
.text
......@@ -53,7 +54,13 @@ watchdog_handler:
###############################################################################
#
# touch_nmi_watchdog - reset the NMI watchdog alert counters
# Zeroes watchdog_alert_counter[cpu] for every CPU (NR_CPUS entries, one
# 32-bit word each) so the watchdog does not declare a lockup.  The
# clearing loop runs from the CPU loop buffer via setlb/lne.
#
###############################################################################
.type touch_nmi_watchdog,@function
touch_nmi_watchdog:
clr d0
# NOTE(review): the next store looks like leftover from the pre-SMP
# version - it clears only element 0, which the loop below clears again
mov d0,(watchdog_alert_counter)
clr d1 # d1 = CPU index
mov watchdog_alert_counter, a0
setlb
mov d0, (a0+) # watchdog_alert_counter[d1] = 0
inc d1
cmp NR_CPUS, d1
lne
ret [],0
.size touch_nmi_watchdog,.-touch_nmi_watchdog
......@@ -30,7 +30,7 @@
static DEFINE_SPINLOCK(watchdog_print_lock);
static unsigned int watchdog;
static unsigned int watchdog_hz = 1;
unsigned int watchdog_alert_counter;
unsigned int watchdog_alert_counter[NR_CPUS];
EXPORT_SYMBOL(touch_nmi_watchdog);
......@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
* is to check its timer makes IRQ counts. If they are not
* changing then that CPU has some problem.
*
* as these watchdog NMI IRQs are generated on every CPU, we only
* have to check the current processor.
*
* since NMIs dont listen to _any_ locks, we have to be extremely
* careful not to rely on unsafe variables. The printk might lock
* up though, so we have to break up any console locks first ...
......@@ -69,8 +66,8 @@ int __init check_watchdog(void)
printk(KERN_INFO "OK.\n");
/* now that we know it works we can reduce NMI frequency to
* something more reasonable; makes a difference in some configs
/* now that we know it works we can reduce NMI frequency to something
* more reasonable; makes a difference in some configs
*/
watchdog_hz = 1;
......@@ -121,15 +118,22 @@ void __init watchdog_go(void)
}
}
#ifdef CONFIG_SMP
/*
 * Dump this CPU's registers; run on the other CPUs via
 * smp_nmi_call_function() when the NMI watchdog detects a lockup
 * (the "dummy" argument is required by the call-function signature
 * and ignored).
 */
static void watchdog_dump_register(void *dummy)
{
printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
show_registers(__frame);
}
asmlinkage
void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
{
/*
* Since current-> is always on the stack, and we always switch
* the stack NMI-atomically, it's safe to use smp_processor_id().
*/
int sum, cpu = smp_processor_id();
int sum, cpu;
int irq = NMIIRQ;
u8 wdt, tmp;
......@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
tmp = WDCTR;
NMICR = NMICR_WDIF;
nmi_count(cpu)++;
nmi_count(smp_processor_id())++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
sum = irq_stat[cpu].__irq_count;
if (last_irq_sums[cpu] == sum) {
/*
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
watchdog_alert_counter++;
if (watchdog_alert_counter == 5 * watchdog_hz) {
spin_lock(&watchdog_print_lock);
for_each_online_cpu(cpu) {
sum = irq_stat[cpu].__irq_count;
if ((last_irq_sums[cpu] == sum)
#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
&& !(CHK_GDBSTUB_BUSY()
|| atomic_read(&cpu_doing_single_step))
#endif
) {
/*
* We are in trouble anyway, lets at least try
* to get a message out.
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
bust_spinlocks(1);
printk(KERN_ERR
"NMI Watchdog detected LOCKUP on CPU%d,"
" pc %08lx, registers:\n",
cpu, regs->pc);
show_registers(regs);
printk("console shuts up ...\n");
console_silent();
spin_unlock(&watchdog_print_lock);
bust_spinlocks(0);
watchdog_alert_counter[cpu]++;
if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
spin_lock(&watchdog_print_lock);
/*
* We are in trouble anyway, lets at least try
* to get a message out.
*/
bust_spinlocks(1);
printk(KERN_ERR
"NMI Watchdog detected LOCKUP on CPU%d,"
" pc %08lx, registers:\n",
cpu, regs->pc);
#ifdef CONFIG_SMP
printk(KERN_ERR
"--- Register Dump (CPU%d) ---\n",
CPUID);
#endif
show_registers(regs);
#ifdef CONFIG_SMP
smp_nmi_call_function(watchdog_dump_register,
NULL, 1);
#endif
printk(KERN_NOTICE "console shuts up ...\n");
console_silent();
spin_unlock(&watchdog_print_lock);
bust_spinlocks(0);
#ifdef CONFIG_GDBSTUB
if (gdbstub_busy)
gdbstub_exception(regs, excep);
else
gdbstub_intercept(regs, excep);
if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
gdbstub_exception(regs, excep);
else
gdbstub_intercept(regs, excep);
#endif
do_exit(SIGSEGV);
do_exit(SIGSEGV);
}
} else {
last_irq_sums[cpu] = sum;
watchdog_alert_counter[cpu] = 0;
}
} else {
last_irq_sums[cpu] = sum;
watchdog_alert_counter = 0;
}
WDCTR = wdt | WDCTR_WDRST;
......
......@@ -57,6 +57,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
* we use this if we don't have any better idle routine
*/
......@@ -69,6 +70,35 @@ static void default_idle(void)
local_irq_enable();
}
#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
* On SMP it's slightly faster (but much more power-consuming!)
* to poll the ->work.need_resched flag instead of waiting for the
* cross-CPU IPI to arrive. Use this option with caution.
*/
static inline void poll_idle(void)
{
int oldval;
local_irq_enable();
/*
* Deal with another CPU just having chosen a thread to
* run here:
*/
oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
if (!oldval) {
set_thread_flag(TIF_POLLING_NRFLAG);
while (!need_resched())
cpu_relax();
clear_thread_flag(TIF_POLLING_NRFLAG);
} else {
set_need_resched();
}
}
#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
* the idle thread
* - there's no useful work to be done, so just try to conserve power and have
......@@ -77,8 +107,6 @@ static void default_idle(void)
*/
void cpu_idle(void)
{
int cpu = smp_processor_id();
/* endless idle loop with no priority at all */
for (;;) {
while (!need_resched()) {
......@@ -86,8 +114,13 @@ void cpu_idle(void)
smp_rmb();
idle = pm_idle;
if (!idle)
if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
idle = poll_idle;
#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
idle = default_idle;
#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
}
idle();
}
......@@ -233,7 +266,7 @@ int copy_thread(unsigned long clone_flags,
}
/* set up things up so the scheduler can start the new task */
p->thread.__frame = c_kregs;
p->thread.frame = c_kregs;
p->thread.a3 = (unsigned long) c_kregs;
p->thread.sp = c_ksp;
p->thread.pc = (unsigned long) ret_from_fork;
......
......@@ -41,7 +41,7 @@ static __init int profile_init(void)
tmp = TM11ICR;
printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
mn10300_ioclk / 8 / (TM11BR + 1));
MN10300_IOCLK / 8 / (TM11BR + 1));
printk(KERN_INFO "Profile histogram stored %p-%p\n",
prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
......
......@@ -20,18 +20,22 @@
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
/* time for RTC to update itself in ioclks */
static unsigned long mn10300_rtc_update_period;
/*
* Read the current RTC time
*/
void read_persistent_clock(struct timespec *ts)
{
struct rtc_time tm;
get_rtc_time(&tm);
ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
ts->tv_nsec = 0;
ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
/* if rtc is way off in the past, set something reasonable */
if (ts->tv_sec < 0)
ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
}
/*
......@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now)
*/
void __init calibrate_clock(void)
{
unsigned long count0, counth, count1;
unsigned char status;
/* make sure the RTC is running and is set to operate in 24hr mode */
status = RTSRC;
RTCRB |= RTCRB_SET;
RTCRB |= RTCRB_TM_24HR;
RTCRB &= ~RTCRB_DM_BINARY;
RTCRA |= RTCRA_DVR;
RTCRA &= ~RTCRA_DVR;
RTCRB &= ~RTCRB_SET;
/* work out the clock speed by counting clock cycles between ends of
* the RTC update cycle - track the RTC through one complete update
* cycle (1 second)
*/
startup_timestamp_counter();
while (!(RTCRA & RTCRA_UIP)) {}
while ((RTCRA & RTCRA_UIP)) {}
count0 = TMTSCBC;
while (!(RTCRA & RTCRA_UIP)) {}
counth = TMTSCBC;
while ((RTCRA & RTCRA_UIP)) {}
count1 = TMTSCBC;
shutdown_timestamp_counter();
MN10300_TSCCLK = count0 - count1; /* the timers count down */
mn10300_rtc_update_period = counth - count1;
MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ;
}
......@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <asm/uaccess.h>
......@@ -30,7 +31,6 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <proc/proc.h>
#include <asm/busctl-regs.h>
#include <asm/fpu.h>
#include <asm/sections.h>
......@@ -64,11 +64,13 @@ unsigned long memory_size;
struct thread_info *__current_ti = &init_thread_union.thread_info;
struct task_struct *__current = &init_task;
#define mn10300_known_cpus 3
#define mn10300_known_cpus 5
static const char *const mn10300_cputypes[] = {
"am33v1",
"am33v2",
"am34v1",
"am33-1",
"am33-2",
"am34-1",
"am33-3",
"am34-2",
"unknown"
};
......@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p)
cpu_init();
unit_setup();
smp_init_cpus();
parse_mem_cmdline(cmdline_p);
init_mm.start_code = (unsigned long)&_text;
......@@ -179,7 +182,6 @@ void __init setup_arch(char **cmdline_p)
void __init cpu_init(void)
{
unsigned long cpurev = CPUREV, type;
unsigned long base, size;
type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
if (type > mn10300_known_cpus)
......@@ -189,47 +191,46 @@ void __init cpu_init(void)
mn10300_cputypes[type],
(cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S);
/* determine the memory size and base from the memory controller regs */
memory_size = 0;
base = SDBASE(0);
if (base & SDBASE_CE) {
size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
size = ~size + 1;
base &= SDBASE_CBA;
get_mem_info(&phys_memory_base, &memory_size);
phys_memory_end = phys_memory_base + memory_size;
printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
memory_size += size;
phys_memory_base = base;
}
fpu_init_state();
}
base = SDBASE(1);
if (base & SDBASE_CE) {
size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
size = ~size + 1;
base &= SDBASE_CBA;
static struct cpu cpu_devices[NR_CPUS];
printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
memory_size += size;
if (phys_memory_base == 0)
phys_memory_base = base;
}
static int __init topology_init(void)
{
int i;
phys_memory_end = phys_memory_base + memory_size;
for_each_present_cpu(i)
register_cpu(&cpu_devices[i], i);
#ifdef CONFIG_FPU
fpu_init_state();
#endif
return 0;
}
subsys_initcall(topology_init);
/*
* Get CPU information for use by the procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
struct mn10300_cpuinfo *c = v;
unsigned long cpu_id = c - cpu_data;
unsigned long cpurev = c->type, type, icachesz, dcachesz;
#else /* CONFIG_SMP */
unsigned long cpu_id = 0;
unsigned long cpurev = CPUREV, type, icachesz, dcachesz;
#endif /* CONFIG_SMP */
type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
#ifdef CONFIG_SMP
if (!cpu_online(cpu_id))
return 0;
#endif
type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S;
if (type > mn10300_known_cpus)
type = mn10300_known_cpus;
......@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1024;
seq_printf(m,
"processor : 0\n"
"processor : %ld\n"
"vendor_id : Matsushita\n"
"cpu core : %s\n"
"cpu rev : %lu\n"
"model name : " PROCESSOR_MODEL_NAME "\n"
"icache size: %lu\n"
"dcache size: %lu\n",
cpu_id,
mn10300_cputypes[type],
(cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
icachesz,
......@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"bogomips : %lu.%02lu\n\n",
MN10300_IOCLK / 1000000,
(MN10300_IOCLK / 10000) % 100,
#ifdef CONFIG_SMP
c->loops_per_jiffy / (500000 / HZ),
(c->loops_per_jiffy / (5000 / HZ)) % 100
#else /* CONFIG_SMP */
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100
#endif /* CONFIG_SMP */
);
return 0;
......
/* SMP IPI low-level handler
*
* Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <proc/smp-regs.h>
#include <asm/asm-offsets.h>
#include <asm/frame.inc>
.am33_2
###############################################################################
#
# IPI interrupt handler
#
###############################################################################
.globl mn10300_low_ipi_handler
mn10300_low_ipi_handler:
add -4,sp
mov d0,(sp)
movhu (IAGR),d0
and IAGR_GN,d0
lsr 0x2,d0
#ifdef CONFIG_MN10300_CACHE_ENABLED
cmp FLUSH_CACHE_IPI,d0
beq mn10300_flush_cache_ipi
#endif
cmp SMP_BOOT_IRQ,d0
beq mn10300_smp_boot_ipi
/* OTHERS */
mov (sp),d0
add 4,sp
#ifdef CONFIG_GDBSTUB
jmp gdbstub_io_rx_handler
#else
jmp end
#endif
###############################################################################
#
# Cache flush IPI interrupt handler
#
###############################################################################
#ifdef CONFIG_MN10300_CACHE_ENABLED
mn10300_flush_cache_ipi:
mov (sp),d0
add 4,sp
/* FLUSH_CACHE_IPI */
add -4,sp
SAVE_ALL
mov GxICR_DETECT,d2
movbu d2,(GxICR(FLUSH_CACHE_IPI)) # ACK the interrupt
movhu (GxICR(FLUSH_CACHE_IPI)),d2
call smp_cache_interrupt[],0
RESTORE_ALL
jmp end
#endif
###############################################################################
#
# SMP boot CPU IPI interrupt handler
#
###############################################################################
mn10300_smp_boot_ipi:
/* clear interrupt */
movhu (GxICR(SMP_BOOT_IRQ)),d0
and ~GxICR_REQUEST,d0
movhu d0,(GxICR(SMP_BOOT_IRQ))
mov (sp),d0
add 4,sp
# get stack
mov (CPUID),a0
add -1,a0
add a0,a0
add a0,a0
mov (start_stack,a0),a0
mov a0,sp
jmp initialize_secondary
# Jump here after RTI to suppress the icache lookahead
end:
This diff is collapsed.
......@@ -15,6 +15,9 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#ifdef CONFIG_SMP
#include <proc/smp-regs.h>
#endif /* CONFIG_SMP */
.text
......@@ -35,7 +38,14 @@ ENTRY(__switch_to)
mov d1,a1
# save prev context
#ifdef CONFIG_SMP
mov (CPUID),a2
add a2,a2
add a2,a2
mov (___frame,a2),d0
#else /* CONFIG_SMP */
mov (__frame),d0
#endif /* CONFIG_SMP */
mov d0,(THREAD_FRAME,a0)
mov __switch_back,d0
mov d0,(THREAD_PC,a0)
......@@ -59,7 +69,14 @@ ENTRY(__switch_to)
#endif
mov (THREAD_FRAME,a1),a2
#ifdef CONFIG_SMP
mov (CPUID),a0
add a0,a0
add a0,a0
mov a2,(___frame,a0)
#else /* CONFIG_SMP */
mov a2,(__frame)
#endif /* CONFIG_SMP */
mov (THREAD_PC,a1),a2
mov d2,d0 # for ret_from_fork
mov d0,a0 # for __switch_to
......
......@@ -22,12 +22,7 @@
#include <asm/processor.h>
#include <asm/intctl-regs.h>
#include <asm/rtc.h>
#ifdef CONFIG_MN10300_RTC
unsigned long mn10300_ioclk; /* system I/O clock frequency */
unsigned long mn10300_iobclk; /* system I/O clock frequency */
unsigned long mn10300_tsc_per_HZ; /* number of ioclks per jiffy */
#endif /* CONFIG_MN10300_RTC */
#include "internal.h"
static unsigned long mn10300_last_tsc; /* time-stamp counter at last time
* interrupt occurred */
......@@ -95,6 +90,19 @@ static void __init mn10300_sched_clock_init(void)
__muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
}
/**
* local_timer_interrupt - Local timer interrupt handler
*
* Handle local timer interrupts for this CPU. They may have been propagated
* to this CPU from the CPU that actually gets them by way of an IPI.
*/
irqreturn_t local_timer_interrupt(void)
{
profile_tick(CPU_PROFILING);
update_process_times(user_mode(get_irq_regs()));
return IRQ_HANDLED;
}
/*
* advance the kernel's time keeping clocks (xtime and jiffies)
* - we use Timer 0 & 1 cascaded as a clock to nudge us the next time
......@@ -103,6 +111,7 @@ static void __init mn10300_sched_clock_init(void)
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
unsigned tsc, elapse;
irqreturn_t ret;
write_seqlock(&xtime_lock);
......@@ -114,15 +123,16 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
mn10300_last_tsc -= MN10300_TSC_PER_HZ;
/* advance the kernel's time tracking system */
profile_tick(CPU_PROFILING);
do_timer(1);
}
write_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
return IRQ_HANDLED;
ret = local_timer_interrupt();
#ifdef CONFIG_SMP
send_IPI_allbutself(LOCAL_TIMER_IPI);
#endif
return ret;
}
/*
......@@ -148,7 +158,7 @@ void __init time_init(void)
/* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
setup_irq(TMJCIRQ, &timer_irq);
set_intr_level(TMJCIRQ, TMJCICR_LEVEL);
set_intr_level(TMJCIRQ, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
startup_jiffies_counter();
......
......@@ -45,8 +45,13 @@
#error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
#endif
#ifdef CONFIG_SMP
struct pt_regs *___frame[NR_CPUS]; /* current frame pointer */
EXPORT_SYMBOL(___frame);
#else /* CONFIG_SMP */
struct pt_regs *__frame; /* current frame pointer */
EXPORT_SYMBOL(__frame);
#endif /* CONFIG_SMP */
int kstack_depth_to_print = 24;
......@@ -221,11 +226,14 @@ void show_registers_only(struct pt_regs *regs)
printk(KERN_EMERG "threadinfo=%p task=%p)\n",
current_thread_info(), current);
if ((unsigned long) current >= 0x90000000UL &&
(unsigned long) current < 0x94000000UL)
if ((unsigned long) current >= PAGE_OFFSET &&
(unsigned long) current < (unsigned long)high_memory)
printk(KERN_EMERG "Process %s (pid: %d)\n",
current->comm, current->pid);
#ifdef CONFIG_SMP
printk(KERN_EMERG "CPUID: %08x\n", CPUID);
#endif
printk(KERN_EMERG "CPUP: %04hx\n", CPUP);
printk(KERN_EMERG "TBR: %08x\n", TBR);
printk(KERN_EMERG "DEAR: %08x\n", DEAR);
......@@ -521,8 +529,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
{
unsigned long addr;
u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
unsigned long flags;
addr = (unsigned long) handler - (unsigned long) vector;
flags = arch_local_cli_save();
vector[0] = 0xdc; /* JMP handler */
vector[1] = addr;
vector[2] = addr >> 8;
......@@ -532,6 +544,8 @@ void __init set_intr_stub(enum exception_code code, void *handler)
vector[6] = 0xcb;
vector[7] = 0xcb;
arch_local_irq_restore(flags);
#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush_inv();
mn10300_icache_inv();
......
......@@ -38,14 +38,14 @@ EXPORT_SYMBOL(__delay);
*/
void __udelay(unsigned long usecs)
{
signed long ioclk, stop;
unsigned long start, stop, cnt;
/* usecs * CLK / 1E6 */
stop = __muldiv64u(usecs, MN10300_TSCCLK, 1000000);
stop = TMTSCBC - stop;
start = TMTSCBC;
do {
ioclk = TMTSCBC;
} while (stop < ioclk);
cnt = start - TMTSCBC;
} while (cnt < stop);
}
EXPORT_SYMBOL(__udelay);
......@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
#ifdef CONFIG_SMP
/* Many serial drivers do __global_cli() */
global_irq_lock = 0;
#endif
} else {
int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
......@@ -334,10 +330,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
*/
out_of_memory:
up_read(&mm->mmap_sem);
if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
goto no_context;
pagefault_out_of_memory();
return;
printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
do_exit(SIGKILL);
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
......
......@@ -13,6 +13,4 @@
#include <unit/clock.h>
#define MN10300_WDCLK MN10300_IOCLK
#endif /* _ASM_PROC_CLOCK_H */
#ifndef _ASM_PROC_INTCTL_REGS_H
#define _ASM_PROC_INTCTL_REGS_H
#ifndef _ASM_INTCTL_REGS_H
# error "please don't include this file directly"
#endif
/* intr acceptance group reg */
#define IAGR __SYSREG(0xd4000100, u16)
/* group number register */
#define IAGR_GN 0x00fc
#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
#define __SET_XIRQ_TRIGGER(X, Y, Z) \
({ \
typeof(Z) x = (Z); \
x &= ~(3 << ((X) * 2)); \
x |= ((Y) & 3) << ((X) * 2); \
(Z) = x; \
})
/* external pin intr spec reg */
#define EXTMD __SYSREG(0xd4000200, u16)
#define GET_XIRQ_TRIGGER(X) __GET_XIRQ_TRIGGER(X, EXTMD)
#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD)
#endif /* _ASM_PROC_INTCTL_REGS_H */
This diff is collapsed.
#
# Makefile for the linux kernel.
#
obj-y := proc-init.o
This diff is collapsed.
/* clock.h: proc-specific clocks
*
* Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* Modified by Matsushita Electric Industrial Co., Ltd.
* Modifications:
* 23-Feb-2007 MEI Delete define for watchdog timer.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_PROC_CLOCK_H
#define _ASM_PROC_CLOCK_H
#include <unit/clock.h>
#endif /* _ASM_PROC_CLOCK_H */
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment