Commit 7fbb58a0 authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
 "A fair number of 4.2 fixes also because Markos opened the flood gates.

   - Patch up the math used to calculate the location for the page bitmap.

   - The FDC (not what you think, FDC stands for Fast Debug Channel) IRQ
     workaround was causing issues on non-Malta platforms, so move the
     code to a Malta-specific location.

   - A spelling fix replicated through several files.

   - Fix to the emulation of an R2 instruction for R6 cores.

   - Fix the JR emulation for R6.

   - Further patching of mindless 64 bit issues.

   - Ensure the kernel won't crash on CPUs with L2 caches with >= 8
     ways.

   - Use compat_sys_getsockopt for O32 ABI on 64 bit kernels.

   - Fix cache flushing for multithreaded cores.

   - A build fix"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: O32: Use compat_sys_getsockopt.
  MIPS: c-r4k: Extend way_string array
  MIPS: Pistachio: Support CDMM & Fast Debug Channel
  MIPS: Malta: Make GIC FDC IRQ workaround Malta specific
  MIPS: c-r4k: Fix cache flushing for MT cores
  Revert "MIPS: Kconfig: Disable SMP/CPS for 64-bit"
  MIPS: cps-vec: Use macros for various arithmetics and memory operations
  MIPS: kernel: cps-vec: Replace KSEG0 with CKSEG0
  MIPS: kernel: cps-vec: Use ta0-ta3 pseudo-registers for 64-bit
  MIPS: kernel: cps-vec: Replace mips32r2 ISA level with mips64r2
  MIPS: kernel: cps-vec: Replace 'la' macro with PTR_LA
  MIPS: kernel: smp-cps: Fix 64-bit compatibility errors due to pointer casting
  MIPS: Fix erroneous JR emulation for MIPS R6
  MIPS: Fix branch emulation for BLTC and BGEC instructions
  MIPS: kernel: traps: Fix broken indentation
  MIPS: bootmem: Don't use memory holes for page bitmap
  MIPS: O32: Do not handle require 32 bytes from the stack to be readable.
  MIPS, CPUFREQ: Fix spelling of Institute.
  MIPS: Lemote 2F: Fix build caused by recent mass rename.
parents 1daa1cfb 51d53674
@@ -2231,7 +2231,7 @@ config MIPS_CMP
 config MIPS_CPS
 	bool "MIPS Coherent Processing System support"
-	depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
+	depends on SYS_SUPPORTS_MIPS_CPS
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU
...
 /*
  * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
- * Insititute of Computing Technology
+ * Institute of Computing Technology
  * Author: Xiang Gao, gaoxiang@ict.ac.cn
  *         Huacai Chen, chenhc@lemote.com
  *         Xiaofu Meng, Shuangshuang Zhang
...
@@ -23,6 +23,7 @@
 extern int smp_num_siblings;
 extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map;
 #define raw_smp_processor_id() (current_thread_info()->cpu)
...
@@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		break;
 	case blezl_op: /* not really i_format */
-		if (NO_R6EMU)
+		if (!insn.i_format.rt && NO_R6EMU)
 			goto sigill_r6;
 	case blez_op:
 		/*
@@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		break;
 	case bgtzl_op:
-		if (NO_R6EMU)
+		if (!insn.i_format.rt && NO_R6EMU)
 			goto sigill_r6;
 	case bgtz_op:
 		/*
...
@@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry)
 	 nop
 	/* This is an NMI */
-	la	k0, nmi_handler
+	PTR_LA	k0, nmi_handler
 	jr	k0
 	 nop
@@ -107,10 +107,10 @@ not_nmi:
 	mul	t1, t1, t0
 	mul	t1, t1, t2
-	li	a0, KSEG0
+	li	a0, CKSEG0
-	add	a1, a0, t1
+	PTR_ADD	a1, a0, t1
 1:	cache	Index_Store_Tag_I, 0(a0)
-	add	a0, a0, t0
+	PTR_ADD	a0, a0, t0
 	bne	a0, a1, 1b
 	 nop
 icache_done:
@@ -134,12 +134,12 @@ icache_done:
 	mul	t1, t1, t0
 	mul	t1, t1, t2
-	li	a0, KSEG0
+	li	a0, CKSEG0
-	addu	a1, a0, t1
+	PTR_ADDU	a1, a0, t1
-	subu	a1, a1, t0
+	PTR_SUBU	a1, a1, t0
 1:	cache	Index_Store_Tag_D, 0(a0)
 	bne	a0, a1, 1b
-	 add	a0, a0, t0
+	 PTR_ADD	a0, a0, t0
 dcache_done:
 	/* Set Kseg0 CCA to that in s0 */
@@ -152,11 +152,11 @@ dcache_done:
 	/* Enter the coherent domain */
 	li	t0, 0xff
-	sw	t0, GCR_CL_COHERENCE_OFS(v1)
+	PTR_S	t0, GCR_CL_COHERENCE_OFS(v1)
 	ehb
 	/* Jump to kseg0 */
-	la	t0, 1f
+	PTR_LA	t0, 1f
 	jr	t0
 	 nop
@@ -178,9 +178,9 @@ dcache_done:
 	 nop
 	/* Off we go! */
-	lw	t1, VPEBOOTCFG_PC(v0)
+	PTR_L	t1, VPEBOOTCFG_PC(v0)
-	lw	gp, VPEBOOTCFG_GP(v0)
+	PTR_L	gp, VPEBOOTCFG_GP(v0)
-	lw	sp, VPEBOOTCFG_SP(v0)
+	PTR_L	sp, VPEBOOTCFG_SP(v0)
 	jr	t1
 	 nop
 	END(mips_cps_core_entry)
@@ -217,7 +217,7 @@ LEAF(excep_intex)
 .org 0x480
 LEAF(excep_ejtag)
-	la	k0, ejtag_debug_handler
+	PTR_LA	k0, ejtag_debug_handler
 	jr	k0
 	 nop
 	END(excep_ejtag)
@@ -229,7 +229,7 @@ LEAF(mips_cps_core_init)
 	 nop
 	.set	push
-	.set	mips32r2
+	.set	mips64r2
 	.set	mt
 	/* Only allow 1 TC per VPE to execute... */
@@ -237,7 +237,7 @@ LEAF(mips_cps_core_init)
 	/* ...and for the moment only 1 VPE */
 	dvpe
-	la	t1, 1f
+	PTR_LA	t1, 1f
 	jr.hb	t1
 	 nop
@@ -250,25 +250,25 @@ LEAF(mips_cps_core_init)
 	mfc0	t0, CP0_MVPCONF0
 	srl	t0, t0, MVPCONF0_PVPE_SHIFT
 	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
-	addiu	t7, t0, 1
+	addiu	ta3, t0, 1
 	/* If there's only 1, we're done */
 	beqz	t0, 2f
 	 nop
 	/* Loop through each VPE within this core */
-	li	t5, 1
+	li	ta1, 1
 1:	/* Operate on the appropriate TC */
-	mtc0	t5, CP0_VPECONTROL
+	mtc0	ta1, CP0_VPECONTROL
 	ehb
 	/* Bind TC to VPE (1:1 TC:VPE mapping) */
-	mttc0	t5, CP0_TCBIND
+	mttc0	ta1, CP0_TCBIND
 	/* Set exclusive TC, non-active, master */
 	li	t0, VPECONF0_MVP
-	sll	t1, t5, VPECONF0_XTC_SHIFT
+	sll	t1, ta1, VPECONF0_XTC_SHIFT
 	or	t0, t0, t1
 	mttc0	t0, CP0_VPECONF0
@@ -280,8 +280,8 @@ LEAF(mips_cps_core_init)
 	mttc0	t0, CP0_TCHALT
 	/* Next VPE */
-	addiu	t5, t5, 1
+	addiu	ta1, ta1, 1
-	slt	t0, t5, t7
+	slt	t0, ta1, ta3
 	bnez	t0, 1b
 	 nop
@@ -298,19 +298,19 @@ LEAF(mips_cps_core_init)
 LEAF(mips_cps_boot_vpes)
 	/* Retrieve CM base address */
-	la	t0, mips_cm_base
+	PTR_LA	t0, mips_cm_base
-	lw	t0, 0(t0)
+	PTR_L	t0, 0(t0)
 	/* Calculate a pointer to this cores struct core_boot_config */
-	lw	t0, GCR_CL_ID_OFS(t0)
+	PTR_L	t0, GCR_CL_ID_OFS(t0)
 	li	t1, COREBOOTCFG_SIZE
 	mul	t0, t0, t1
-	la	t1, mips_cps_core_bootcfg
+	PTR_LA	t1, mips_cps_core_bootcfg
-	lw	t1, 0(t1)
+	PTR_L	t1, 0(t1)
-	addu	t0, t0, t1
+	PTR_ADDU	t0, t0, t1
 	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
-	has_mt	t6, 1f
+	has_mt	ta2, 1f
 	 li	t9, 0
 	/* Find the number of VPEs present in the core */
@@ -334,24 +334,24 @@ LEAF(mips_cps_boot_vpes)
 1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
 	li	t1, VPEBOOTCFG_SIZE
 	mul	v0, t9, t1
-	lw	t7, COREBOOTCFG_VPECONFIG(t0)
+	PTR_L	ta3, COREBOOTCFG_VPECONFIG(t0)
-	addu	v0, v0, t7
+	PTR_ADDU	v0, v0, ta3
 #ifdef CONFIG_MIPS_MT
 	/* If the core doesn't support MT then return */
-	bnez	t6, 1f
+	bnez	ta2, 1f
 	 nop
 	jr	ra
 	 nop
 	.set	push
-	.set	mips32r2
+	.set	mips64r2
 	.set	mt
 1:	/* Enter VPE configuration state */
 	dvpe
-	la	t1, 1f
+	PTR_LA	t1, 1f
 	jr.hb	t1
 	 nop
 1:	mfc0	t1, CP0_MVPCONTROL
@@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes)
 	ehb
 	/* Loop through each VPE */
-	lw	t6, COREBOOTCFG_VPEMASK(t0)
+	PTR_L	ta2, COREBOOTCFG_VPEMASK(t0)
-	move	t8, t6
+	move	t8, ta2
-	li	t5, 0
+	li	ta1, 0
 	/* Check whether the VPE should be running. If not, skip it */
-1:	andi	t0, t6, 1
+1:	andi	t0, ta2, 1
 	beqz	t0, 2f
 	 nop
@@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes)
 	mfc0	t0, CP0_VPECONTROL
 	ori	t0, t0, VPECONTROL_TARGTC
 	xori	t0, t0, VPECONTROL_TARGTC
-	or	t0, t0, t5
+	or	t0, t0, ta1
 	mtc0	t0, CP0_VPECONTROL
 	ehb
@@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes)
 	/* Calculate a pointer to the VPEs struct vpe_boot_config */
 	li	t0, VPEBOOTCFG_SIZE
-	mul	t0, t0, t5
+	mul	t0, t0, ta1
-	addu	t0, t0, t7
+	addu	t0, t0, ta3
 	/* Set the TC restart PC */
 	lw	t1, VPEBOOTCFG_PC(t0)
@@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes)
 	mttc0	t0, CP0_VPECONF0
 	/* Next VPE */
-2:	srl	t6, t6, 1
+2:	srl	ta2, ta2, 1
-	addiu	t5, t5, 1
+	addiu	ta1, ta1, 1
-	bnez	t6, 1b
+	bnez	ta2, 1b
 	 nop
 	/* Leave VPE configuration state */
@@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes)
 	/* This VPE should be offline, halt the TC */
 	li	t0, TCHALT_H
 	mtc0	t0, CP0_TCHALT
-	la	t0, 1f
+	PTR_LA	t0, 1f
 1:	jr.hb	t0
 	 nop
@@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes)
 	.set	noat
 	lw	$1, TI_CPU(gp)
 	sll	$1, $1, LONGLOG
-	la	\dest, __per_cpu_offset
+	PTR_LA	\dest, __per_cpu_offset
 	addu	$1, $1, \dest
 	lw	$1, 0($1)
-	la	\dest, cps_cpu_state
+	PTR_LA	\dest, cps_cpu_state
 	addu	\dest, \dest, $1
 	.set	pop
 	.endm
...
@@ -73,10 +73,11 @@ NESTED(handle_sys, PT_SIZE, sp)
 	.set	noreorder
 	.set	nomacro
-1:	user_lw(t5, 16(t0))		# argument #5 from usp
+load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
-4:	user_lw(t6, 20(t0))		# argument #6 from usp
+load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
-3:	user_lw(t7, 24(t0))		# argument #7 from usp
+load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
-2:	user_lw(t8, 28(t0))		# argument #8 from usp
+load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
+loads_done:
 	sw	t5, 16(sp)		# argument #5 to ksp
 	sw	t6, 20(sp)		# argument #6 to ksp
@@ -85,10 +86,10 @@ NESTED(handle_sys, PT_SIZE, sp)
 	.set	pop
 	.section __ex_table,"a"
-	PTR	1b,bad_stack
+	PTR	load_a4, bad_stack_a4
-	PTR	2b,bad_stack
+	PTR	load_a5, bad_stack_a5
-	PTR	3b,bad_stack
+	PTR	load_a6, bad_stack_a6
-	PTR	4b,bad_stack
+	PTR	load_a7, bad_stack_a7
 	.previous
 	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
@@ -153,8 +154,8 @@ syscall_trace_entry:
 /* ------------------------------------------------------------------------ */
 /*
- * The stackpointer for a call with more than 4 arguments is bad.
- * We probably should handle this case a bit more drastic.
+ * Our open-coded access area sanity test for the stack pointer
+ * failed. We probably should handle this case a bit more drastic.
  */
 bad_stack:
 	li	v0, EFAULT
@@ -163,6 +164,22 @@ bad_stack:
 	sw	t0, PT_R7(sp)
 	j	o32_syscall_exit
+bad_stack_a4:
+	li	t5, 0
+	b	load_a5
+bad_stack_a5:
+	li	t6, 0
+	b	load_a6
+bad_stack_a6:
+	li	t7, 0
+	b	load_a7
+bad_stack_a7:
+	li	t8, 0
+	b	loads_done
 /*
  * The system call does not exist in this kernel
  */
...
@@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp)
 	daddu	t1, t0, 32
 	bltz	t1, bad_stack
-1:	lw	a4, 16(t0)		# argument #5 from usp
+load_a4: lw	a4, 16(t0)		# argument #5 from usp
-2:	lw	a5, 20(t0)		# argument #6 from usp
+load_a5: lw	a5, 20(t0)		# argument #6 from usp
-3:	lw	a6, 24(t0)		# argument #7 from usp
+load_a6: lw	a6, 24(t0)		# argument #7 from usp
-4:	lw	a7, 28(t0)		# argument #8 from usp (for indirect syscalls)
+load_a7: lw	a7, 28(t0)		# argument #8 from usp
+loads_done:
 	.section __ex_table,"a"
-	PTR	1b, bad_stack
+	PTR	load_a4, bad_stack_a4
-	PTR	2b, bad_stack
+	PTR	load_a5, bad_stack_a5
-	PTR	3b, bad_stack
+	PTR	load_a6, bad_stack_a6
-	PTR	4b, bad_stack
+	PTR	load_a7, bad_stack_a7
 	.previous
 	li	t1, _TIF_WORK_SYSCALL_ENTRY
@@ -167,6 +168,22 @@ bad_stack:
 	sd	t0, PT_R7(sp)
 	j	o32_syscall_exit
+bad_stack_a4:
+	li	a4, 0
+	b	load_a5
+bad_stack_a5:
+	li	a5, 0
+	b	load_a6
+bad_stack_a6:
+	li	a6, 0
+	b	load_a7
+bad_stack_a7:
+	li	a7, 0
+	b	loads_done
 not_o32_scall:
 /*
  * This is not an o32 compatibility syscall, pass it on
@@ -383,7 +400,7 @@ EXPORT(sys32_call_table)
 	PTR	sys_connect			/* 4170 */
 	PTR	sys_getpeername
 	PTR	sys_getsockname
-	PTR	sys_getsockopt
+	PTR	compat_sys_getsockopt
 	PTR	sys_listen
 	PTR	compat_sys_recv			/* 4175 */
 	PTR	compat_sys_recvfrom
...
@@ -337,6 +337,11 @@ static void __init bootmem_init(void)
 			min_low_pfn = start;
 		if (end <= reserved_end)
 			continue;
+#ifdef CONFIG_BLK_DEV_INITRD
+		/* mapstart should be after initrd_end */
+		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
+			continue;
+#endif
 		if (start >= mapstart)
 			continue;
 		mapstart = max(reserved_end, start);
@@ -366,14 +371,6 @@ static void __init bootmem_init(void)
 		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 	}
-#ifdef CONFIG_BLK_DEV_INITRD
-	/*
-	 * mapstart should be after initrd_end
-	 */
-	if (initrd_end)
-		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
-#endif
 	/*
 	 * Initialize the boot-time allocator with low memory only.
 	 */
...
@@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 	/*
 	 * Patch the start of mips_cps_core_entry to provide:
 	 *
-	 * v0 = CM base address
+	 * v1 = CM base address
 	 * s0 = kseg0 CCA
 	 */
 	entry_code = (u32 *)&mips_cps_core_entry;
@@ -369,7 +369,7 @@ void play_dead(void)
 static void wait_for_sibling_halt(void *ptr_cpu)
 {
-	unsigned cpu = (unsigned)ptr_cpu;
+	unsigned cpu = (unsigned long)ptr_cpu;
 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	unsigned halted;
 	unsigned long flags;
@@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu)
 	 */
 	err = smp_call_function_single(cpu_death_sibling,
 				       wait_for_sibling_halt,
-				       (void *)cpu, 1);
+				       (void *)(unsigned long)cpu, 1);
 	if (err)
 		panic("Failed to call remote sibling CPU\n");
 }
...
@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
+/*
+ * A logcal cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
 	}
 }
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+static inline void calculate_cpu_foreign_map(void)
+{
+	int i, k, core_present;
+	cpumask_t temp_foreign_map;
+	/* Re-calculate the mask */
+	for_each_online_cpu(i) {
+		core_present = 0;
+		for_each_cpu(k, &temp_foreign_map)
+			if (cpu_data[i].package == cpu_data[k].package &&
+			    cpu_data[i].core == cpu_data[k].core)
+				core_present = 1;
+		if (!core_present)
+			cpumask_set_cpu(i, &temp_foreign_map);
+	}
+	cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+}
 struct plat_smp_ops *mp_ops;
 EXPORT_SYMBOL(mp_ops);
@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
 	set_cpu_sibling_map(cpu);
 	set_cpu_core_map(cpu);
+	calculate_cpu_foreign_map();
 	cpumask_set_cpu(cpu, &cpu_callin_map);
 	synchronise_count_slave(cpu);
@@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void)
 static void stop_this_cpu(void *dummy)
 {
 	/*
-	 * Remove this CPU:
+	 * Remove this CPU. Be a bit slow here and
+	 * set the bits for every online CPU so we don't miss
+	 * any IPI whilst taking this VPE down.
 	 */
+	cpumask_copy(&cpu_foreign_map, cpu_online_mask);
+	/* Make it visible to every other CPU */
+	smp_mb();
 	set_cpu_online(smp_processor_id(), false);
+	calculate_cpu_foreign_map();
 	local_irq_disable();
 	while (1);
 }
@@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 	set_cpu_core_map(0);
+	calculate_cpu_foreign_map();
 #ifndef CONFIG_HOTPLUG_CPU
 	init_cpu_present(cpu_possible_mask);
 #endif
...
@@ -2130,10 +2130,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
-		/* Boot CPU's cache setup in setup_arch(). */
-		if (!is_boot_cpu)
-			cpu_cache_init();
-		tlb_init();
+	/* Boot CPU's cache setup in setup_arch(). */
+	if (!is_boot_cpu)
+		cpu_cache_init();
+	tlb_init();
 	TLBMISS_HANDLER_SETUP();
 }
...
@@ -3,7 +3,7 @@
  * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
  * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org)
  *
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  * This program is free software; you can redistribute it and/or modify it
...
@@ -6,7 +6,7 @@
  * Copyright 2003 ICT CAS
  * Author: Michael Guo <guoyi@ict.ac.cn>
  *
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
 * Copyright (C) 2009 Lemote Inc.
...
 /*
  * CS5536 General timer functions
  *
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Yanhua, yanh@lemote.com
  *
  * Copyright (C) 2009 Lemote Inc.
...
@@ -6,7 +6,7 @@
  * Copyright 2003 ICT CAS
  * Author: Michael Guo <guoyi@ict.ac.cn>
  *
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  * Copyright (C) 2009 Lemote Inc.
...
 /*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  * This program is free software; you can redistribute it and/or modify it
...
 /*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  * This program is free software; you can redistribute it and/or modify it
...
 /*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
  * Author: Fuxin Zhang, zhangfx@lemote.com
  *
  * This program is free software; you can redistribute it and/or modify it
...
 /*
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
  * Author: Yanhua, yanh@lemote.com
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -15,7 +15,7 @@
 #include <linux/spinlock.h>
 #include <asm/clock.h>
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
 static LIST_HEAD(clock_list);
 static DEFINE_SPINLOCK(clock_lock);
...
 /*
  * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
- * Insititute of Computing Technology
+ * Institute of Computing Technology
  * Author: Xiang Gao, gaoxiang@ict.ac.cn
  *         Huacai Chen, chenhc@lemote.com
  *         Xiaofu Meng, Shuangshuang Zhang
...
@@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 		/* Fall through */
 	case jr_op:
 		/* For R6, JR already emulated in jalr_op */
-		if (NO_R6EMU && insn.r_format.opcode == jr_op)
+		if (NO_R6EMU && insn.r_format.func == jr_op)
 			break;
 		*contpc = regs->regs[insn.r_format.rs];
 		return 1;
@@ -551,7 +551,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 			dec_insn.next_pc_inc;
 		return 1;
 	case blezl_op:
-		if (NO_R6EMU)
+		if (!insn.i_format.rt && NO_R6EMU)
 			break;
 	case blez_op:
@@ -588,7 +588,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 			dec_insn.next_pc_inc;
 		return 1;
 	case bgtzl_op:
-		if (NO_R6EMU)
+		if (!insn.i_format.rt && NO_R6EMU)
 			break;
 	case bgtz_op:
 		/*
...
@@ -37,6 +37,7 @@
 #include <asm/cacheflush.h> /* for run_uncached() */
 #include <asm/traps.h>
 #include <asm/dma-coherence.h>
+#include <asm/mips-cm.h>
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
 	preempt_disable();
-#ifndef CONFIG_MIPS_MT_SMP
-	smp_call_function(func, info, 1);
-#endif
+	/*
+	 * The Coherent Manager propagates address-based cache ops to other
+	 * cores but not index-based ops. However, r4k_on_each_cpu is used
+	 * in both cases so there is no easy way to tell what kind of op is
+	 * executed to the other cores. The best we can probably do is
+	 * to restrict that call when a CM is not present because both
+	 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
	 */
+	if (!mips_cm_present())
+		smp_call_function_many(&cpu_foreign_map, func, info, 1);
 	func(info);
 	preempt_enable();
 }
@@ -937,7 +945,9 @@ static void b5k_instruction_hazard(void)
 }
 static char *way_string[] = { NULL, "direct mapped", "2-way",
-	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
+	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+	"9-way", "10-way", "11-way", "12-way",
+	"13-way", "14-way", "15-way", "16-way",
 };
 static void probe_pcache(void)
...
@@ -119,18 +119,24 @@ void read_persistent_clock(struct timespec *ts)
 int get_c0_fdc_int(void)
 {
-	int mips_cpu_fdc_irq;
+	/*
+	 * Some cores claim the FDC is routable through the GIC, but it doesn't
+	 * actually seem to be connected for those Malta bitstreams.
+	 */
+	switch (current_cpu_type()) {
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
+		return -1;
+	};
 	if (cpu_has_veic)
-		mips_cpu_fdc_irq = -1;
+		return -1;
 	else if (gic_present)
-		mips_cpu_fdc_irq = gic_get_c0_fdc_int();
+		return gic_get_c0_fdc_int();
 	else if (cp0_fdc_irq >= 0)
-		mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
 	else
-		mips_cpu_fdc_irq = -1;
+		return -1;
-	return mips_cpu_fdc_irq;
 }
 int get_c0_perfcount_int(void)
...
@@ -63,13 +63,19 @@ void __init plat_mem_setup(void)
 	plat_setup_iocoherency();
 }
 #define DEFAULT_CPC_BASE_ADDR	0x1bde0000
+#define DEFAULT_CDMM_BASE_ADDR	0x1bdd0000
 phys_addr_t mips_cpc_default_phys_base(void)
 {
 	return DEFAULT_CPC_BASE_ADDR;
 }
+phys_addr_t mips_cdmm_phys_base(void)
+{
+	return DEFAULT_CDMM_BASE_ADDR;
+}
 static void __init mips_nmi_setup(void)
 {
 	void *base;
...
@@ -27,6 +27,11 @@ int get_c0_perfcount_int(void)
 	return gic_get_c0_perfcount_int();
 }
+int get_c0_fdc_int(void)
+{
+	return gic_get_c0_fdc_int();
+}
 void __init plat_time_init(void)
 {
 	struct device_node *np;
...
@@ -3,7 +3,7 @@
  *
  * The 2E revision of loongson processor not support this feature.
  *
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
  * Author: Yanhua, yanh@lemote.com
  *
  * This file is subject to the terms and conditions of the GNU General Public
...
@@ -257,16 +257,6 @@ int gic_get_c0_fdc_int(void)
 		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
 	}
-	/*
-	 * Some cores claim the FDC is routable but it doesn't actually seem to
-	 * be connected.
-	 */
-	switch (current_cpu_type()) {
-	case CPU_INTERAPTIV:
-	case CPU_PROAPTIV:
-		return -1;
-	}
 	return irq_create_mapping(gic_irq_domain,
 				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
 }
...