Commit 340ee4b9 authored by Ralf Baechle

Virtual SMP support for the 34K.

Treat each VPE of a MIPS MT core as a CPU: add the MIPS_MT_SMP Kconfig
option and an smp_mt backend that partitions TCs, VPEs and the private
TLB between the processors, delivers IPIs through the Cause register's
two software interrupt bits, and wires up a handler for the MT
exception (vector 25).

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent d03d0a57
......@@ -1344,6 +1344,14 @@ config CPU_HAS_PREFETCH
config MIPS_MT
bool "Enable MIPS MT"
choice
prompt "MIPS MT options"
depends on MIPS_MT
config MIPS_MT_SMP
bool "Use 1 TC on each available VPE for SMP"
select SMP
config MIPS_VPE_LOADER
bool "VPE loader support."
depends on MIPS_MT
......@@ -1351,6 +1359,8 @@ config MIPS_VPE_LOADER
Includes a loader for loading an ELF relocatable object
onto another VPE and running it.
endchoice
config MIPS_VPE_LOADER_TOM
bool "Load VPE program into memory hidden from linux"
depends on MIPS_VPE_LOADER
......@@ -1496,7 +1506,7 @@ source "mm/Kconfig"
config SMP
bool "Multi-Processing support"
depends on CPU_RM9000 || (SIBYTE_SB1250 && !SIBYTE_STANDALONE) || SGI_IP27
depends on CPU_RM9000 || (SIBYTE_SB1250 && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
......
......@@ -34,6 +34,8 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
......
......@@ -323,6 +323,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
BUILD_HANDLER watch watch sti verbose /* #23 */
BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
BUILD_HANDLER mt mt sti verbose /* #25 */
BUILD_HANDLER dsp dsp sti silent /* #26 */
BUILD_HANDLER reserved reserved sti verbose /* others */
......
/*
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
*
* Elizabeth Clarke (beth@mips.com)
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <asm/mips-boards/maltaint.h>
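/*
 * IPIs use the CPU's two software interrupt lines: line 0 requests a
 * reschedule, line 1 carries smp_call_function() requests.
 */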
#define MIPS_CPU_IPI_RESCHED_IRQ 0
#define MIPS_CPU_IPI_CALL_IRQ 1
static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
#if 0
static void dump_mtregisters(int vpe, int tc)
{
printk("vpe %d tc %d\n", vpe, tc);
settc(tc);
printk(" c0 status 0x%lx\n", read_vpe_c0_status());
printk(" vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
printk(" vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0());
printk(" tcstatus 0x%lx\n", read_tc_c0_tcstatus());
printk(" tcrestart 0x%lx\n", read_tc_c0_tcrestart());
printk(" tcbind 0x%lx\n", read_tc_c0_tcbind());
printk(" tchalt 0x%lx\n", read_tc_c0_tchalt());
}
#endif
void __init sanitize_tlb_entries(void)
{
int i, tlbsiz;
unsigned long mvpconf0, ncpu;
if (!cpu_has_mipsmt)
return;
set_c0_mvpcontrol(MVPCONTROL_VPC);
/* Disable TLB sharing */
clear_c0_mvpcontrol(MVPCONTROL_STLB);
mvpconf0 = read_c0_mvpconf0();
printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
(mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
(mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
if (tlbsiz > 0) {
/* share them out across the VPEs */
tlbsiz /= ncpu;
printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
for (i = 0; i < ncpu; i++) {
settc(i);
if (i == 0)
write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
else
write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
(tlbsiz << 25));
}
}
clear_c0_mvpcontrol(MVPCONTROL_VPC);
}
#if 0
/*
* Use c0_MVPConf0 to find out how many CPUs are available, setting up
* phys_cpu_present_map and the logical/physical mappings.
*/
void __init prom_build_cpu_map(void)
{
int i, num, ncpus;
cpus_clear(phys_cpu_present_map);
/* assume we boot on cpu 0.... */
cpu_set(0, phys_cpu_present_map);
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
if (cpu_has_mipsmt) {
ncpus = ((read_c0_mvpconf0() & (MVPCONF0_PVPE)) >> MVPCONF0_PVPE_SHIFT) + 1;
for (i=1, num=0; i< NR_CPUS && i<ncpus; i++) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
printk(KERN_INFO "%i available secondary CPU(s)\n", num);
}
}
#endif
static void ipi_resched_dispatch (struct pt_regs *regs)
{
do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs);
}
static void ipi_call_dispatch (struct pt_regs *regs)
{
do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs);
}
irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
return IRQ_HANDLED;
}
irqreturn_t ipi_call_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
smp_call_function_interrupt();
return IRQ_HANDLED;
}
static struct irqaction irq_resched = {
.handler = ipi_resched_interrupt,
.flags = SA_INTERRUPT,
.name = "IPI_resched"
};
static struct irqaction irq_call = {
.handler = ipi_call_interrupt,
.flags = SA_INTERRUPT,
.name = "IPI_call"
};
/*
 * Common setup before any secondaries are started.
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 */
void prom_prepare_cpus(unsigned int max_cpus)
{
unsigned long val;
int i, num;
if (!cpu_has_mipsmt)
return;
/* disable MT so we can configure */
dvpe();
dmt();
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
val = read_c0_mvpconf0();
/* we'll always have more TCs than VPEs, so loop setting everything
to a sensible state */
for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {
settc(i);
/* VPE's */
if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
/* deactivate all but vpe0 */
if (i != 0) {
unsigned long tmp = read_vpe_c0_vpeconf0();
tmp &= ~VPECONF0_VPA;
/* master VPE */
tmp |= VPECONF0_MVP;
write_vpe_c0_vpeconf0(tmp);
/* Record this as available CPU */
if (i < max_cpus) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
}
/* disable multi-threading with TC's */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
if (i != 0) {
write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
/* set config to be the same as vpe0, particularly kseg0 coherency algorithm */
write_vpe_c0_config( read_c0_config());
}
}
/* TC's */
if (i != 0) {
unsigned long tmp;
/* bind a TC to each VPE; may as well put all excess TCs
on the last VPE */
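/*
 * The CurVPE field sits in the low bits of TCBind, so OR-ing in a
 * VPE number is enough to bind the TC to that VPE.
 */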
if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
else {
write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
/* and set XTC */
write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
}
tmp = read_tc_c0_tcstatus();
/* mark not allocated and not dynamically allocatable */
tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
tmp |= TCSTATUS_IXMT; /* interrupt exempt */
write_tc_c0_tcstatus(tmp);
write_tc_c0_tchalt(TCHALT_H);
}
}
/* Release config state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/* We'll wait until starting the secondaries before starting MVPE */
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
/* set up ipi interrupts */
if (cpu_has_vint) {
set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
}
cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
setup_irq(cpu_ipi_resched_irq, &irq_resched);
setup_irq(cpu_ipi_call_irq, &irq_call);
/* need to mark IPI's as IRQ_PER_CPU */
irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
}
/*
 * Set up the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from,
 * __KSTK_TOS(idle) is the stack pointer,
 * (unsigned long)idle->thread_info is the gp.
 * Assumes a 1:1 mapping of TC => VPE.
 */
void prom_boot_secondary(int cpu, struct task_struct *idle)
{
dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(cpu);
/* restart */
write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
/* enable the tc this vpe/cpu will be running */
write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
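/* clear the halt bit that prom_prepare_cpus() set for this TC */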
write_tc_c0_tchalt(0);
/* enable the VPE */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
/* stack pointer */
write_tc_gpr_sp( __KSTK_TOS(idle));
/* global pointer */
write_tc_gpr_gp((unsigned long)idle->thread_info);
flush_icache_range((unsigned long)idle->thread_info,
(unsigned long)idle->thread_info +
sizeof(struct thread_info));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(EVPE_ENABLE);
}
void prom_init_secondary(void)
{
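/*
 * Unmask the two software interrupts used as IPIs (IP0, IP1) and
 * the CPU timer interrupt (IP7) on this freshly started VPE.
 */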
write_c0_status((read_c0_status() & ~ST0_IM ) |
(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
}
void prom_smp_finish(void)
{
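/* arm the first timer interrupt on this CPU eight ticks from now */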
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
local_irq_enable();
}
void prom_cpus_done(void)
{
}
void core_send_ipi(int cpu, unsigned int action)
{
int i;
unsigned long flags;
int vpflags;
local_irq_save (flags);
vpflags = dvpe(); /* can't access the other CPU's registers while MVPE is enabled */
switch (action) {
case SMP_CALL_FUNCTION:
i = C_SW1;
break;
case SMP_RESCHEDULE_YOURSELF:
default:
i = C_SW0;
break;
}
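/*
 * C_SW0 and C_SW1 are the Cause register's software interrupt bits;
 * setting one in the target VPE's Cause raises the corresponding
 * interrupt line, which the IPI handlers set up in
 * prom_prepare_cpus() are attached to.
 */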
/* 1:1 mapping of vpe and tc... */
settc(cpu);
write_vpe_c0_cause(read_vpe_c0_cause() | i);
evpe(vpflags);
local_irq_restore(flags);
}
......@@ -28,6 +28,8 @@
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
......@@ -56,6 +58,7 @@ extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
......@@ -797,6 +800,14 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
(regs->cp0_status & ST0_TS) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
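/* no recovery: an MT exception in the kernel is fatal, user tasks get SIGILL */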
die_if_kernel("MIPS MT Thread exception in kernel", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
if (cpu_has_dsp)
......@@ -1338,6 +1349,9 @@ void __init trap_init(void)
if (cpu_has_mcheck)
set_except_vector(24, handle_mcheck);
if (cpu_has_mipsmt)
set_except_vector(25, handle_mt);
if (cpu_has_dsp)
set_except_vector(26, handle_dsp);
......
......@@ -32,7 +32,7 @@
* mknod /dev/vpe0 c 63 0
* mknod /dev/vpe1 c 63 1
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
......@@ -49,6 +49,7 @@
#include <linux/poll.h>
#include <linux/bootmem.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
......@@ -697,7 +698,7 @@ int vpe_run(vpe_t * v)
dvpe();
/* Put MVPE's into 'configuration state' */
write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);
set_c0_mvpcontrol(MVPCONTROL_VPC);
if (!list_empty(&v->tc)) {
if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
......@@ -760,7 +761,7 @@ int vpe_run(vpe_t * v)
write_vpe_c0_cause(0);
/* take system out of configuration state */
write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/* clear interrupts enabled IE, ERL, EXL, and KSU from c0 status */
write_vpe_c0_status(read_vpe_c0_status() & ~(ST0_ERL | ST0_KSU | ST0_IE | ST0_EXL));
......@@ -1134,7 +1135,7 @@ int vpe_free(vpe_handle vpe)
evpe_flags = dvpe();
/* Put MVPE's into 'configuration state' */
write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(t->index);
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
......@@ -1145,7 +1146,7 @@ int vpe_free(vpe_handle vpe)
v->state = VPE_STATE_UNUSED;
write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(evpe_flags);
return 0;
......@@ -1191,7 +1192,7 @@ static int __init vpe_module_init(void)
dvpe();
/* Put MVPE's into 'configuration state' */
write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);
set_c0_mvpcontrol(MVPCONTROL_VPC);
/* dump_mtregs(); */
......@@ -1270,7 +1271,7 @@ static int __init vpe_module_init(void)
}
/* release config state */
write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
clear_c0_mvpcontrol(MVPCONTROL_VPC);
return 0;
}
......
......@@ -61,6 +61,15 @@ static unsigned int display_count = 0;
static unsigned int timer_tick_count=0;
static int mips_cpu_timer_irq;
static inline void scroll_display_message(void)
{
if ((timer_tick_count++ % HZ) == 0) {
mips_display_message(&display_string[display_count++]);
if (display_count == MAX_DISPLAY_COUNT)
display_count = 0;
}
}
static void mips_timer_dispatch (struct pt_regs *regs)
{
do_IRQ (mips_cpu_timer_irq, regs);
......@@ -68,17 +77,42 @@ static void mips_timer_dispatch (struct pt_regs *regs)
irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
#ifdef CONFIG_SMP
int cpu = smp_processor_id();
if (cpu == 0) {
/*
 * CPU 0 handles the global timer interrupt job and process
 * accounting; timer_interrupt() resets the count/compare registers
 * to trigger the next timer interrupt.
 */
(void) timer_interrupt(irq, dev_id, regs);
scroll_display_message();
}
else {
/* Everyone else needs to reset the timer interrupt here, as
ll_local_timer_interrupt() doesn't */
/*
* FIXME: need to cope with counter underflow.
* More support needs to be added to kernel/time for
* counter/timer interrupts on multiple CPU's
*/
write_c0_compare (read_c0_count() + (mips_hpt_frequency/HZ));
/*
* other CPUs should do profiling and process accounting
*/
local_timer_interrupt (irq, dev_id, regs);
}
return IRQ_HANDLED;
#else
irqreturn_t r;
r = timer_interrupt(irq, dev_id, regs);
if ((timer_tick_count++ % HZ) == 0) {
mips_display_message(&display_string[display_count++]);
if (display_count == MAX_DISPLAY_COUNT)
display_count = 0;
}
scroll_display_message();
return r;
#endif
}
/*
......@@ -176,6 +210,13 @@ void __init mips_timer_setup(struct irqaction *irq)
irq->handler = mips_timer_interrupt; /* we use our own handler */
setup_irq(mips_cpu_timer_irq, irq);
#ifdef CONFIG_SMP
/* irq_desc(riptor) is a global resource; when the interrupt overlaps
on separate CPUs, the first one tries to handle the second interrupt.
The effect is that the interrupt remains disabled on the second CPU.
Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
#endif
/* to generate the first timer interrupt */
write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
......
......@@ -7,6 +7,7 @@
#ifndef _ASM_MIPSMTREGS_H
#define _ASM_MIPSMTREGS_H
#include <asm/mipsregs.h>
#include <asm/war.h>
#ifndef __ASSEMBLY__
......@@ -383,6 +384,7 @@ do { \
#define read_tc_gpr_gp() mftgpr(28)
#define write_tc_gpr_gp(val) mttgpr(28, val)
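/* generate the set_c0_mvpcontrol()/clear_c0_mvpcontrol() helpers used
   by the SMP and VPE loader code */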
__BUILD_SET_C0(mvpcontrol)
#endif /* Not __ASSEMBLY__ */
......
......@@ -37,8 +37,6 @@
#define _ULCAST_ (unsigned long)
#endif
#include <asm/mipsmtregs.h>
/*
* Coprocessor 0 register names
*/
......