Commit e105eabb authored by Linus Torvalds

Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] SMTC: Fix SMTC dyntick support.
  [MIPS] SMTC: Close tiny holes in the SMTC IPI replay system.
  [MIPS] SMTC: Fix holes in SMTC and FPU affinity support.
  [MIPS] SMTC: Build fix: Fix filename in Makefile
  [MIPS] Build fix: Fix irq flags type
parents 1db9b837 8531a35e
@@ -1403,7 +1403,6 @@ config MIPS_MT_SMTC
	depends on CPU_MIPS32_R2
	#depends on CPU_MIPS64_R2 # once there is hardware ...
	depends on SYS_SUPPORTS_MULTITHREADING
-	select GENERIC_CLOCKEVENTS_BROADCAST
	select CPU_MIPSR2_IRQ_VI
	select CPU_MIPSR2_IRQ_EI
	select MIPS_MT
@@ -1451,32 +1450,17 @@ config MIPS_VPE_LOADER
	  Includes a loader for loading an elf relocatable object
	  onto another VPE and running it.

-config MIPS_MT_SMTC_INSTANT_REPLAY
-	bool "Low-latency Dispatch of Deferred SMTC IPIs"
-	depends on MIPS_MT_SMTC && !PREEMPT
-	default y
-	help
-	  SMTC pseudo-interrupts between TCs are deferred and queued
-	  if the target TC is interrupt-inhibited (IXMT). In the first
-	  SMTC prototypes, these queued IPIs were serviced on return
-	  to user mode, or on entry into the kernel idle loop. The
-	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
-	  processing, which adds runtime overhead (hence the option to turn
-	  it off), but ensures that IPIs are handled promptly even under
-	  heavy I/O interrupt load.
-
 config MIPS_MT_SMTC_IM_BACKSTOP
	bool "Use per-TC register bits as backstop for inhibited IM bits"
	depends on MIPS_MT_SMTC
-	default y
+	default n
	help
	  To support multiple TC microthreads acting as "CPUs" within
	  a VPE, VPE-wide interrupt mask bits must be specially manipulated
	  during interrupt handling. To support legacy drivers and interrupt
	  controller management code, SMTC has a "backstop" to track and
	  if necessary restore the interrupt mask. This has some performance
-	  impact on interrupt service overhead. Disable it only if you know
-	  what you are doing.
+	  impact on interrupt service overhead.

 config MIPS_MT_SMTC_IRQAFF
	bool "Support IRQ affinity API"
@@ -1486,10 +1470,8 @@ config MIPS_MT_SMTC_IRQAFF
	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
	  for SMTC Linux kernel. Requires platform support, of which
	  an example can be found in the MIPS kernel i8259 and Malta
-	  platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
-	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
-	  interrupt dispatch, and should be used only if you know what
-	  you are doing.
+	  platform code. Adds some overhead to interrupt dispatch, and
+	  should be used only if you know what you are doing.

 config MIPS_VPE_LOADER_TOM
	bool "Load VPE program into memory hidden from linux"
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
@@ -12,6 +12,14 @@
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC

 static int mips_next_event(unsigned long delta,
			    struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
	unsigned int cnt;
	int res;

-#ifdef CONFIG_MIPS_MT_SMTC
-	{
-	unsigned long flags, vpflags;
-	local_irq_save(flags);
-	vpflags = dvpe();
-#endif
	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-	evpe(vpflags);
-	local_irq_restore(flags);
-	}
-#endif
	return res;
 }

-static void mips_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
 {
	/* Nothing to do ... */
 }

-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;

-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-	write_c0_compare(read_c0_compare());
-}
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-	/*
-	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-	 * and we can't reliably determine if a counter interrupt has also
-	 * happened (!r2) then don't check for a timer interrupt.
-	 */
-	return (cp0_perfcount_irq < 0) &&
-		perf_irq() == IRQ_HANDLED &&
-		!r2;
-}
+#ifndef CONFIG_MIPS_MT_SMTC

-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
	 * interrupt. Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
-		c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-		if (cpu_data[cpu].vpe_id)
-			goto out;
-		cpu = 0;
-#endif
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}
@@ -107,65 +78,16 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
	return IRQ_HANDLED;
 }

-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-	.flags = IRQF_DISABLED,
-#else
	.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
	.name = "timer",
 };

-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
-	unsigned int cpu;
-
-	for_each_cpu_mask(cpu, mask)
-		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-	//uint64_t mips_freq = mips_hpt_frequency;
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
-
-	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-	cd->name = "SMTC";
-	cd->features = CLOCK_EVT_FEAT_DUMMY;
-
-	/* Calculate the min / max delta */
-	cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-	cd->shift = 0; //32;
-	cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
-
-	cd->rating = 200;
-	cd->irq = 17; //-1;
-//	if (cpu)
-//		cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//	else
-		cd->cpumask = cpumask_of_cpu(cpu);
-
-	cd->set_mode = smtc_set_mode;
-	cd->broadcast = mips_broadcast;
-
-	clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }

-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register. 4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+	do { \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+	} while (0)
+
+int c0_compare_int_usable(void)
 {
	unsigned int delta;
	unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
-		irq_disable_hazard();
+		compare_change_hazard();
		if (c0_compare_int_pending())
			return 0;
	}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
-		irq_disable_hazard();
+		compare_change_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry */
+	compare_change_hazard();
	if (!c0_compare_int_pending())
		return 0;

	write_c0_compare(read_c0_count());
-	irq_disable_hazard();
+	compare_change_hazard();
	if (c0_compare_int_pending())
		return 0;
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
	return 1;
 }

+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
	uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

-#ifdef CONFIG_MIPS_MT_SMTC
-	setup_smtc_dummy_clockevent_device();
-
-	/*
-	 * On SMTC we only register VPE0's compare interrupt as clockevent
-	 * device.
-	 */
-	if (cpu)
-		return 0;
-#endif
-
	if (!c0_compare_int_usable())
		return -ENXIO;
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
	cd->rating = 300;
	cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
-	cd->cpumask = CPU_MASK_ALL;
-#else
	cd->cpumask = cpumask_of_cpu(cpu);
-#endif
	cd->set_next_event = mips_next_event;
-	cd->set_mode = mips_set_mode;
+	cd->set_mode = mips_set_clock_mode;
	cd->event_handler = mips_event_handler;

	clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
	cp0_timer_irq_installed = 1;

-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
	setup_irq(irq, &c0_compare_irqaction);
-#endif

	return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
/*
* Variant clock event timer support for SMTC on MIPS 34K, 1004K
* or other MIPS MT cores.
*
* Notes on SMTC Support:
*
* SMTC has multiple microthread TCs pretending to be Linux CPUs.
* But there's only one Count/Compare pair per VPE, and Compare
* interrupts are taken opportunistically by available TCs
* bound to the VPE with the Count register. The new timer
* framework provides for global broadcasts, but we really
* want VPE-level multicasts for best behavior. So instead
* of invoking the high-level clock-event broadcast code,
* this version of SMTC support uses the historical SMTC
* multicast mechanisms "under the hood", appearing to the
* generic clock layer as if the interrupts are per-CPU.
*
* The approach taken here is to maintain a set of NR_CPUS
* virtual timers, and track which "CPU" needs to be alerted
* at each event.
*
* It's unlikely that we'll see a MIPS MT core with more than
* 2 VPEs, but we *know* that we won't need to handle more
* VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
* is always going to be overkill, but always going to be enough.
*/
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];
/*
* Timestamps stored are absolute values to be programmed
* into Count register. Valid timestamps will never be zero.
* If a Zero Count value is actually calculated, it is converted
* to be a 1, which will introduce one or two CPU cycles of error
* roughly once every four billion events, which at 1000 HZ means
* about once every 50 days. If that's actually a problem, one
* could alternate squashing 0 to 1 and to -1.
*/
#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)
/*
* Time comparison is subtle, as it's really truncated
* modular arithmetic.
*/
#define IS_SOONER(a, b, reference) \
(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
/*
* CATCHUP_INCREMENT, used when the function falls behind the counter.
* Could be an increasing function instead of a constant.
*/
#define CATCHUP_INCREMENT 64
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned long flags;
unsigned int mtflags;
unsigned long timestamp, reference, previous;
unsigned long nextcomp = 0L;
int vpe = current_cpu_data.vpe_id;
int cpu = smp_processor_id();
local_irq_save(flags);
mtflags = dmt();
/*
* Maintain the per-TC virtual timer
* and program the per-VPE shared Count register
* as appropriate here...
*/
reference = (unsigned long)read_c0_count();
timestamp = MAKEVALID(reference + delta);
/*
* To really model the clock, we have to catch the case
* where the current next-in-VPE timestamp is the old
* timestamp for the calling CPU, but the new value is
* in fact later. In that case, we have to do a full
* scan and discover the new next-in-VPE CPU id and
* timestamp.
*/
previous = smtc_nexttime[vpe][cpu];
if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
&& IS_SOONER(previous, timestamp, reference)) {
int i;
int soonest = cpu;
/*
* Update timestamp array here, so that new
* value gets considered along with those of
* other virtual CPUs on the VPE.
*/
smtc_nexttime[vpe][cpu] = timestamp;
for_each_online_cpu(i) {
if (ISVALID(smtc_nexttime[vpe][i])
&& IS_SOONER(smtc_nexttime[vpe][i],
smtc_nexttime[vpe][soonest], reference)) {
soonest = i;
}
}
smtc_nextinvpe[vpe] = soonest;
nextcomp = smtc_nexttime[vpe][soonest];
/*
* Otherwise, we don't have to process the whole array rank,
* we just have to see if the event horizon has gotten closer.
*/
} else {
if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
IS_SOONER(timestamp,
smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
smtc_nextinvpe[vpe] = cpu;
nextcomp = timestamp;
}
/*
* Since next-in-VPE may be the same as the executing
* virtual CPU, we update the array *after* checking
* its value.
*/
smtc_nexttime[vpe][cpu] = timestamp;
}
/*
* It may be that, in fact, we don't need to update Compare,
* but if we do, we want to make sure we didn't fall into
* a crack just behind Count.
*/
if (ISVALID(nextcomp)) {
write_c0_compare(nextcomp);
ehb();
/*
* We never return an error, we just make sure
* that we trigger the handlers as quickly as
* we can if we fell behind.
*/
while ((nextcomp - (unsigned long)read_c0_count())
> (unsigned long)LONG_MAX) {
nextcomp += CATCHUP_INCREMENT;
write_c0_compare(nextcomp);
ehb();
}
}
emt(mtflags);
local_irq_restore(flags);
return 0;
}
void smtc_distribute_timer(int vpe)
{
unsigned long flags;
unsigned int mtflags;
int cpu;
struct clock_event_device *cd;
unsigned long nextstamp = 0L;
unsigned long reference;
repeat:
for_each_online_cpu(cpu) {
/*
* Find virtual CPUs within the current VPE who have
* unserviced timer requests whose time is now past.
*/
local_irq_save(flags);
mtflags = dmt();
if (cpu_data[cpu].vpe_id == vpe &&
ISVALID(smtc_nexttime[vpe][cpu])) {
reference = (unsigned long)read_c0_count();
if ((smtc_nexttime[vpe][cpu] - reference)
> (unsigned long)LONG_MAX) {
smtc_nexttime[vpe][cpu] = 0L;
emt(mtflags);
local_irq_restore(flags);
/*
* We don't send IPIs to ourself.
*/
if (cpu != smp_processor_id()) {
smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
} else {
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
} else {
/* Local to VPE but Valid Time not yet reached. */
if (!ISVALID(nextstamp) ||
IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
reference)) {
smtc_nextinvpe[vpe] = cpu;
nextstamp = smtc_nexttime[vpe][cpu];
}
emt(mtflags);
local_irq_restore(flags);
}
} else {
emt(mtflags);
local_irq_restore(flags);
}
}
/* Reprogram for interrupt at next soonest timestamp for VPE */
if (ISVALID(nextstamp)) {
write_c0_compare(nextstamp);
ehb();
if ((nextstamp - (unsigned long)read_c0_count())
> (unsigned long)LONG_MAX)
goto repeat;
}
}
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
int cpu = smp_processor_id();
/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
handle_perf_irq(1);
if (read_c0_cause() & (1 << 30)) {
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
smtc_distribute_timer(cpu_data[cpu].vpe_id);
}
return IRQ_HANDLED;
}
int __cpuinit mips_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
int i;
int j;
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
if (cpu == 0) {
for (i = 0; i < num_possible_cpus(); i++) {
smtc_nextinvpe[i] = 0;
for (j = 0; j < num_possible_cpus(); j++)
smtc_nexttime[i][j] = 0L;
}
/*
* SMTC also can't have the usability test
* run by secondary TCs once Compare is in use.
*/
if (!c0_compare_int_usable())
return -ENXIO;
}
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
* interrupt number of its liking.
*/
irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
if (get_c0_compare_int)
irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
/* Calculate the min / max delta */
cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
cd->shift = 32;
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of_cpu(cpu);
cd->set_next_event = mips_next_event;
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
/*
* On SMTC we only want to do the data structure
* initialization and IRQ setup once.
*/
if (cpu)
return 0;
/*
* And we need the hwmask associated with the c0_compare
* vector to be initialized.
*/
irq_hwmask[irq] = (0x100 << cp0_compare_irq);
if (cp0_timer_irq_installed)
return 0;
cp0_timer_irq_installed = 1;
setup_irq(irq, &c0_compare_irqaction);
return 0;
}
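For reference, the MAKEVALID()/IS_SOONER() comparisons in the file above rely on truncated modular arithmetic, so timestamp ordering stays correct across a 32-bit Count wraparound. A minimal standalone sketch of the same idea (illustrative only, not part of this commit; it uses uint32_t in place of the 32-bit MIPS unsigned long):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "which expires first" test, same shape as the kernel's
 * IS_SOONER(): compare distances from a common reference point using
 * unsigned (modulo 2^32) subtraction.
 */
#define IS_SOONER(a, b, reference) \
	(((a) - (uint32_t)(reference)) < ((b) - (uint32_t)(reference)))

int main(void)
{
	uint32_t reference = 0xfffffff0u;	/* Count just before it wraps */
	uint32_t a = 0x00000010u;		/* expires 0x20 cycles after reference */
	uint32_t b = 0x00000100u;		/* expires 0x110 cycles after reference */

	/* Prints 1: 'a' expires sooner than 'b' even though a < reference
	 * numerically, because the subtractions wrap modulo 2^32.
	 */
	printf("%d\n", IS_SOONER(a, b, reference));
	return 0;
}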
@@ -54,14 +54,18 @@ extern void r4k_wait(void);
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 */
-static void r4k_wait_irqoff(void)
+void r4k_wait_irqoff(void)
 {
	local_irq_disable();
	if (!need_resched())
-		__asm__("	.set	mips3	\n"
+		__asm__("	.set	push	\n"
+			"	.set	mips3	\n"
			"	wait		\n"
-			"	.set	mips0	\n");
+			"	.set	pop	\n");
	local_irq_enable();
+	__asm__("	.globl __pastwait	\n"
+		"__pastwait:			\n");
+	return;
 }

 /*
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
 FEXPORT(restore_all)			# restore full frame
 #ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
-	LONG_L	s0, TI_REGS($28)
-	LONG_S	sp, TI_REGS($28)
-	jal	deferred_smtc_ipi
-	LONG_S	s0, TI_REGS($28)
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 /* Re-arm any temporarily masked interrupts not explicitly "acked" */
	mfc0	v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all)			# restore full frame
	xor	t0, t0, t3
	mtc0	t0, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
+/* Detect and execute deferred IPI "interrupts" */
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
+	jal	deferred_smtc_ipi
+	LONG_S	s0, TI_REGS($28)
 #endif /* CONFIG_MIPS_MT_SMTC */
	.set	noat
	RESTORE_TEMP
@@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
	and	t0, a0, t1
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
	mfc0	t2, CP0_TCCONTEXT
-	or	t0, t0, t2
-	mtc0	t0, CP0_TCCONTEXT
+	or	t2, t0, t2
+	mtc0	t2, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
	xor	t1, t1, t0
	mtc0	t1, CP0_STATUS
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
 /*
  * FPU Use Factor empirically derived from experiments on 34K
  */
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000

 static __init int mt_fp_affinity_init(void)
 {
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_MIPS_MT_SMTC
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
@@ -145,19 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC restores TCStatus after Status, and the CU bits
-	 * are aliased there.
-	 */
-	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
-#endif
	clear_tsk_thread_flag(p, TIF_USEDFPU);

 #ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
+
+	/*
+	 * FPU affinity support is cleaner if we track the
+	 * user-visible CPU affinity from the very beginning.
+	 * The generic cpus_allowed mask will already have
+	 * been copied from the parent before copy_thread
+	 * is invoked.
+	 */
+	p->thread.user_cpus_allowed = p->cpus_allowed;
 #endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
	case FPC_EIR: {	/* implementation / version register */
		unsigned int flags;
 #ifdef CONFIG_MIPS_MT_SMTC
-		unsigned int irqflags;
+		unsigned long irqflags;
		unsigned int mtflags;
 #endif /* CONFIG_MIPS_MT_SMTC */
@@ -825,8 +825,10 @@ static void mt_ase_fp_affinity(void)
	if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
		cpumask_t tmask;

-		cpus_and(tmask, current->thread.user_cpus_allowed,
-			 mt_fpu_cpumask);
+		current->thread.user_cpus_allowed
+			= current->cpus_allowed;
+		cpus_and(tmask, current->cpus_allowed,
+			 mt_fpu_cpumask);
		set_cpus_allowed(current, tmask);
		set_thread_flag(TIF_FPUBOUND);
	}
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK)	+= malta-console.o
 obj-$(CONFIG_PCI)		+= malta-pci.o

 # FIXME FIXME FIXME
-obj-$(CONFIG_MIPS_MT_SMTC)	+= malta_smtc.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= malta-smtc.o

 EXTRA_CFLAGS += -Werror
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void)
 static void __init msmtc_smp_setup(void)
 {
-	mipsmt_build_cpu_map(0);
+	/*
+	 * we won't get the definitive value until
+	 * we've run smtc_prepare_cpus later, but
+	 * we would appear to need an upper bound now.
+	 */
+	smp_num_siblings = smtc_build_cpu_map(0);
 }

 static void __init msmtc_prepare_cpus(unsigned int max_cpus)
 {
-	mipsmt_prepare_cpus();
+	smtc_prepare_cpus(max_cpus);
 }

 struct plat_smp_ops msmtc_smp_ops = {
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Kevin D. Kissell
*/
/*
* Definitions used for common event timer implementation
* for MIPS 4K-type processors and their MIPS MT variants.
* Avoids unsightly extern declarations in C files.
*/
#ifndef __ASM_CEVT_R4K_H
#define __ASM_CEVT_R4K_H
DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
void mips_event_handler(struct clock_event_device *dev);
int c0_compare_int_usable(void);
void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
irqreturn_t c0_compare_interrupt(int, void *);
extern struct irqaction c0_compare_irqaction;
extern int cp0_timer_irq_installed;
/*
* Possibly handle a performance counter interrupt.
* Return true if the timer interrupt should not be checked
*/
static inline int handle_perf_irq(int r2)
{
/*
* The performance counter overflow interrupt may be shared with the
* timer interrupt (cp0_perfcount_irq < 0). If it is and a
* performance counter has overflowed (perf_irq() == IRQ_HANDLED)
* and we can't reliably determine if a counter interrupt has also
* happened (!r2) then don't check for a timer interrupt.
*/
return (cp0_perfcount_irq < 0) &&
perf_irq() == IRQ_HANDLED &&
!r2;
}
#endif /* __ASM_CEVT_R4K_H */
@@ -38,8 +38,17 @@ __asm__(
	"	.set	pop						\n"
	"	.endm");

+extern void smtc_ipi_replay(void);
+
 static inline void raw_local_irq_enable(void)
 {
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
	__asm__ __volatile__(
		"raw_local_irq_enable"
		: /* no outputs */
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void)
		: "memory");
 }
+
 /*
  * For cli() we have to insert nops to make sure that the new value
  * has actually arrived in the status register before the end of this
@@ -185,15 +195,14 @@ __asm__(
	"	.set	pop						\n"
	"	.endm							\n");

-extern void smtc_ipi_replay(void);
-
 static inline void raw_local_irq_restore(unsigned long flags)
 {
	unsigned long __tmp1;

-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+#ifdef CONFIG_MIPS_MT_SMTC
	/*
-	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of branch and call overhead on each
	 * local_irq_restore()
	 */
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags)
		: "memory");
 }

+static inline void __raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}
+
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set)				\
 {								\
	unsigned int res;					\
	unsigned int omt;					\
-	unsigned int flags;					\
+	unsigned long flags;					\
								\
	local_irq_save(flags);					\
	omt = __dmt();						\
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear)			\
 {								\
	unsigned int res;					\
	unsigned int omt;					\
-	unsigned int flags;					\
+	unsigned long flags;					\
								\
	local_irq_save(flags);					\
	omt = __dmt();						\
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new)	\
 {								\
	unsigned int res;					\
	unsigned int omt;					\
-	unsigned int flags;					\
+	unsigned long flags;					\
								\
	local_irq_save(flags);					\
								\
@@ -6,6 +6,7 @@
  */

 #include <asm/mips_mt.h>
+#include <asm/smtc_ipi.h>

 /*
  * System-wide SMTC status information
@@ -38,14 +39,15 @@ struct mm_struct;
 struct task_struct;

 void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
+void self_ipi(struct smtc_ipi *);
 void smtc_flush_tlb_asid(unsigned long asid);
-extern int mipsmt_build_cpu_map(int startslot);
-extern void mipsmt_prepare_cpus(void);
+extern int smtc_build_cpu_map(int startslot);
+extern void smtc_prepare_cpus(int cpus);
 extern void smtc_smp_finish(void);
 extern void smtc_boot_secondary(int cpu, struct task_struct *t);
 extern void smtc_cpus_done(void);

 /*
  * Sharing the TLB between multiple VPEs means that the
  * "random" index selection function is not allowed to
@@ -297,14 +297,31 @@
 #ifdef CONFIG_MIPS_MT_SMTC
	.set	mips32r2
	/*
-	 * This may not really be necessary if ints are already
-	 * inhibited here.
+	 * We need to make sure the read-modify-write
+	 * of Status below isn't perturbed by an interrupt
+	 * or cross-TC access, so we need to do at least a DMT,
+	 * protected by an interrupt-inhibit. But setting IXMT
+	 * also creates a few-cycle window where an IPI could
+	 * be queued and not be detected before potentially
+	 * returning to a WAIT or user-mode loop. It must be
+	 * replayed.
+	 *
+	 * We're in the middle of a context switch, and
+	 * we can't dispatch it directly without trashing
+	 * some registers, so we'll try to detect this unlikely
+	 * case and program a software interrupt in the VPE,
+	 * as would be done for a cross-VPE IPI. To accommodate
+	 * the handling of that case, we're doing a DVPE instead
+	 * of just a DMT here to protect against other threads.
+	 * This is a lot of cruft to cover a tiny window.
+	 * If you can find a better design, implement it!
+	 *
	 */
	mfc0	v0, CP0_TCSTATUS
	ori	v0, TCSTATUS_IXMT
	mtc0	v0, CP0_TCSTATUS
	_ehb
-	DMT	5				# dmt a1
+	DVPE	5				# dvpe a1
	jal	mips_ihb
 #endif /* CONFIG_MIPS_MT_SMTC */
	mfc0	a0, CP0_STATUS
@@ -325,17 +342,50 @@
	 */
	LONG_L	v1, PT_TCSTATUS(sp)
	_ehb
-	mfc0	v0, CP0_TCSTATUS
+	mfc0	a0, CP0_TCSTATUS
	andi	v1, TCSTATUS_IXMT
-	/* We know that TCStatus.IXMT should be set from above */
-	xori	v0, v0, TCSTATUS_IXMT
-	or	v0, v0, v1
-	mtc0	v0, CP0_TCSTATUS
-	_ehb
-	andi	a1, a1, VPECONTROL_TE
+	bnez	v1, 0f
+
+/*
+ * We'd like to detect any IPIs queued in the tiny window
+ * above and request a software interrupt to service them
+ * when we ERET.
+ *
+ * Computing the offset into the IPIQ array of the executing
+ * TC's IPI queue in-line would be tedious. We use part of
+ * the TCContext register to hold 16 bits of offset that we
+ * can add in-line to find the queue head.
+ */
+	mfc0	v0, CP0_TCCONTEXT
+	la	a2, IPIQ
+	srl	v0, v0, 16
+	addu	a2, a2, v0
+	LONG_L	v0, 0(a2)
+	beqz	v0, 0f
+/*
+ * If we have a queue, provoke dispatch within the VPE by setting C_SW1
+ */
+	mfc0	v0, CP0_CAUSE
+	ori	v0, v0, C_SW1
+	mtc0	v0, CP0_CAUSE
+0:
+	/*
+	 * This test should really never branch but
+	 * let's be prudent here. Having atomized
+	 * the shared register modifications, we can
+	 * now EVPE, and must do so before interrupts
+	 * are potentially re-enabled.
+	 */
+	andi	a1, a1, MVPCONTROL_EVP
	beqz	a1, 1f
-	emt
+	evpe
 1:
+	/* We know that TCStatus.IXMT should be set from above */
+	xori	a0, a0, TCSTATUS_IXMT
+	or	a0, a0, v1
+	mtc0	a0, CP0_TCSTATUS
+	_ehb
	.set	mips0
 #endif /* CONFIG_MIPS_MT_SMTC */
	LONG_L	v1, PT_EPC(sp)