Commit ce20269d authored by Linus Torvalds

Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] SMTC: Fix recursion in instant IPI replay code.
  [MIPS] BCM1480: Fix setting of irq affinity.
  [MIPS] do_page_fault() needs to use raw_smp_processor_id().
  [MIPS] SMTC: Fix false trigger of debug code on single VPE.
  [MIPS] SMTC: irq_{enter,leave} and kstats keeping for relayed timer ints.
  [MIPS] lockdep: Deal with interrupt disable hazard in TRACE_IRQFLAGS
  [MIPS] lockdep: Handle interrupts in R3000 style c0_status register.
  [MIPS] MV64340: Add missing prototype for mv64340_irq_init().
  [MIPS] MT: MIPS_MT_SMTC_INSTANT_REPLAY currently conflicts with PREEMPT.
  [MIPS] EV64120: Include <asm/irq.h> to fix warning.
  [MIPS] Ocelot: Fix warning.
  [MIPS] Ocelot: Give PMON_v1_setup a proper prototype.
parents 9754c5f6 8a1e97ee
@@ -1606,7 +1606,7 @@ config MIPS_MT_FPAFF
 config MIPS_MT_SMTC_INSTANT_REPLAY
 	bool "Low-latency Dispatch of Deferred SMTC IPIs"
-	depends on MIPS_MT_SMTC
+	depends on MIPS_MT_SMTC && !PREEMPT
 	default y
 	help
 	  SMTC pseudo-interrupts between TCs are deferred and queued
...
@@ -32,7 +32,6 @@ void __init prom_init(void)
 	char **arg = (char **) fw_arg1;
 	char **env = (char **) fw_arg2;
 	struct callvectors *cv = (struct callvectors *) fw_arg3;
-	uint32_t tmp;
 	int i;
 	/* save the PROM vectors for debugging use */
...
@@ -79,7 +79,7 @@ static char reset_reason;
 static void __init setup_l3cache(unsigned long size);
 /* setup code for a handoff from a version 1 PMON 2000 PROM */
-void PMON_v1_setup()
+static void PMON_v1_setup(void)
 {
 	/* A wired TLB entry for the GT64120A and the serial port. The
 	   GT64120A is going to be hit on every IRQ anyway - there's
...
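Why this silences the compiler: in C, an empty parameter list as in `void PMON_v1_setup()` is an old-style (non-prototype) definition, which trips -Wstrict-prototypes in a kernel build; `static void PMON_v1_setup(void)` is a proper prototype and gives the file-local setup hook internal linkage to match.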
@@ -121,7 +121,11 @@ FEXPORT(restore_partial)	# restore partial frame
 	SAVE_AT
 	SAVE_TEMP
 	LONG_L	v0, PT_STATUS(sp)
-	and	v0, 1
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+	and	v0, ST0_IEP
+#else
+	and	v0, ST0_IE
+#endif
 	beqz	v0, 1f
 	jal	trace_hardirqs_on
 	b	2f
...
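The old `and v0, 1` hard-coded the R4000-style Status layout. A minimal standalone C sketch of the distinction the #if now encodes (the bit values are assumptions from the usual MIPS Status register layout, mirroring what asm/mipsregs.h defines; they are not taken from this diff):

#define ST0_IE  0x00000001  /* R4000 style: current interrupt-enable bit */
#define ST0_IEP 0x00000004  /* R3000 style: *previous* IE; exception entry
                               shifts the (KU,IE) stack, so the interrupted
                               context's enable bit lands here */

/* Returns nonzero if the interrupted context had interrupts enabled. */
static unsigned long irqs_were_on(unsigned long status, int r3000_style)
{
	return status & (r3000_style ? ST0_IEP : ST0_IE);
}

On an R3000-class CPU the saved Status tested by restore_partial carries the relevant bit at 0x4, so the old mask of 1 would have told lockdep the wrong thing.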
@@ -128,6 +128,37 @@ handle_vcei:
 	.align	5
 NESTED(handle_int, PT_SIZE, sp)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/*
+	 * Check to see if the interrupted code has just disabled
+	 * interrupts and ignore this interrupt for now if so.
+	 *
+	 * local_irq_disable() disables interrupts and then calls
+	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
+	 * after interrupts are disabled but before the state is updated
+	 * it will appear to restore_all that it is incorrectly returning with
+	 * interrupts disabled.
+	 */
+	.set	push
+	.set	noat
+	mfc0	k0, CP0_STATUS
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+	and	k0, ST0_IEP
+	bnez	k0, 1f
+	mfc0	k0, CP0_EPC
+	.set	noreorder
+	j	k0
+	rfe
+#else
+	and	k0, ST0_IE
+	bnez	k0, 1f
+	eret
+#endif
+1:
+	.set	pop
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	SAVE_ALL
 	CLI
 	TRACE_IRQS_OFF
...
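A hedged pseudo-C rendering of the window the new check closes (the function names are the real ones; the body and the hazard framing are reconstructions from the comment above and the "interrupt disable hazard" summary line, not kernel code):

static inline void local_irq_disable_sketch(void)
{
	raw_local_irq_disable();   /* mtc0 clears Status.IE ...             */
	/*
	 * ... but a MIPS CP0 write hazard means an interrupt can still
	 * be taken right here, after IE is logically clear and before
	 * lockdep has recorded anything.  Without the check above,
	 * restore_all would then seem to return with interrupts
	 * disabled while lockdep still believes they are on.  handle_int
	 * now detects the case (saved Status already shows IRQs off)
	 * and returns straight to the EPC via eret/rfe.
	 */
	trace_hardirqs_off();      /* lockdep bookkeeping happens only here */
}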
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <asm/cpu.h>
@@ -14,6 +15,7 @@
 #include <asm/hazards.h>
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
+#include <asm/mips-boards/maltaint.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
 #include <asm/time.h>
@@ -75,7 +77,7 @@ static struct smtc_ipi_q freeIPIq;
 void ipi_decode(struct smtc_ipi *);
 static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
-static void setup_cross_vpe_interrupts(void);
+static void setup_cross_vpe_interrupts(unsigned int nvpe);
 void init_smtc_stats(void);
 /* Global SMTC Status */
@@ -168,7 +170,10 @@ __setup("tintq=", tintq);
 int imstuckcount[2][8];
 /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
+int vpemask[2][8] = {
+	{0, 0, 1, 0, 0, 0, 0, 1},
+	{0, 0, 0, 0, 0, 0, 0, 1}
+};
 int tcnoprog[NR_CPUS];
 static atomic_t idle_hook_initialized = {0};
 static int clock_hang_reported[NR_CPUS];
@@ -501,8 +506,7 @@ void mipsmt_prepare_cpus(void)
 	/* If we have multiple VPEs running, set up the cross-VPE interrupt */
-	if (nvpe > 1)
-		setup_cross_vpe_interrupts();
+	setup_cross_vpe_interrupts(nvpe);
 	/* Set up queue of free IPI "messages". */
 	nipi = NR_CPUS * IPIBUF_PER_CPU;
@@ -607,7 +611,12 @@ void smtc_cpus_done(void)
 int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
 {
+	unsigned int vpe = current_cpu_data.vpe_id;
+
 	irq_hwmask[irq] = hwmask;
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+	vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1;
+#endif
 	return setup_irq(irq, new);
 }
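This pairs with the vpemask re-initialization above: instead of hard-coding which IM bits each VPE is assumed to drive, setup_irq_smtc() now marks a bit whenever an interrupt is actually registered, so the idle-hook debug code should no longer trip over IM bits that a single-VPE configuration never uses (the "false trigger of debug code on single VPE" item in the summary).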
@@ -812,12 +821,15 @@ void ipi_decode(struct smtc_ipi *pipi)
 	smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
+		irq_enter();
+		kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++;
 		/* Invoke Clock "Interrupt" */
 		ipi_timer_latch[dest_copy] = 0;
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 		clock_hang_reported[dest_copy] = 0;
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 		local_timer_interrupt(0, NULL);
+		irq_exit();
 		break;
 	case LINUX_SMP_IPI:
 		switch ((int)arg_copy) {
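What the new bracket buys: irq_enter() raises preempt_count by the hardirq offset so that in_interrupt() is true while local_timer_interrupt() runs, and irq_exit() processes any softirqs the tick raised, the same treatment a hardware timer interrupt receives. The kstat_this_cpu increment (hence the new <linux/kernel_stat.h> and maltaint.h includes) additionally credits the relayed tick in the per-CPU interrupt statistics.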
@@ -965,8 +977,11 @@ static void ipi_irq_dispatch(void)
 static struct irqaction irq_ipi;
-static void setup_cross_vpe_interrupts(void)
+static void setup_cross_vpe_interrupts(unsigned int nvpe)
 {
+	if (nvpe < 1)
+		return;
+
 	if (!cpu_has_vint)
 		panic("SMTC Kernel requires Vectored Interupt support");
@@ -984,10 +999,17 @@ static void setup_cross_vpe_interrupts(void)
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
+ *
+ * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
+ * called with interrupts disabled.  We do rely on interrupts being disabled
+ * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
+ * result in a recursive call to raw_local_irq_restore().
  */
-void smtc_ipi_replay(void)
+static void __smtc_ipi_replay(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/*
 	 * To the extent that we've ever turned interrupts off,
 	 * we may have accumulated deferred IPIs.  This is subtle.
@@ -1002,17 +1024,30 @@ void smtc_ipi_replay(void)
 	 * is clear, and we'll handle it as a real pseudo-interrupt
 	 * and not a pseudo-pseudo interrupt.
 	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
-
-		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
+	if (IPIQ[cpu].depth > 0) {
+		while (1) {
+			struct smtc_ipi_q *q = &IPIQ[cpu];
+			struct smtc_ipi *pipi;
+			extern void self_ipi(struct smtc_ipi *);
+
+			spin_lock(&q->lock);
+			pipi = __smtc_ipi_dq(q);
+			spin_unlock(&q->lock);
+			if (!pipi)
+				break;
+
 			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
+			smtc_cpu_stats[cpu].selfipis++;
 		}
 	}
 }

+void smtc_ipi_replay(void)
+{
+	raw_local_irq_disable();
+	__smtc_ipi_replay();
+}
+
 EXPORT_SYMBOL(smtc_ipi_replay);

 void smtc_idle_loop_hook(void)
@@ -1117,7 +1152,13 @@ void smtc_idle_loop_hook(void)
 	 * is in use, there should never be any.
 	 */
 #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-	smtc_ipi_replay();
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__smtc_ipi_replay();
+		local_irq_restore(flags);
+	}
 #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
 }
...
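The recursion named in the new header comment, sketched as a call chain (reconstructed from these hunks, not quoted from the commit):

    raw_local_irq_restore(flags)
      -> smtc_ipi_replay()                  (instant-replay epilogue)
        -> smtc_ipi_dq()
          -> spin_unlock_irqrestore()
            -> raw_local_irq_restore()      (recursion)

Splitting out __smtc_ipi_replay(), which takes q->lock with plain spin_lock()/spin_unlock() and relies on the caller having already disabled interrupts, breaks the cycle; the exported smtc_ipi_replay() re-establishes that invariant with raw_local_irq_disable() for any other callers, and the idle hook does the same with local_irq_save()/local_irq_restore().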
@@ -42,7 +42,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	siginfo_t info;
 #if 0
-	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", smp_processor_id(),
+	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
 	       current->comm, current->pid, field, address, write,
 	       field, regs->cp0_epc);
 #endif
@@ -165,7 +165,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
 	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
-	       smp_processor_id(), field, address, field, regs->cp0_epc,
+	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
 	       field, regs->regs[31]);
 	die("Oops", regs);
@@ -228,7 +228,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;
-		pgd = (pgd_t *) pgd_current[smp_processor_id()] + offset;
+		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
 		pgd_k = init_mm.pgd + offset;
 		if (!pgd_present(*pgd_k))
...
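Why the raw_ variant matters in this path: under CONFIG_DEBUG_PREEMPT, smp_processor_id() routes through a checked helper that may printk() a warning when called from preemptible context, which is unsafe from inside the page-fault handler itself. Roughly (a from-memory sketch of kernels of this era, not part of the diff):

/* <linux/smp.h>, approximately: */
#ifdef CONFIG_DEBUG_PREEMPT
# define smp_processor_id()	debug_smp_processor_id()   /* checked, may warn */
#else
# define smp_processor_id()	raw_smp_processor_id()
#endif

/* <asm-mips/smp.h>, approximately: */
#define raw_smp_processor_id()	(current_thread_info()->cpu)   /* no checks */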
 #include <linux/pci.h>
+#include <asm/irq.h>
 int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
...
@@ -141,11 +141,11 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
 	unsigned long flags;
 	unsigned int irq_dirty;
-	i = first_cpu(mask);
-	if (next_cpu(i, mask) <= NR_CPUS) {
+	if (cpus_weight(mask) != 1) {
 		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
 		return;
 	}
+	i = first_cpu(mask);
 	/* Convert logical CPU to physical CPU */
 	cpu = cpu_logical_map(i);
...
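The old test could never let a valid mask through: next_cpu() returns NR_CPUS when the mask holds no CPU after i, so next_cpu(i, mask) <= NR_CPUS is true for every mask, single-CPU ones included, and the function always printed the complaint and bailed out (the intended comparison was presumably < NR_CPUS, i.e. "a second CPU exists"). cpus_weight(mask) != 1 states the real requirement, exactly one bit set, and first_cpu() is hoisted below the check, where its result is actually used.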
@@ -13,29 +13,9 @@
 #ifndef __ASSEMBLY__

+#include <linux/compiler.h>
+
 #include <asm/hazards.h>

-/*
- * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred IPIs,
- * at the cost of branch and call overhead on each local_irq_restore()
- */
-
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-
-extern void smtc_ipi_replay(void);
-
-#define irq_restore_epilog(flags)			\
-	do {						\
-		if (!(flags & 0x0400))			\
-			smtc_ipi_replay();		\
-	} while (0)
-
-#else
-
-#define irq_restore_epilog(ignore) do { } while (0)
-
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
-
 __asm__ (
 	"	.macro	raw_local_irq_enable				\n"
 	"	.set	push						\n"
@@ -205,17 +185,28 @@ __asm__ (
 	"	.set	pop						\n"
 	"	.endm							\n");

-#define raw_local_irq_restore(flags)			\
-do {							\
-	unsigned long __tmp1;				\
-							\
-	__asm__ __volatile__(				\
-		"raw_local_irq_restore\t%0"		\
-		: "=r" (__tmp1)				\
-		: "0" (flags)				\
-		: "memory");				\
-	irq_restore_epilog(flags);			\
-} while(0)
+extern void smtc_ipi_replay(void);
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	/*
+	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}

 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
...
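Two notes on the new inline (the bit name is my reading of the magic number, not stated in the hunk): making raw_local_irq_restore() a static inline gets type checking on flags and allows unlikely(), hence the new <linux/compiler.h> include, to keep the replay call off the hot path; and 0x0400 is bit 10 of the flags word, which on SMTC is a TCStatus image whose bit 10 is IXMT, the per-TC "interrupt exempt" bit, so deferred IPIs are replayed only when the restore is actually re-enabling interrupts. Note also that the replay now runs before the flags are written back, while interrupts are still known to be off, matching the invariant __smtc_ipi_replay() relies on.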
@@ -54,5 +54,6 @@ struct mv_pci_controller {
 };
 extern void ll_mv64340_irq(void);
+extern void mv64340_irq_init(unsigned int base);
 #endif /* __ASM_MIPS_MARVELL_H */
@@ -65,12 +65,10 @@ static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
 	spin_unlock_irqrestore(&q->lock, flags);
 }

-static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
 {
 	struct smtc_ipi *p;
-	long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
+
 	if (q->head == NULL)
 		p = NULL;
 	else {
@@ -81,7 +79,19 @@ static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
 		if (q->head == NULL)
 			q->tail = NULL;
 	}
+
+	return p;
+}
+
+static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+{
+	unsigned long flags;
+	struct smtc_ipi *p;
+
+	spin_lock_irqsave(&q->lock, flags);
+	p = __smtc_ipi_dq(q);
 	spin_unlock_irqrestore(&q->lock, flags);

 	return p;
 }
...
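The shape is the kernel's usual lock-split convention: a double-underscore helper, __smtc_ipi_dq(), that assumes the caller already holds q->lock, plus a self-locking smtc_ipi_dq() wrapper for everyone else. That split is what lets __smtc_ipi_replay() dequeue under a plain spin_lock()/spin_unlock() while interrupts are known to be off, avoiding the spin_unlock_irqrestore() recursion fixed above. As a bonus, the wrapper declares flags as unsigned long, the correct type for the irqsave API, where the old code used plain long.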