Commit 31bbed52 authored by Ingo Molnar

Merge branch 'x86/uv' into x86/core

parents 0fbba487 b2b35259
@@ -379,6 +379,7 @@ static inline u32 safe_apic_wait_icr_idle(void)
static inline void ack_APIC_irq(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
@@ -386,6 +387,7 @@ static inline void ack_APIC_irq(void)
/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
#endif
}
static inline unsigned default_get_apic_id(unsigned long x)
...
@@ -33,6 +33,8 @@ BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
smp_invalidate_interrupt)
#endif
BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
/*
* every pentium local APIC has two 'local interrupts', with a
* soft-definable vector attached to both interrupts, one of
...
@@ -12,6 +12,7 @@ typedef struct {
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq_spurious_count;
#endif
unsigned int generic_irqs; /* arch dependent */
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
...
@@ -27,6 +27,7 @@
/* Interrupt handlers registered during init_IRQ */
extern void apic_timer_interrupt(void);
extern void generic_interrupt(void);
extern void error_interrupt(void);
extern void spurious_interrupt(void);
extern void thermal_interrupt(void);
...
@@ -36,6 +36,7 @@ static inline int irq_canonicalize(int irq)
extern void fixup_irqs(void);
#endif
extern void (*generic_interrupt_extension)(void);
extern void init_IRQ(void);
extern void native_init_IRQ(void);
extern bool handle_irq(unsigned irq, struct pt_regs *regs);
...
@@ -111,6 +111,11 @@
*/
#define LOCAL_PERF_VECTOR 0xee
/*
* Generic system vector for platform specific use
*/
#define GENERIC_INTERRUPT_VECTOR 0xed
/*
* First APIC vector available to drivers: (vectors 0x30-0xee) we
* start at 0x31(0x41) to spread out vectors evenly between priority
...
@@ -199,6 +199,10 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define SCIR_CPU_ACTIVITY 0x02 /* not idle */
#define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */
/* Loop through all installed blades */
#define for_each_possible_blade(bid) \
for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)
/*
* Macros for converting between kernel virtual addresses, socket local physical
* addresses, and UV global physical addresses.
...
@@ -111,7 +111,7 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o
...
@@ -984,6 +984,8 @@ apicinterrupt UV_BAU_MESSAGE \
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt GENERIC_INTERRUPT_VECTOR \
generic_interrupt smp_generic_interrupt
#ifdef CONFIG_SMP
apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
...
@@ -15,6 +15,9 @@
atomic_t irq_err_count;
/* Function pointer for generic interrupt vector handling */
void (*generic_interrupt_extension)(void) = NULL;
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -56,6 +59,12 @@ static int show_other_interrupts(struct seq_file *p)
seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
seq_printf(p, " Local timer interrupts\n");
#endif
if (generic_interrupt_extension) {
seq_printf(p, "PLT: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
seq_printf(p, " Platform interrupts\n");
}
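With an extension handler registered, /proc/interrupts gains a per-cpu "PLT" row; on a two-cpu system, given the format strings above, it would read roughly:

PLT:         42          7   Platform interrupts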
#ifdef CONFIG_SMP
seq_printf(p, "RES: ");
for_each_online_cpu(j)
@@ -163,6 +172,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
#ifdef CONFIG_X86_LOCAL_APIC
sum += irq_stats(cpu)->apic_timer_irqs;
#endif
if (generic_interrupt_extension)
sum += irq_stats(cpu)->generic_irqs;
#ifdef CONFIG_SMP
sum += irq_stats(cpu)->irq_resched_count;
sum += irq_stats(cpu)->irq_call_count;
@@ -226,4 +237,27 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
return 1;
}
/*
* Handler for GENERIC_INTERRUPT_VECTOR.
*/
void smp_generic_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
exit_idle();
irq_enter();
inc_irq_stat(generic_irqs);
if (generic_interrupt_extension)
generic_interrupt_extension();
irq_exit();
set_irq_regs(old_regs);
}
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
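The intended way for a platform driver to claim this vector is to test and then set generic_interrupt_extension, exactly as uv_time.c does at the bottom of this commit. A minimal sketch, with hypothetical handler and init names:

#include <asm/irq.h>	/* generic_interrupt_extension */

/* Hypothetical driver-side hook-up of the generic vector. */
static void my_platform_handler(void)
{
	/* runs from smp_generic_interrupt(); APIC already acked, irq_enter() done */
}

static int __init my_platform_intr_init(void)
{
	if (generic_interrupt_extension)	/* vector already claimed */
		return -EBUSY;
	generic_interrupt_extension = my_platform_handler;
	return 0;
}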
@@ -175,6 +175,9 @@ void __init native_init_IRQ(void)
/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
/* generic IPI for platform specific use */
alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
/* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
...
@@ -147,6 +147,9 @@ static void __init apic_intr_init(void)
/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
/* generic IPI for platform specific use */
alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
/* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
...
/*
* SGI RTC clock/timer routines.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) Dimitri Sivanich
*/
#include <linux/clockchips.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#define RTC_NAME "sgi_rtc"
static cycle_t uv_read_rtc(void);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
static void uv_rtc_timer_setup(enum clock_event_mode,
struct clock_event_device *);
static struct clocksource clocksource_uv = {
.name = RTC_NAME,
.rating = 400,
.read = uv_read_rtc,
.mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
.shift = 10,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clock_event_device clock_event_device_uv = {
.name = RTC_NAME,
.features = CLOCK_EVT_FEAT_ONESHOT,
.shift = 20,
.rating = 400,
.irq = -1,
.set_next_event = uv_rtc_next_event,
.set_mode = uv_rtc_timer_setup,
.event_handler = NULL,
};
static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
/* There is one of these allocated per node */
struct uv_rtc_timer_head {
spinlock_t lock;
/* next cpu waiting for timer, local node relative: */
int next_cpu;
/* number of cpus on this node: */
int ncpus;
struct {
int lcpu; /* systemwide logical cpu number */
u64 expires; /* next timer expiration for this cpu */
} cpu[1];
};
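Note that cpu[1] is the old-style flexible-array idiom: uv_rtc_allocate_timers() below over-allocates each per-blade struct by uv_blade_nr_possible_cpus(bid) * 2 * sizeof(u64) bytes, so head->cpu[] can be indexed by a cpu's blade_processor_id.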
/*
* Access to uv_rtc_timer_head via blade id.
*/
static struct uv_rtc_timer_head **blade_info __read_mostly;
static int uv_rtc_enable;
/*
* Hardware interface routines
*/
/* Send IPIs to another node */
static void uv_rtc_send_IPI(int cpu)
{
unsigned long apicid, val;
int pnode;
apicid = cpu_physical_id(cpu);
pnode = uv_apicid_to_pnode(apicid);
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(GENERIC_INTERRUPT_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
UVH_EVENT_OCCURRED0_RTC1_MASK;
}
/* Setup interrupt and return non-zero if early expiration occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
u64 val;
int pnode = uv_cpu_to_pnode(cpu);
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
UVH_RTC1_INT_CONFIG_M_MASK);
uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
UVH_EVENT_OCCURRED0_RTC1_MASK);
val = (GENERIC_INTERRUPT_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
/* Set configuration */
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
/* Initialize comparator value */
uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
}
/*
* Per-cpu timer tracking routines
*/
static __init void uv_rtc_deallocate_timers(void)
{
int bid;
for_each_possible_blade(bid) {
kfree(blade_info[bid]);
}
kfree(blade_info);
}
/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
int cpu;
blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
if (!blade_info)
return -ENOMEM;
memset(blade_info, 0, uv_possible_blades * sizeof(void *));
for_each_present_cpu(cpu) {
int nid = cpu_to_node(cpu);
int bid = uv_cpu_to_blade_id(cpu);
int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
struct uv_rtc_timer_head *head = blade_info[bid];
if (!head) {
head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
(uv_blade_nr_possible_cpus(bid) *
2 * sizeof(u64)),
GFP_KERNEL, nid);
if (!head) {
uv_rtc_deallocate_timers();
return -ENOMEM;
}
spin_lock_init(&head->lock);
head->ncpus = uv_blade_nr_possible_cpus(bid);
head->next_cpu = -1;
blade_info[bid] = head;
}
head->cpu[bcpu].lcpu = cpu;
head->cpu[bcpu].expires = ULLONG_MAX;
}
return 0;
}
/* Find and set the next expiring timer. */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
u64 lowest = ULLONG_MAX;
int c, bcpu = -1;
head->next_cpu = -1;
for (c = 0; c < head->ncpus; c++) {
u64 exp = head->cpu[c].expires;
if (exp < lowest) {
bcpu = c;
lowest = exp;
}
}
if (bcpu >= 0) {
head->next_cpu = bcpu;
c = head->cpu[bcpu].lcpu;
if (uv_setup_intr(c, lowest))
/* If we didn't set it up in time, trigger */
uv_rtc_send_IPI(c);
} else {
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
UVH_RTC1_INT_CONFIG_M_MASK);
}
}
/*
* Set expiration time for current cpu.
*
* Returns 1 if we missed the expiration time.
*/
static int uv_rtc_set_timer(int cpu, u64 expires)
{
int pnode = uv_cpu_to_pnode(cpu);
int bid = uv_cpu_to_blade_id(cpu);
struct uv_rtc_timer_head *head = blade_info[bid];
int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
u64 *t = &head->cpu[bcpu].expires;
unsigned long flags;
int next_cpu;
spin_lock_irqsave(&head->lock, flags);
next_cpu = head->next_cpu;
*t = expires;
/* Will this one be next to go off? */
if (next_cpu < 0 || bcpu == next_cpu ||
expires < head->cpu[next_cpu].expires) {
head->next_cpu = bcpu;
if (uv_setup_intr(cpu, expires)) {
*t = ULLONG_MAX;
uv_rtc_find_next_timer(head, pnode);
spin_unlock_irqrestore(&head->lock, flags);
return 1;
}
}
spin_unlock_irqrestore(&head->lock, flags);
return 0;
}
/*
* Unset expiration time for current cpu.
*
* Returns 1 if this timer was pending.
*/
static int uv_rtc_unset_timer(int cpu)
{
int pnode = uv_cpu_to_pnode(cpu);
int bid = uv_cpu_to_blade_id(cpu);
struct uv_rtc_timer_head *head = blade_info[bid];
int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
u64 *t = &head->cpu[bcpu].expires;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&head->lock, flags);
if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
rc = 1;
*t = ULLONG_MAX;
/* Was the hardware setup for this timer? */
if (head->next_cpu == bcpu)
uv_rtc_find_next_timer(head, pnode);
spin_unlock_irqrestore(&head->lock, flags);
return rc;
}
/*
* Kernel interface routines.
*/
/*
* Read the RTC.
*/
static cycle_t uv_read_rtc(void)
{
return (cycle_t)uv_read_local_mmr(UVH_RTC);
}
/*
* Program the next event, relative to now
*/
static int uv_rtc_next_event(unsigned long delta,
struct clock_event_device *ced)
{
int ced_cpu = cpumask_first(ced->cpumask);
return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
}
/*
* Setup the RTC timer in oneshot mode
*/
static void uv_rtc_timer_setup(enum clock_event_mode mode,
struct clock_event_device *evt)
{
int ced_cpu = cpumask_first(evt->cpumask);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_RESUME:
/* Nothing to do here yet */
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
uv_rtc_unset_timer(ced_cpu);
break;
}
}
static void uv_rtc_interrupt(void)
{
struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
int cpu = smp_processor_id();
if (!ced || !ced->event_handler)
return;
if (uv_rtc_unset_timer(cpu) != 1)
return;
ced->event_handler(ced);
}
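Tracing the path end to end: the comparator programmed in uv_setup_intr() (or the IPI from uv_rtc_send_IPI()) raises GENERIC_INTERRUPT_VECTOR on the target cpu; smp_generic_interrupt() in irq.c calls generic_interrupt_extension, which uv_rtc_setup_clock() below points at uv_rtc_interrupt(), and the event finally reaches the per-cpu clockevent handler registered with the clockevents core.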
static int __init uv_enable_rtc(char *str)
{
uv_rtc_enable = 1;
return 1;
}
__setup("uvrtc", uv_enable_rtc);
static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
*ced = clock_event_device_uv;
ced->cpumask = cpumask_of(smp_processor_id());
clockevents_register_device(ced);
}
static __init int uv_rtc_setup_clock(void)
{
int rc;
if (!uv_rtc_enable || !is_uv_system() || generic_interrupt_extension)
return -ENODEV;
generic_interrupt_extension = uv_rtc_interrupt;
clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
clocksource_uv.shift);
rc = clocksource_register(&clocksource_uv);
if (rc) {
generic_interrupt_extension = NULL;
return rc;
}
/* Setup and register clockevents */
rc = uv_rtc_allocate_timers();
if (rc) {
clocksource_unregister(&clocksource_uv);
generic_interrupt_extension = NULL;
return rc;
}
clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
NSEC_PER_SEC, clock_event_device_uv.shift);
clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
sn_rtc_cycles_per_second;
clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
(NSEC_PER_SEC / sn_rtc_cycles_per_second);
rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
if (rc) {
clocksource_unregister(&clocksource_uv);
generic_interrupt_extension = NULL;
uv_rtc_deallocate_timers();
}
return rc;
}
arch_initcall(uv_rtc_setup_clock);
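For a feel for the mult/shift conversions above: clocksource_hz2mult() computes mult as roughly (NSEC_PER_SEC << shift) / hz. Taking, purely for illustration, a 5 MHz RTC (the real rate, sn_rtc_cycles_per_second, is supplied by the platform), shift = 10 gives mult = (10^9 << 10) / (5 * 10^6) = 204800, i.e. cycles convert to nanoseconds as (cycles * 204800) >> 10, or 200 ns per tick. The clockevent limits then work out to min_delta_ns = 10^9 / (5 * 10^6) = 200 ns and max_delta_ns = mask * 200 ns.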