Commit 94e13063 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] mmtimer driver update

- reduce processing in timer interrupt through the use of a tasklet
- fix various race conditions
- use the correct interrupt vector for the SN2 RTC
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d38884cb
...@@ -14,8 +14,8 @@ ...@@ -14,8 +14,8 @@
* 11/01/01 - jbarnes - initial revision * 11/01/01 - jbarnes - initial revision
* 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion
* 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE
* 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt support * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt
* via the posix timer interface * support via the posix timer interface
*/ */
#include <linux/types.h> #include <linux/types.h>
...@@ -82,45 +82,23 @@ static struct file_operations mmtimer_fops = { ...@@ -82,45 +82,23 @@ static struct file_operations mmtimer_fops = {
* node. RTC0 is used by SAL. * node. RTC0 is used by SAL.
*/ */
#define NUM_COMPARATORS 3 #define NUM_COMPARATORS 3
/* /* Check for an RTC interrupt pending */
* Check for an interrupt and clear the pending bit if
* one is waiting.
*/
static int inline mmtimer_int_pending(int comparator) static int inline mmtimer_int_pending(int comparator)
{ {
int pending = 0; if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
switch (comparator) { return 1;
case 0: else
if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & return 0;
SH_EVENT_OCCURRED_RTC1_INT_MASK) { }
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), /* Clear the RTC interrupt pending bit */
SH_EVENT_OCCURRED_RTC1_INT_MASK); static void inline mmtimer_clr_int_pending(int comparator)
pending = 1; {
} HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
break; SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
case 1:
if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
SH_EVENT_OCCURRED_RTC2_INT_MASK) {
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
SH_EVENT_OCCURRED_RTC2_INT_MASK);
pending = 1;
}
break;
case 2:
if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
SH_EVENT_OCCURRED_RTC3_INT_MASK) {
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
SH_EVENT_OCCURRED_RTC3_INT_MASK);
pending = 1;
}
break;
default:
return -EFAULT;
}
return pending;
} }
/* Setup timer on comparator RTC1 */
static void inline mmtimer_setup_int_0(u64 expires) static void inline mmtimer_setup_int_0(u64 expires)
{ {
u64 val; u64 val;
...@@ -131,8 +109,8 @@ static void inline mmtimer_setup_int_0(u64 expires) ...@@ -131,8 +109,8 @@ static void inline mmtimer_setup_int_0(u64 expires)
/* Initialize comparator value */ /* Initialize comparator value */
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L);
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), /* Clear pending bit */
SH_EVENT_OCCURRED_RTC1_INT_MASK); mmtimer_clr_int_pending(0);
val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
((u64)cpu_physical_id(smp_processor_id()) << ((u64)cpu_physical_id(smp_processor_id()) <<
...@@ -150,6 +128,7 @@ static void inline mmtimer_setup_int_0(u64 expires) ...@@ -150,6 +128,7 @@ static void inline mmtimer_setup_int_0(u64 expires)
} }
/* Setup timer on comparator RTC2 */
static void inline mmtimer_setup_int_1(u64 expires) static void inline mmtimer_setup_int_1(u64 expires)
{ {
u64 val; u64 val;
...@@ -158,8 +137,7 @@ static void inline mmtimer_setup_int_1(u64 expires) ...@@ -158,8 +137,7 @@ static void inline mmtimer_setup_int_1(u64 expires)
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L);
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mmtimer_clr_int_pending(1);
SH_EVENT_OCCURRED_RTC2_INT_MASK);
val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
((u64)cpu_physical_id(smp_processor_id()) << ((u64)cpu_physical_id(smp_processor_id()) <<
...@@ -172,6 +150,7 @@ static void inline mmtimer_setup_int_1(u64 expires) ...@@ -172,6 +150,7 @@ static void inline mmtimer_setup_int_1(u64 expires)
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires);
} }
/* Setup timer on comparator RTC3 */
static void inline mmtimer_setup_int_2(u64 expires) static void inline mmtimer_setup_int_2(u64 expires)
{ {
u64 val; u64 val;
...@@ -180,8 +159,7 @@ static void inline mmtimer_setup_int_2(u64 expires) ...@@ -180,8 +159,7 @@ static void inline mmtimer_setup_int_2(u64 expires)
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L); HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L);
HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mmtimer_clr_int_pending(2);
SH_EVENT_OCCURRED_RTC3_INT_MASK);
val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
((u64)cpu_physical_id(smp_processor_id()) << ((u64)cpu_physical_id(smp_processor_id()) <<
...@@ -220,8 +198,7 @@ static int inline mmtimer_setup(int comparator, unsigned long expires) ...@@ -220,8 +198,7 @@ static int inline mmtimer_setup(int comparator, unsigned long expires)
/* We might've missed our expiration time */ /* We might've missed our expiration time */
diff = rtc_time() - expires; diff = rtc_time() - expires;
if (diff > 0) { if (diff > 0) {
if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & if (mmtimer_int_pending(comparator)) {
(SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)) {
/* We'll get an interrupt for this once we're done */ /* We'll get an interrupt for this once we're done */
return 0; return 0;
} }
...@@ -259,6 +236,9 @@ static int inline mmtimer_disable_int(long nasid, int comparator) ...@@ -259,6 +236,9 @@ static int inline mmtimer_disable_int(long nasid, int comparator)
typedef struct mmtimer { typedef struct mmtimer {
spinlock_t lock ____cacheline_aligned; spinlock_t lock ____cacheline_aligned;
struct k_itimer *timer; struct k_itimer *timer;
int i;
int cpu;
struct tasklet_struct tasklet;
} mmtimer_t; } mmtimer_t;
/* /*
...@@ -435,12 +415,12 @@ static int sgi_clock_set(struct timespec *tp) ...@@ -435,12 +415,12 @@ static int sgi_clock_set(struct timespec *tp)
* exponentially in order to ensure that the next interrupt * exponentially in order to ensure that the next interrupt
* can be properly scheduled.. * can be properly scheduled..
*/ */
static int inline reschedule_periodic_timer(mmtimer_t *base, struct k_itimer *t, int i) static int inline reschedule_periodic_timer(mmtimer_t *x)
{ {
int n; int n;
struct k_itimer *t = x->timer;
t->it_timer.magic = i; t->it_timer.magic = x->i;
base[i].timer = t;
t->it_overrun--; t->it_overrun--;
n = 0; n = 0;
...@@ -452,7 +432,7 @@ static int inline reschedule_periodic_timer(mmtimer_t *base, struct k_itimer *t, ...@@ -452,7 +432,7 @@ static int inline reschedule_periodic_timer(mmtimer_t *base, struct k_itimer *t,
if (n > 20) if (n > 20)
return 1; return 1;
} while (mmtimer_setup(i, t->it_timer.expires)); } while (mmtimer_setup(x->i, t->it_timer.expires));
return 0; return 0;
} }
...@@ -475,57 +455,70 @@ static irqreturn_t ...@@ -475,57 +455,70 @@ static irqreturn_t
mmtimer_interrupt(int irq, void *dev_id, struct pt_regs *regs) mmtimer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{ {
int i; int i;
mmtimer_t *base = timers + cpuid_to_cnodeid(smp_processor_id()) * NUM_COMPARATORS; mmtimer_t *base = timers + cpuid_to_cnodeid(smp_processor_id()) *
NUM_COMPARATORS;
unsigned long expires = 0;
int result = IRQ_NONE; int result = IRQ_NONE;
/* /*
* Do this once for each comparison register * Do this once for each comparison register
*/ */
for (i = 0; i < NUM_COMPARATORS; i++) { for (i = 0; i < NUM_COMPARATORS; i++) {
unsigned long flags; /* Make sure this doesn't get reused before tasklet_sched */
spin_lock(&base[i].lock);
if (mmtimer_int_pending(i) > 0) { if (base[i].cpu == smp_processor_id()) {
struct k_itimer *t; if (base[i].timer)
int m = 0; expires = base[i].timer->it_timer.expires;
/* expires test won't work with shared irqs */
mmtimer_disable_int(-1, i); if ((mmtimer_int_pending(i) > 0) ||
spin_lock(&base[i].lock); (expires && (expires < rtc_time()))) {
t = base[i].timer; mmtimer_clr_int_pending(i);
base[i].timer = NULL; tasklet_schedule(&base[i].tasklet);
if (t) { result = IRQ_HANDLED;
m = t->it_timer.magic;
t->it_timer.magic = TIMER_OFF;
} }
spin_unlock(&base[i].lock); }
spin_unlock(&base[i].lock);
expires = 0;
}
return result;
}
if (t == NULL || m == TIMER_OFF) void mmtimer_tasklet(unsigned long data) {
/* No timer left here, bail out */ mmtimer_t *x = (mmtimer_t *)data;
goto out; struct k_itimer *t = x->timer;
unsigned long flags;
spin_lock_irqsave(&t->it_lock, flags); if (t == NULL)
t->it_overrun = 0; return;
if (posix_timer_event(t, 0) == 0) { /* Send signal and deal with periodic signals */
spin_lock_irqsave(&t->it_lock, flags);
spin_lock(&x->lock);
/* If timer was deleted between interrupt and here, leave */
if (t != x->timer)
goto out;
t->it_overrun = 0;
if(t->it_incr) { if (tasklist_lock.write_lock || posix_timer_event(t, 0) != 0) {
/* Periodic timer */
spin_lock(&base[i].lock);
if (base[i].timer == NULL)
reschedule_periodic_timer(base, t, i);
spin_unlock(&base[i].lock);
}
} else { // printk(KERN_WARNING "mmtimer: cannot deliver signal.\n");
printk(KERN_WARNING "mmtimer: unable to deliver signal");
t->it_overrun++; t->it_overrun++;
} }
t->it_overrun_last = t->it_overrun; if(t->it_incr) {
spin_unlock_irqrestore(&t->it_lock, flags); /* Periodic timer */
out: if (reschedule_periodic_timer(x)) {
result = IRQ_HANDLED; printk(KERN_WARNING "mmtimer: unable to reschedule\n");
x->timer = NULL;
} }
} else {
/* Ensure we don't false trigger in mmtimer_interrupt */
t->it_timer.expires = 0;
} }
return result; t->it_overrun_last = t->it_overrun;
out:
spin_unlock(&x->lock);
spin_unlock_irqrestore(&t->it_lock, flags);
} }
static int sgi_timer_create(struct k_itimer *timer) static int sgi_timer_create(struct k_itimer *timer)
...@@ -552,6 +545,7 @@ static int sgi_timer_del(struct k_itimer *timr) ...@@ -552,6 +545,7 @@ static int sgi_timer_del(struct k_itimer *timr)
mmtimer_disable_int(cnodeid_to_nasid(nodeid),i); mmtimer_disable_int(cnodeid_to_nasid(nodeid),i);
t->timer = NULL; t->timer = NULL;
timr->it_timer.magic = TIMER_OFF; timr->it_timer.magic = TIMER_OFF;
timr->it_timer.expires = 0;
spin_unlock_irqrestore(&t->lock, irqflags); spin_unlock_irqrestore(&t->lock, irqflags);
} }
return 0; return 0;
...@@ -625,8 +619,9 @@ static int sgi_timer_set(struct k_itimer *timr, int flags, ...@@ -625,8 +619,9 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
nodeid = cpuid_to_cnodeid(smp_processor_id()); nodeid = cpuid_to_cnodeid(smp_processor_id());
base = timers + nodeid * NUM_COMPARATORS; base = timers + nodeid * NUM_COMPARATORS;
retry: retry:
/* Don't use an allocated timer, or a deleted one that's pending */
for(i = 0; i< NUM_COMPARATORS; i++) { for(i = 0; i< NUM_COMPARATORS; i++) {
if (!base[i].timer) { if (!base[i].timer && !base[i].tasklet.state) {
break; break;
} }
} }
...@@ -638,11 +633,12 @@ static int sgi_timer_set(struct k_itimer *timr, int flags, ...@@ -638,11 +633,12 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
spin_lock_irqsave(&base[i].lock, irqflags); spin_lock_irqsave(&base[i].lock, irqflags);
if (base[i].timer) { if (base[i].timer || base[i].tasklet.state != 0) {
spin_unlock_irqrestore(&base[i].lock, irqflags); spin_unlock_irqrestore(&base[i].lock, irqflags);
goto retry; goto retry;
} }
base[i].timer = timr; base[i].timer = timr;
base[i].cpu = smp_processor_id();
timr->it_timer.magic = i; timr->it_timer.magic = i;
timr->it_timer.data = nodeid; timr->it_timer.data = nodeid;
...@@ -653,10 +649,11 @@ static int sgi_timer_set(struct k_itimer *timr, int flags, ...@@ -653,10 +649,11 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
if (mmtimer_setup(i, when)) { if (mmtimer_setup(i, when)) {
mmtimer_disable_int(-1, i); mmtimer_disable_int(-1, i);
posix_timer_event(timr, 0); posix_timer_event(timr, 0);
timr->it_timer.expires = 0;
} }
} else { } else {
timr->it_timer.expires -= period; timr->it_timer.expires -= period;
if (reschedule_periodic_timer(base, timr, i)) if (reschedule_periodic_timer(base+i))
err = -EINVAL; err = -EINVAL;
} }
...@@ -705,6 +702,9 @@ static int __init mmtimer_init(void) ...@@ -705,6 +702,9 @@ static int __init mmtimer_init(void)
for (i=0; i< NUM_COMPARATORS*MAX_COMPACT_NODES; i++) { for (i=0; i< NUM_COMPARATORS*MAX_COMPACT_NODES; i++) {
spin_lock_init(&timers[i].lock); spin_lock_init(&timers[i].lock);
timers[i].timer = NULL; timers[i].timer = NULL;
timers[i].cpu = 0;
timers[i].i = i % NUM_COMPARATORS;
tasklet_init(&timers[i].tasklet, mmtimer_tasklet, (unsigned long) (timers+i));
} }
if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, SA_PERCPU_IRQ, MMTIMER_NAME, NULL)) { if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, SA_PERCPU_IRQ, MMTIMER_NAME, NULL)) {
......
...@@ -12,16 +12,21 @@ ...@@ -12,16 +12,21 @@
#define SGI_UART_VECTOR (0xe9) #define SGI_UART_VECTOR (0xe9)
#define SGI_PCIBR_ERROR (0x33) #define SGI_PCIBR_ERROR (0x33)
// These two IRQ's are used by partitioning. /* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
#define SGI_XPC_ACTIVATE (0x30) #define SGI_XPC_ACTIVATE (0x30)
#define SGI_II_ERROR (0x31) #define SGI_II_ERROR (0x31)
#define SGI_XBOW_ERROR (0x32) #define SGI_XBOW_ERROR (0x32)
#define SGI_PCIBR_ERROR (0x33) #define SGI_PCIBR_ERROR (0x33)
#define SGI_ACPI_SCI_INT (0x34) #define SGI_ACPI_SCI_INT (0x34)
#define SGI_MMTIMER_VECTOR (0x35) #define SGI_TIOCA_ERROR (0x35)
#define SGI_TIO_ERROR (0x36) #define SGI_TIO_ERROR (0x36)
#define SGI_TIOCX_ERROR (0x37)
#define SGI_MMTIMER_VECTOR (0x38)
#define SGI_XPC_NOTIFY (0xe7) #define SGI_XPC_NOTIFY (0xe7)
#define IA64_SN2_FIRST_DEVICE_VECTOR (0x3c)
#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6)
#define SN2_IRQ_RESERVED (0x1) #define SN2_IRQ_RESERVED (0x1)
#define SN2_IRQ_CONNECTED (0x2) #define SN2_IRQ_CONNECTED (0x2)
#define SN2_IRQ_SHARED (0x4) #define SN2_IRQ_SHARED (0x4)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment