Commit 87e53587 authored by Peter Chubb, committed by David Mosberger

[PATCH] ia64: Preemption patch against ~2.5.60

Latest preemption patch.
parent 839fe15b
......@@ -424,6 +424,18 @@ config SMP
If you don't know what to do here, say N.
config PREEMPT
bool "Preemptible Kernel"
help
This option reduces the latency of the kernel when reacting to
real-time or interactive events by allowing a low priority process to
be preempted even if it is in kernel mode executing a system call.
This allows applications to run more reliably even when the system is
under load.
Say Y here if you are building a kernel for a desktop, embedded
or real-time system. Say N if you are unsure.
config IA32_SUPPORT
bool "Support running of Linux/x86 binaries"
help
......@@ -875,6 +887,12 @@ config DEBUG_SPINLOCK
best used in conjunction with the NMI watchdog so that spinlock
deadlocks are also debuggable.
config DEBUG_SPINLOCK_SLEEP
bool "Sleep-inside-spinlock checking"
help
If you say Y here, various routines which may sleep will become very
noisy if they are called with a spinlock held.
config IA64_DEBUG_CMPXCHG
bool "Turn on compare-and-exchange bug checking (slow!)"
depends on DEBUG_KERNEL
......
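Both new options revolve around the per-task preempt_count kept in thread_info (see TI_PRE_COUNT further down). The sketch below is modelled on the generic 2.5 preemption helpers rather than on code in this patch; preempt_schedule() is the scheduler-side entry point, and the last macro only approximates the check that "Sleep-inside-spinlock checking" enables.

#define preempt_count()  (current_thread_info()->preempt_count)

#define preempt_disable()                                               \
do {                                                                    \
        preempt_count()++;      /* any non-zero count blocks preemption */ \
        barrier();                                                      \
} while (0)

#define preempt_enable()                                                \
do {                                                                    \
        barrier();                                                      \
        if (--preempt_count() == 0 && need_resched())                   \
                preempt_schedule();     /* re-enabling is a preemption point */ \
} while (0)

/* Roughly what DEBUG_SPINLOCK_SLEEP reports (simplified illustration): */
#define check_might_sleep()                                             \
do {                                                                    \
        if (in_atomic())                                                \
                printk(KERN_ERR "sleeping function called from invalid context\n"); \
} while (0)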
......@@ -63,7 +63,6 @@ extern void ia64_ssc_connect_irq (long intr, long irq);
static char *serial_name = "SimSerial driver";
static char *serial_version = "0.6";
static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
/*
* This has been extracted from asm/serial.h. We need one eventually but
......@@ -235,14 +234,14 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch)
if (!tty || !info->xmit.buf) return;
spin_lock_irqsave(&serial_lock, flags);
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
return;
}
info->xmit.buf[info->xmit.head] = ch;
info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
}
static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
......@@ -250,7 +249,8 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
int count;
unsigned long flags;
spin_lock_irqsave(&serial_lock, flags);
local_irq_save(flags);
if (info->x_char) {
char c = info->x_char;
......@@ -293,7 +293,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
info->xmit.tail += count;
}
out:
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
}
static void rs_flush_chars(struct tty_struct *tty)
......@@ -334,7 +334,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
break;
}
spin_lock_irqsave(&serial_lock, flags);
local_irq_save(flags);
{
c1 = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail,
SERIAL_XMIT_SIZE);
......@@ -344,7 +344,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
info->xmit.head = ((info->xmit.head + c) &
(SERIAL_XMIT_SIZE-1));
}
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
buf += c;
count -= c;
......@@ -352,7 +352,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
}
up(&tmp_buf_sem);
} else {
spin_lock_irqsave(&serial_lock, flags);
local_irq_save(flags);
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count < c)
......@@ -367,7 +367,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
count -= c;
ret += c;
}
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
}
/*
* Hey, we transmit directly from here in our case
......@@ -398,9 +398,9 @@ static void rs_flush_buffer(struct tty_struct *tty)
struct async_struct *info = (struct async_struct *)tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&serial_lock, flags);
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
wake_up_interruptible(&tty->write_wait);
......@@ -573,7 +573,7 @@ static void shutdown(struct async_struct * info)
state->irq);
#endif
spin_lock_irqsave(&serial_lock, flags);
local_irq_save(flags);
{
/*
* First unlink the serial port from the IRQ chain...
......@@ -611,7 +611,7 @@ static void shutdown(struct async_struct * info)
info->flags &= ~ASYNC_INITIALIZED;
}
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
}
/*
......@@ -634,13 +634,13 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
state = info->state;
spin_lock_irqsave(&serial_lock, flags);
local_irq_save(flags);
if (tty_hung_up_p(filp)) {
#ifdef SIMSERIAL_DEBUG
printk("rs_close: hung_up\n");
#endif
MOD_DEC_USE_COUNT;
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
return;
}
#ifdef SIMSERIAL_DEBUG
......@@ -665,11 +665,11 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
}
if (state->count) {
MOD_DEC_USE_COUNT;
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
return;
}
info->flags |= ASYNC_CLOSING;
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
/*
* Now we wait for the transmit buffer to clear; and we notify
......@@ -776,7 +776,7 @@ startup(struct async_struct *info)
if (!page)
return -ENOMEM;
spin_lock_irqsave(&serial_lock, flags);
local_irq_save(flags);
if (info->flags & ASYNC_INITIALIZED) {
free_page(page);
......@@ -857,11 +857,11 @@ startup(struct async_struct *info)
}
info->flags |= ASYNC_INITIALIZED;
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
return 0;
errout:
spin_unlock_irqrestore(&serial_lock, flags);
local_irq_restore(flags);
return retval;
}
......
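The simserial hunks all follow one pattern: the driver-wide serial_lock is dropped and the transmit ring is guarded by local_irq_save()/restore() alone, which is enough for this simulator-only port that is only ever touched from the local CPU. Reassembled from the hunks above (local declarations filled in for readability), the patched rs_put_char() reads:

static void rs_put_char(struct tty_struct *tty, unsigned char ch)
{
        struct async_struct *info = (struct async_struct *) tty->driver_data;
        unsigned long flags;

        if (!tty || !info->xmit.buf)
                return;

        local_irq_save(flags);          /* masking local interrupts protects the ring */
        if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
                local_irq_restore(flags);
                return;
        }
        info->xmit.buf[info->xmit.head] = ch;
        info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE - 1);
        local_irq_restore(flags);
}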
......@@ -93,7 +93,7 @@ ia32_load_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
struct pt_regs *regs = ia64_task_regs(t);
int nr = smp_processor_id(); /* LDT and TSS depend on CPU number: */
int nr = get_cpu(); /* LDT and TSS depend on CPU number: */
eflag = t->thread.eflag;
fsr = t->thread.fsr;
......@@ -119,6 +119,7 @@ ia32_load_state (struct task_struct *t)
regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32) | (__u32) regs->r17;
regs->r30 = load_desc(_LDT(nr)); /* LDTD */
put_cpu();
}
/*
......
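Replacing smp_processor_id() with get_cpu()/put_cpu() is the recurring idiom in this patch: once the kernel can preempt, a raw CPU number can go stale if the task migrates mid-function, so preemption has to stay off for as long as the number is used. A generic sketch of the idiom, with a hypothetical per-CPU variable that is not part of the patch:

/* Hypothetical per-CPU counter, for illustration only. */
DEFINE_PER_CPU(int, some_counter);

static void touch_per_cpu_state(void)
{
        int cpu = get_cpu();            /* disables preemption, returns the CPU number */

        per_cpu(some_counter, cpu)++;   /* 'cpu' cannot go stale while preemption is off */

        put_cpu();                      /* re-enables preemption (possible resched point) */
}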
......@@ -586,10 +586,21 @@ GLOBAL_ENTRY(ia64_leave_kernel)
// work.need_resched etc. mustn't get changed by this CPU before it returns to
// user- or fsys-mode:
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
(pKStk) ld4 r21=[r20] // preempt_count ->r21
;;
(pKStk) cmp4.eq p6,p0=r21,r0 // p6 <- preempt_count == 0
;;
#else // CONFIG_PREEMPT
(pUStk) rsm psr.i
;;
(pUStk) adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
#endif // CONFIG_PREEMPT
.work_processed:
(p6) ld4 r18=[r17] // load current_thread_info()->flags
adds r2=PT(R8)+16,r12
......@@ -810,15 +821,27 @@ skip_rbs_switch:
.work_pending:
tbit.z p6,p0=r18,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
(p6) br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
;;
(pKStk) st4 [r20]=r21
ssm psr.i // enable interrupts
#endif
#if __GNUC__ < 3
br.call.spnt.many rp=invoke_schedule
#else
br.call.spnt.many rp=schedule
#endif
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
rsm psr.i
rsm psr.i // disable interrupts
;;
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
#if CONFIG_PREEMPT
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
(pKStk) st4 [r20]=r0 // preempt_count() <- 0
#endif
br.cond.sptk.many .work_processed // re-check
.notify:
......
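The entry.S changes make the kernel-exit path check, with interrupts off, whether a return to kernel mode may be preempted: only when preempt_count is zero and TIF_NEED_RESCHED is set does it tag preempt_count with PREEMPT_ACTIVE and call schedule(). A hedged C rendering of that flow, not code from the patch; 'returning_to_kernel' stands in for the pKStk predicate and preempt_count() is assumed to be an lvalue:

static void leave_kernel_preempt_check(int returning_to_kernel)
{
        local_irq_disable();                           /* rsm psr.i */
        if (returning_to_kernel && preempt_count() == 0 &&
            (current_thread_info()->flags & (1UL << TIF_NEED_RESCHED))) {
                preempt_count() = PREEMPT_ACTIVE;      /* dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1 */
                local_irq_enable();                    /* ssm psr.i */
                schedule();
                local_irq_disable();                   /* rsm psr.i at .ret9 */
                preempt_count() = 0;                   /* st4 [r20]=r0 */
        }
        /* then fall through to .work_processed and re-check the flags */
}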
......@@ -340,12 +340,14 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
* 0 return value means that this irq is already being
* handled by some other CPU. (or is disabled)
*/
int cpu = smp_processor_id();
int cpu;
irq_desc_t *desc = irq_desc(irq);
struct irqaction * action;
unsigned int status;
irq_enter();
cpu = smp_processor_id();
kstat_cpu(cpu).irqs[irq]++;
if (desc->status & IRQ_PER_CPU) {
......
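In do_IRQ the CPU number is now sampled only after irq_enter() has raised the hardirq count, so the per-CPU statistics are charged from a context that is already non-preemptible. A small sketch of that ordering; the wrapper name is hypothetical and the irq_exit() pairing is assumed from the generic interrupt pattern:

static void handle_one_irq(unsigned long irq)
{
        int cpu;

        irq_enter();                    /* hardirq count goes up first */
        cpu = smp_processor_id();       /* now safe to cache the CPU number */
        kstat_cpu(cpu).irqs[irq]++;
        /* ... dispatch to the registered handlers ... */
        irq_exit();
}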
......@@ -894,11 +894,13 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
* in SMP mode, we may need to call another CPU to get correct
* information. PAL, by definition, is processor specific
*/
if (f->req_cpu == smp_processor_id())
if (f->req_cpu == get_cpu())
len = (*palinfo_entries[f->func_id].proc_read)(page);
else
len = palinfo_handle_smp(f, page);
put_cpu();
if (len <= off+count) *eof = 1;
*start = page + off;
......
......@@ -90,7 +90,7 @@ stop_this_cpu (void)
void
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
int this_cpu = smp_processor_id();
int this_cpu = get_cpu();
unsigned long *pending_ipis = &__get_cpu_var(ipi_operation);
unsigned long ops;
......@@ -146,8 +146,12 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
} while (ops);
mb(); /* Order data access and bit testing. */
}
put_cpu();
}
/*
* Called with preemption disabled
*/
static inline void
send_IPI_single (int dest_cpu, int op)
{
......@@ -155,6 +159,9 @@ send_IPI_single (int dest_cpu, int op)
platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
* Called with preemption disabled
*/
static inline void
send_IPI_allbutself (int op)
{
......@@ -166,6 +173,9 @@ send_IPI_allbutself (int op)
}
}
/*
* Called with preemption disabled
*/
static inline void
send_IPI_all (int op)
{
......@@ -176,12 +186,18 @@ send_IPI_all (int op)
send_IPI_single(i, op);
}
/*
* Called with preemption disabled
*/
static inline void
send_IPI_self (int op)
{
send_IPI_single(smp_processor_id(), op);
}
/*
* Called with preemption disabled
*/
void
smp_send_reschedule (int cpu)
{
......@@ -197,12 +213,15 @@ void
smp_send_reschedule_all (void)
{
int i;
int cpu = get_cpu(); /* disable preemption */
for (i = 0; i < NR_CPUS; i++)
if (cpu_online(i) && i != smp_processor_id())
if (cpu_online(i) && i != cpu)
smp_send_reschedule(i);
put_cpu();
}
void
smp_flush_tlb_all (void)
{
......@@ -247,9 +266,11 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
{
struct call_data_struct data;
int cpus = 1;
int me = get_cpu(); /* prevent preemption and reschedule on another processor */
if (cpuid == smp_processor_id()) {
if (cpuid == me) {
printk("%s: trying to call self\n", __FUNCTION__);
put_cpu();
return -EBUSY;
}
......@@ -276,6 +297,7 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
call_data = NULL;
spin_unlock_bh(&call_lock);
put_cpu();
return 0;
}
......
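smp_call_function_single() and smp_send_reschedule_all() now hold preemption off across the whole operation, so the "am I the target CPU?" test and the IPI that follows are issued from the same processor, and every exit path drops the reference. A hedged sketch of the pattern (hypothetical helper, call_data plumbing omitted):

static int call_on_other_cpu(int target, void (*func)(void *info), void *info)
{
        int me = get_cpu();             /* no migration between the check and the IPI */

        if (target == me) {
                printk("%s: trying to call self\n", __FUNCTION__);
                put_cpu();
                return -EBUSY;
        }

        /* ... queue call_data and send the IPI to 'target' (omitted) ... */

        put_cpu();                      /* every exit path re-enables preemption */
        return 0;
}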
......@@ -55,7 +55,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* If we're in an interrupt or have no user context, we must not take the fault..
*/
if (in_interrupt() || !mm)
if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
......
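Switching the fault handler's guard from in_interrupt() to in_atomic() matters once spinlocks disable preemption: a fault taken while a spinlock is held is not "in interrupt", but it still must not sleep on mmap_sem, and in_atomic() catches that case as well. A small illustration, valid only under CONFIG_PREEMPT where spin_lock() raises preempt_count (the lock is hypothetical):

static void example_critical_section(spinlock_t *lock)
{
        spin_lock(lock);                /* preempt_count() becomes non-zero */
        BUG_ON(!in_atomic());           /* in_interrupt() could still report 0 here */
        spin_unlock(lock);
}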
......@@ -81,9 +81,13 @@ wrap_mmu_context (struct mm_struct *mm)
}
read_unlock(&tasklist_lock);
/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
{
int cpu = get_cpu(); /* prevent preemption/migration */
for (i = 0; i < NR_CPUS; ++i)
if (i != smp_processor_id())
if (i != cpu)
per_cpu(ia64_need_tlb_flush, i) = 1;
put_cpu();
}
local_flush_tlb_all();
}
......
......@@ -32,18 +32,18 @@
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-31 are the hardirq count (max # of hardirqs: 65536)
* - bits 16-29 are the hardirq count (max # of hardirqs: 16384)
*
* - (bit 63 is the PREEMPT_ACTIVE flag---not currently implemented.)
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0xffff0000
* HARDIRQ_MASK: 0x3fff0000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 16
#define HARDIRQ_BITS 14
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
......@@ -83,13 +83,13 @@
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
#define in_atomic() (preempt_count() != 0)
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#if CONFIG_PREEMPT
# error CONFIG_PREEMPT currently not supported.
# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic() (preempt_count() != 0)
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
......
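With HARDIRQ_BITS cut to 14, the three counters occupy bits 0 to 29 of the 32-bit preempt_count and bits 30 and 31 stay free, which is where PREEMPT_ACTIVE lands (bit 30, per thread_info.h below). The derived masks, spelled out; the __MASK() helper is for illustration only:

#define __MASK(x)       ((1UL << (x)) - 1)

#define PREEMPT_MASK    (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)   /* 0x000000ff */
#define SOFTIRQ_MASK    (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)   /* 0x0000ff00 */
#define HARDIRQ_MASK    (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)   /* 0x3fff0000 */
/* bits 30-31 remain unused by the counters; PREEMPT_ACTIVE is 1 << 30 */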
......@@ -206,7 +206,7 @@ extern void ia64_load_extra (struct task_struct *task);
#ifdef CONFIG_PERFMON
DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE() (get_cpu_var(pfm_syst_info) & 0x1)
# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif
......
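PERFMON_IS_SYSWIDE() moves from get_cpu_var() to __get_cpu_var(): the double-underscore form reads the per-CPU variable without touching the preempt count, which is the right tool when the caller already runs with preemption off (get_cpu_var() would otherwise need a matching put_cpu_var()). A small sketch of the difference, with a hypothetical per-CPU flag:

DEFINE_PER_CPU(unsigned long, demo_flag);       /* illustration only */

static int read_flag_preempt_safe(void)
{
        int v = get_cpu_var(demo_flag) & 0x1;   /* disables preemption ... */
        put_cpu_var(demo_flag);                 /* ... and must re-enable it */
        return v;
}

static int read_flag_caller_pinned(void)
{
        /* caller guarantees preemption is already off, so no get/put pair */
        return __get_cpu_var(demo_flag) & 0x1;
}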
......@@ -15,7 +15,8 @@
#define TI_ADDR_LIMIT 0x10
#define TI_PRE_COUNT 0x18
#define PREEMPT_ACTIVE 0x4000000
#define PREEMPT_ACTIVE_BIT 30
#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
#ifndef __ASSEMBLY__
......