Commit 9582480a authored by Linus Torvalds

v2.4.6.4 -> v2.4.6.5

  - remember to bump the version string
  - Andrea Arcangeli: softirq cleanups and fixes, and everybody is happy
  again (ie I changed some details to make me happy ;)
  - Neil Brown: raid5 stall fix, nfsd filehandle sanity check fix
parent ccb6dd87
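The core of the softirq rework below: raising a softirq from process context can no longer count on the hook that used to sit in schedule() (removed from the scheduler hunk further down), so the raise path itself now wakes a per-CPU ksoftirqd thread. A condensed sketch of the new control flow, simplified from the kernel/softirq.c hunks in this diff (FASTCALL annotations and locking details omitted):

	/* Sketch only: simplified from the softirq hunks below. */
	void raise_softirq(unsigned int nr)
	{
		cpu_raise_softirq(smp_processor_id(), nr);
	}

	inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
	{
		__cpu_raise_softirq(cpu, nr);	/* set the per-CPU pending bit */
		/*
		 * In irq/bh context the softirq runs on irq/bh exit anyway;
		 * otherwise kick the per-CPU ksoftirqd so it runs soon.
		 */
		if (!(local_irq_count(cpu) | local_bh_count(cpu)))
			wakeup_softirqd(cpu);
	}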
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 7
-EXTRAVERSION =-pre3
+EXTRAVERSION =-pre5
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
@@ -576,17 +576,6 @@ entSys:
	.align 3
ret_from_sys_call:
	cmovne $26,0,$19	/* $19 = 0 => non-restartable */
-#ifdef CONFIG_SMP
-	ldl $3,TASK_PROCESSOR($8)
-	sll $3,L1_CACHE_SHIFT,$3
-#endif
-	lda $4,irq_stat
-#ifdef CONFIG_SMP
-	addq $3,$4,$4
-#endif
-	ldq $4,0($4)	/* __softirq_pending */
-	bne $4,handle_softirq
-ret_from_softirq:
	ldq $0,SP_OFF($30)
	and $0,8,$0
	beq $0,restore_all
@@ -664,17 +653,6 @@ strace_error:
	mov $31,$26	/* tell "ret_from_sys_call" we can restart */
	br ret_from_sys_call
-	.align 3
-handle_softirq:
-	subq $30,16,$30
-	stq $19,0($30)	/* save syscall nr */
-	stq $20,8($30)	/* and error indication (a3) */
-	jsr $26,do_softirq
-	ldq $19,0($30)
-	ldq $20,8($30)
-	addq $30,16,$30
-	br ret_from_softirq
	.align 3
syscall_error:
	/*
......
@@ -66,10 +66,11 @@ static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
		BUG();
	if (atomic_read(&conf->active_stripes)==0)
		BUG();
-	if (test_bit(STRIPE_DELAYED, &sh->state))
-		list_add_tail(&sh->lru, &conf->delayed_list);
-	else if (test_bit(STRIPE_HANDLE, &sh->state)) {
-		list_add_tail(&sh->lru, &conf->handle_list);
+	if (test_bit(STRIPE_HANDLE, &sh->state)) {
+		if (test_bit(STRIPE_DELAYED, &sh->state))
+			list_add_tail(&sh->lru, &conf->delayed_list);
+		else
+			list_add_tail(&sh->lru, &conf->handle_list);
		md_wakeup_thread(conf->thread);
	} else {
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
@@ -1167,10 +1168,9 @@ static void raid5_unplug_device(void *data)
	raid5_activate_delayed(conf);
-	if (conf->plugged) {
-		conf->plugged = 0;
-		md_wakeup_thread(conf->thread);
-	}
+	conf->plugged = 0;
+	md_wakeup_thread(conf->thread);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
......
@@ -837,11 +837,11 @@ fh_update(struct svc_fh *fhp)
	dentry = fhp->fh_dentry;
	if (!dentry->d_inode)
		goto out_negative;
-	if (fhp->fh_handle.fh_fileid_type != 0)
-		goto out_uptodate;
	if (fhp->fh_handle.fh_version != 1) {
		_fh_update_old(dentry, fhp->fh_export, &fhp->fh_handle);
	} else {
+		if (fhp->fh_handle.fh_fileid_type != 0)
+			goto out_uptodate;
		datap = fhp->fh_handle.fh_auth+
			fhp->fh_handle.fh_size/4 -1;
		fhp->fh_handle.fh_fileid_type =
......
@@ -10,6 +10,7 @@ typedef struct {
	unsigned int __local_irq_count;
	unsigned int __local_bh_count;
	unsigned int __syscall_count;
+	struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
......
@@ -8,21 +8,30 @@
extern inline void cpu_bh_disable(int cpu)
{
	local_bh_count(cpu)++;
-	mb();
+	barrier();
}

-extern inline void cpu_bh_enable(int cpu)
+extern inline void __cpu_bh_enable(int cpu)
{
-	mb();
+	barrier();
	local_bh_count(cpu)--;
}

-#define local_bh_enable()	cpu_bh_enable(smp_processor_id())
-#define __local_bh_enable	local_bh_enable
+#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
#define local_bh_disable()	cpu_bh_disable(smp_processor_id())

+#define local_bh_enable()					\
+do {								\
+	int cpu;						\
+								\
+	barrier();						\
+	cpu = smp_processor_id();				\
+	if (!--local_bh_count(cpu) && softirq_pending(cpu))	\
+		do_softirq();					\
+} while (0)
+
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)

-#define __cpu_raise_softirq(cpu,nr) set_bit((nr), &softirq_pending(cpu))
+#define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))

#endif /* _ALPHA_SOFTIRQ_H */
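Two things worth noting in the alpha header above: mb() becomes barrier() because local_bh_count is only ever touched by its own CPU, so a compiler barrier suffices; and the new local_bh_enable() runs pending softirqs as soon as the count drops to zero instead of leaving them for some later exception exit. An illustrative usage pattern (not from the patch):

	/* Illustrative only: the pattern the reworked local_bh_enable() serves. */
	local_bh_disable();	/* softirqs may be raised, but will not run */
	/* ... touch data shared with softirq handlers ... */
	local_bh_enable();	/* count hits 0: do_softirq() runs right here
				   if anything became pending meanwhile */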
@@ -12,7 +12,6 @@
#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
#define __cpu_raise_softirq(cpu,nr) set_bit((nr), &softirq_pending(cpu))
-#define raise_softirq(nr) __cpu_raise_softirq(smp_processor_id(), (nr))
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
......
@@ -11,6 +11,7 @@ typedef struct {
	unsigned int __local_irq_count;
	unsigned int __local_bh_count;
	unsigned int __syscall_count;
+	struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
	unsigned int __nmi_count;	/* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
......
@@ -11,8 +11,6 @@
#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
-#define __cpu_raise_softirq(cpu,nr) set_bit((nr), &softirq_pending(cpu));
-#define raise_softirq(nr) __cpu_raise_softirq(smp_processor_id(), (nr))
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
@@ -28,6 +26,7 @@
do {								\
	unsigned int *ptr = &local_bh_count(smp_processor_id());	\
								\
+	barrier();						\
	if (!--*ptr)						\
		__asm__ __volatile__ (				\
			"cmpl $0, -8(%0);"			\
@@ -46,4 +45,6 @@ do { \
			/* no registers clobbered */ );		\
} while (0)

+#define __cpu_raise_softirq(cpu, nr) __set_bit(nr, &softirq_pending(cpu))
+
#endif /* __ASM_SOFTIRQ_H */
@@ -30,7 +30,6 @@ do { \
} while (0)
#define __cpu_raise_softirq(cpu, nr) set_bit((nr), &softirq_pending(cpu));
-#define raise_softirq(nr) __cpu_raise_softirq(smp_processor_id(), (nr))
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
......
@@ -26,7 +26,6 @@ do { \
} while (0)
#define __cpu_raise_softirq(cpu, nr) set_bit((nr), &softirq_pending(cpu));
-#define raise_softirq(nr) __cpu_raise_softirq(smp_processor_id(), (nr))
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
......
@@ -23,6 +23,7 @@ typedef struct {
#endif
	unsigned int __local_bh_count;
	unsigned int __syscall_count;
+	struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
......
@@ -22,11 +22,11 @@ do { if (!--local_bh_count(smp_processor_id()) && \
		__sti();				\
	}						\
} while (0)

-#define __cpu_raise_softirq(cpu, nr) (softirq_pending(cpu) |= (1<<nr))
-#define raise_softirq(nr) \
+#define __do_cpu_raise_softirq(cpu, nr) (softirq_pending(cpu) |= (1<<nr))
+#define __cpu_raise_softirq(cpu, nr) \
do { unsigned long flags; \
	local_irq_save(flags); \
-	__cpu_raise_softirq(smp_processor_id(), nr); \
+	__do_cpu_raise_softirq(cpu, nr); \
	local_irq_restore(flags); \
} while (0)

#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
......
@@ -22,6 +22,7 @@ typedef struct {
#endif
	unsigned int __local_bh_count;
	unsigned int __syscall_count;
+	struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
......
@@ -19,11 +19,12 @@ do { if (!--local_bh_count(smp_processor_id()) && \
		__sti();				\
	}						\
} while (0)

-#define __cpu_raise_softirq(cpu, nr) (softirq_pending(cpu) |= (1<<nr))
-#define raise_softirq(nr) \
+#define __do_cpu_raise_softirq(cpu, nr) (softirq_pending(cpu) |= (1<<nr))
+#define __cpu_raise_softirq(cpu,nr) \
do { unsigned long flags; \
	local_irq_save(flags); \
-	__cpu_raise_softirq(smp_processor_id(), nr); \
+	__do_cpu_raise_softirq(cpu, nr); \
	local_irq_restore(flags); \
} while (0)

#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
......
@@ -73,8 +73,9 @@ struct softirq_action
asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
+extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
+extern void FASTCALL(raise_softirq(unsigned int nr));
@@ -129,7 +130,7 @@ extern struct tasklet_head tasklet_vec[NR_CPUS];
extern struct tasklet_head tasklet_hi_vec[NR_CPUS];

#define tasklet_trylock(t) (!test_and_set_bit(TASKLET_STATE_RUN, &(t)->state))
-#define tasklet_unlock(t) clear_bit(TASKLET_STATE_RUN, &(t)->state)
+#define tasklet_unlock(t) do { smp_mb__before_clear_bit(); clear_bit(TASKLET_STATE_RUN, &(t)->state); } while(0)
#define tasklet_unlock_wait(t) while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }

extern void tasklet_schedule(struct tasklet_struct *t);
......
@@ -30,6 +30,7 @@ extern irq_cpustat_t irq_stat[];	/* defined in asm/hardirq.h */
#define local_irq_count(cpu)	__IRQ_STAT((cpu), __local_irq_count)
#define local_bh_count(cpu)	__IRQ_STAT((cpu), __local_bh_count)
#define syscall_count(cpu)	__IRQ_STAT((cpu), __syscall_count)
+#define ksoftirqd_task(cpu)	__IRQ_STAT((cpu), __ksoftirqd_task)
/* arch dependent irq_stat fields */
#define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)	/* i386, ia64 */
......
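The new ksoftirqd_task(cpu) accessor is what lets the softirq code both read and assign the per-CPU thread pointer (see "ksoftirqd_task(cpu) = current" further down). For reference, the 2.4-era __IRQ_STAT expands to an lvalue; a rough sketch of its shape, quoted from memory rather than this patch:

	/* Approximate shape of __IRQ_STAT from <linux/irq_cpustat.h> (2.4-era). */
	#ifdef CONFIG_SMP
	#define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
	#else
	#define __IRQ_STAT(cpu, member)	((void)(cpu), irq_stat[0].member)
	#endif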
@@ -487,7 +487,7 @@ static inline void __netif_schedule(struct net_device *dev)
		local_irq_save(flags);
		dev->next_sched = softnet_data[cpu].output_queue;
		softnet_data[cpu].output_queue = dev;
-		__cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
@@ -536,7 +536,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
		local_irq_save(flags);
		skb->next = softnet_data[cpu].completion_queue;
		softnet_data[cpu].completion_queue = skb;
-		__cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
......
@@ -538,6 +538,8 @@ EXPORT_SYMBOL(tasklet_init);
EXPORT_SYMBOL(tasklet_kill);
EXPORT_SYMBOL(__run_task_queue);
EXPORT_SYMBOL(do_softirq);
+EXPORT_SYMBOL(raise_softirq);
+EXPORT_SYMBOL(cpu_raise_softirq);
EXPORT_SYMBOL(tasklet_schedule);
EXPORT_SYMBOL(tasklet_hi_schedule);
......
@@ -543,11 +543,6 @@ asmlinkage void schedule(void)

	release_kernel_lock(prev, this_cpu);

-	/* Do "administrative" work here while we don't hold any locks */
-	if (softirq_pending(this_cpu))
-		goto handle_softirq;
-handle_softirq_back:
-
	/*
	 * 'sched_data' is protected by the fact that we can run
	 * only one process per CPU.
@@ -689,14 +684,12 @@ asmlinkage void schedule(void)
		goto repeat_schedule;

still_running:
-	if (!(prev->cpus_allowed & (1UL << this_cpu)))
-		goto still_running_back;
	c = goodness(prev, this_cpu, prev->active_mm);
	next = prev;
	goto still_running_back;

-handle_softirq:
-	do_softirq();
-	goto handle_softirq_back;
-
move_rr_last:
	if (!prev->counter) {
		prev->counter = NICE_TO_TICKS(prev->nice);
......
@@ -47,21 +47,38 @@ irq_cpustat_t irq_stat[NR_CPUS];

static struct softirq_action softirq_vec[32] __cacheline_aligned;

+/*
+ * we cannot loop indefinitely here to avoid userspace starvation,
+ * but we also don't want to introduce a worst case 1/HZ latency
+ * to the pending events, so lets the scheduler to balance
+ * the softirq load for us.
+ */
+static inline void wakeup_softirqd(unsigned cpu)
+{
+	struct task_struct * tsk = ksoftirqd_task(cpu);
+
+	if (tsk && tsk->state != TASK_RUNNING)
+		wake_up_process(tsk);
+}
+
asmlinkage void do_softirq()
{
	int cpu = smp_processor_id();
	__u32 pending;
+	long flags;
+	__u32 mask;

	if (in_interrupt())
		return;

-	local_irq_disable();
+	local_irq_save(flags);

	pending = softirq_pending(cpu);

	if (pending) {
		struct softirq_action *h;

+		mask = ~pending;
		local_bh_disable();
restart:
		/* Reset the pending bitmask before enabling irqs */
@@ -81,14 +98,40 @@ asmlinkage void do_softirq()
		local_irq_disable();

		pending = softirq_pending(cpu);
-		if (pending)
+		if (pending & mask) {
+			mask &= ~pending;
			goto restart;
+		}
		__local_bh_enable();

+		if (pending)
+			wakeup_softirqd(cpu);
	}

-	local_irq_enable();
+	local_irq_restore(flags);
+}
+
+inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
+{
+	__cpu_raise_softirq(cpu, nr);
+
+	/*
+	 * If we're in an interrupt or bh, we're done
+	 * (this also catches bh-disabled code). We will
+	 * actually run the softirq once we return from
+	 * the irq or bh.
+	 *
+	 * Otherwise we wake up ksoftirqd to make sure we
+	 * schedule the softirq soon.
+	 */
+	if (!(local_irq_count(cpu) | local_bh_count(cpu)))
+		wakeup_softirqd(cpu);
+}
+
+void raise_softirq(unsigned int nr)
+{
+	cpu_raise_softirq(smp_processor_id(), nr);
+}

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
@@ -112,11 +155,10 @@ void tasklet_schedule(struct tasklet_struct *t)
	 * If nobody is running it then add it to this CPU's
	 * tasklet queue.
	 */
-	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
-	    tasklet_trylock(t)) {
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		t->next = tasklet_vec[cpu].list;
		tasklet_vec[cpu].list = t;
-		__cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+		cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
		tasklet_unlock(t);
	}
	local_irq_restore(flags);
@@ -130,11 +172,10 @@ void tasklet_hi_schedule(struct tasklet_struct *t)
	cpu = smp_processor_id();
	local_irq_save(flags);

-	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
-	    tasklet_trylock(t)) {
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		t->next = tasklet_hi_vec[cpu].list;
		tasklet_hi_vec[cpu].list = t;
-		__cpu_raise_softirq(cpu, HI_SOFTIRQ);
+		cpu_raise_softirq(cpu, HI_SOFTIRQ);
		tasklet_unlock(t);
	}
	local_irq_restore(flags);
@@ -148,37 +189,30 @@ static void tasklet_action(struct softirq_action *a)
	local_irq_disable();
	list = tasklet_vec[cpu].list;
	tasklet_vec[cpu].list = NULL;
+	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

-		/*
-		 * A tasklet is only added to the queue while it's
-		 * locked, so no other CPU can have this tasklet
-		 * pending:
-		 */
		if (!tasklet_trylock(t))
			BUG();
-repeat:
-		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-			BUG();
		if (!atomic_read(&t->count)) {
-			local_irq_enable();
+			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				BUG();
			t->func(t->data);
-			local_irq_disable();
-			/*
-			 * One more run if the tasklet got reactivated:
-			 */
-			if (test_bit(TASKLET_STATE_SCHED, &t->state))
-				goto repeat;
+			tasklet_unlock(t);
+			continue;
		}
		tasklet_unlock(t);
-		if (test_bit(TASKLET_STATE_SCHED, &t->state))
-			tasklet_schedule(t);
+
+		local_irq_disable();
+		t->next = tasklet_vec[cpu].list;
+		tasklet_vec[cpu].list = t;
+		cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+		local_irq_enable();
	}
-	local_irq_enable();
}
@@ -193,6 +227,7 @@ static void tasklet_hi_action(struct softirq_action *a)
	local_irq_disable();
	list = tasklet_hi_vec[cpu].list;
	tasklet_hi_vec[cpu].list = NULL;
+	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;
@@ -201,21 +236,21 @@ static void tasklet_hi_action(struct softirq_action *a)
		if (!tasklet_trylock(t))
			BUG();
-repeat:
-		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-			BUG();
		if (!atomic_read(&t->count)) {
-			local_irq_enable();
+			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				BUG();
			t->func(t->data);
-			local_irq_disable();
-			if (test_bit(TASKLET_STATE_SCHED, &t->state))
-				goto repeat;
+			tasklet_unlock(t);
+			continue;
		}
		tasklet_unlock(t);
-		if (test_bit(TASKLET_STATE_SCHED, &t->state))
-			tasklet_hi_schedule(t);
+
+		local_irq_disable();
+		t->next = tasklet_hi_vec[cpu].list;
+		tasklet_hi_vec[cpu].list = t;
+		cpu_raise_softirq(cpu, HI_SOFTIRQ);
+		local_irq_enable();
	}
-	local_irq_enable();
}
@@ -335,3 +370,61 @@ void __run_task_queue(task_queue *list)
			f(data);
	}
}
+
+static int ksoftirqd(void * __bind_cpu)
+{
+	int bind_cpu = *(int *) __bind_cpu;
+	int cpu = cpu_logical_map(bind_cpu);
+
+	daemonize();
+	current->nice = 19;
+	sigfillset(&current->blocked);
+
+	/* Migrate to the right CPU */
+	current->cpus_allowed = 1UL << cpu;
+	while (smp_processor_id() != cpu)
+		schedule();
+
+	sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);
+
+	__set_current_state(TASK_INTERRUPTIBLE);
+	mb();
+
+	ksoftirqd_task(cpu) = current;
+
+	for (;;) {
+		if (!softirq_pending(cpu))
+			schedule();
+
+		__set_current_state(TASK_RUNNING);
+
+		while (softirq_pending(cpu)) {
+			do_softirq();
+			if (current->need_resched)
+				schedule();
+		}
+
+		__set_current_state(TASK_INTERRUPTIBLE);
+	}
+}
+
+static __init int spawn_ksoftirqd(void)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+		if (kernel_thread(ksoftirqd, (void *) &cpu,
+				  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
+			printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
+		else {
+			while (!ksoftirqd_task(cpu_logical_map(cpu))) {
+				current->policy |= SCHED_YIELD;
+				schedule();
+			}
+		}
+	}
+
+	return 0;
+}
+
+__initcall(spawn_ksoftirqd);
@@ -1217,6 +1217,8 @@ int netif_rx(struct sk_buff *skb)
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue,skb);
+
+			/* Runs from irqs or BH's, no need to wake BH */
			__cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
			local_irq_restore(flags);
#ifndef OFFLINE_SAMPLE
@@ -1527,6 +1529,8 @@ static void net_rx_action(struct softirq_action *h)
			local_irq_disable();
			netdev_rx_stat[this_cpu].time_squeeze++;
+
+			/* This already runs in BH context, no need to wake up BH's */
			__cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
			local_irq_enable();
......
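These two net/core/dev.c call sites deliberately keep the raw __cpu_raise_softirq(): as their new comments say, they already run in irq or BH context, where the pending bit is serviced on exit, so the ksoftirqd wakeup in cpu_raise_softirq() would be wasted work. A hypothetical helper (not in the patch) summarizing the convention this change establishes:

	/* Hypothetical helper, illustrating the calling convention only. */
	static inline void sketch_raise(unsigned int cpu, unsigned int nr)
	{
		if (in_interrupt())
			__cpu_raise_softirq(cpu, nr);	/* serviced on irq/bh exit */
		else
			cpu_raise_softirq(cpu, nr);	/* may need ksoftirqd wakeup */
	}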