Commit 56d8b39d authored by Patrick Mochel

Merge bk://ldm@bkbits.net/linux-2.5

into osdl.org:/home/mochel/src/kernel/devel/linux-2.5
parents abe2e064 4a99b33d
@@ -380,18 +380,21 @@ static ssize_t mousedev_read(struct file * file, char * buffer, size_t count, lo
 	if (!list->ready && !list->buffer) {
 		add_wait_queue(&list->mousedev->wait, &wait);
-		set_current_state(TASK_INTERRUPTIBLE);
-		while (!list->ready) {
-			if (file->f_flags & O_NONBLOCK) {
-				retval = -EAGAIN;
-				break;
-			}
-			if (signal_pending(current)) {
-				retval = -ERESTARTSYS;
-				break;
-			}
+		for (;;) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			retval = 0;
+			if (list->ready || list->buffer)
+				break;
+			retval = -EAGAIN;
+			if (file->f_flags & O_NONBLOCK)
+				break;
+			retval = -ERESTARTSYS;
+			if (signal_pending(current))
+				break;
 			schedule();
 		}
...
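A note on the ordering in the new loop: set_current_state(TASK_INTERRUPTIBLE) is now called before the wake-up condition is re-checked on every iteration, so a wake_up_interruptible() on list->mousedev->wait that fires between the check and schedule() just leaves the task runnable instead of being lost, and retval is preloaded with the value the loop should exit with. The generic shape of the idiom, as a hedged sketch in kernel-style C (cond, wq and the waker fragment are placeholders, not code from this commit):

	/* Sleeper side: mark the task sleeping *before* testing the condition. */
	add_wait_queue(&wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (cond)
			break;			/* condition already true, do not sleep */
		if (signal_pending(current))
			break;			/* bail out when a signal is pending */
		schedule();			/* sleep; a wake-up makes the task runnable again */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq, &wait);

	/* Waker side: make the condition true, then wake the queue. */
	cond = 1;
	wake_up_interruptible(&wq);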
@@ -648,7 +648,7 @@ int dbNextAG(struct inode *ipbmap)
 	agpref = bmp->db_agpref;
 	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
 	    (bmp->db_agfree[agpref] >= avgfree))
-		goto found;
+		goto unlock;

 	/* From the last preferred ag, find the next one with at least
 	 * average free space.
@@ -660,9 +660,12 @@ int dbNextAG(struct inode *ipbmap)
 		if (atomic_read(&bmp->db_active[agpref]))
 			/* open file is currently growing in this ag */
 			continue;
-		if (bmp->db_agfree[agpref] >= avgfree)
-			goto found;
-		else if (bmp->db_agfree[agpref] > hwm) {
+		if (bmp->db_agfree[agpref] >= avgfree) {
+			/* Return this one */
+			bmp->db_agpref = agpref;
+			goto unlock;
+		} else if (bmp->db_agfree[agpref] > hwm) {
+			/* Less than avg. freespace, but best so far */
 			hwm = bmp->db_agfree[agpref];
 			next_best = agpref;
 		}
@@ -673,12 +676,9 @@ int dbNextAG(struct inode *ipbmap)
 	 * next best
 	 */
 	if (next_best != -1)
-		agpref = next_best;
-
-	/* else agpref should be back to its original value */
-found:
-	bmp->db_agpref = agpref;
+		bmp->db_agpref = next_best;
+	/* else leave db_agpref unchanged */

+unlock:
 	BMAP_UNLOCK(bmp);

 	/* return the preferred group.
...
@@ -253,7 +253,7 @@ static int flock_make_lock(struct file *filp,
 	fl->fl_file = filp;
 	fl->fl_pid = current->pid;
-	fl->fl_flags = FL_FLOCK;
+	fl->fl_flags = (cmd & LOCK_NB) ? FL_FLOCK : FL_FLOCK | FL_SLEEP;
 	fl->fl_type = type;
 	fl->fl_end = OFFSET_MAX;
...
@@ -85,7 +85,8 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
-	read_lock(&__brlock_array[smp_processor_id()][idx]);
+	preempt_disable();
+	_raw_read_lock(&__brlock_array[smp_processor_id()][idx]);
 }

 static inline void br_read_unlock (enum brlock_indices idx)
@@ -109,6 +110,7 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();

+	preempt_disable();
 	ctr = &__brlock_array[smp_processor_id()][idx];
 	lock = &__br_write_locks[idx].lock;
 again:
@@ -147,6 +149,7 @@ static inline void br_read_unlock (enum brlock_indices idx)
 	wmb();
 	(*ctr)--;
+	preempt_enable();
 }

 #endif /* __BRLOCK_USE_ATOMICS */
...
@@ -514,9 +514,10 @@ static inline void __netif_schedule(struct net_device *dev)
 {
 	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;

 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		dev->next_sched = softnet_data[cpu].output_queue;
 		softnet_data[cpu].output_queue = dev;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -563,10 +564,11 @@ static inline int netif_running(struct net_device *dev)
 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 {
 	if (atomic_dec_and_test(&skb->users)) {
-		int cpu =smp_processor_id();
+		int cpu;
 		unsigned long flags;

 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		skb->next = softnet_data[cpu].completion_queue;
 		softnet_data[cpu].completion_queue = skb;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -726,9 +728,10 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
 static inline void __netif_rx_schedule(struct net_device *dev)
 {
 	unsigned long flags;
-	int cpu = smp_processor_id();
+	int cpu;

 	local_irq_save(flags);
+	cpu = smp_processor_id();
 	dev_hold(dev);
 	list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 	if (dev->quota < 0)
@@ -754,11 +757,12 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
 	if (netif_rx_schedule_prep(dev)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;

 		dev->quota += undo;
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 		__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
 		local_irq_restore(flags);
...
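All four netdevice.h hunks fix the same preemption race: on a preemptible kernel, evaluating smp_processor_id() before local_irq_save() lets the task be preempted and migrated to another CPU, so the cached cpu value can index a different CPU's softnet_data than the one the code ends up running on. Sampling the CPU id only after interrupts are off closes the window, because a running task cannot be moved to another CPU while its local interrupts are disabled (the brlock.h change earlier addresses the same problem with an explicit preempt_disable()). A minimal before/after sketch of the idiom in kernel-style C; struct item, per_cpu_queue and the function names are illustrative placeholders, not kernel symbols:

	struct item;
	static struct item *per_cpu_queue[NR_CPUS];

	static void enqueue_racy(struct item *it)
	{
		unsigned long flags;
		int cpu = smp_processor_id();	/* task may migrate right after this read... */

		local_irq_save(flags);
		per_cpu_queue[cpu] = it;	/* ...so this can touch another CPU's slot */
		local_irq_restore(flags);
	}

	static void enqueue_fixed(struct item *it)
	{
		unsigned long flags;
		int cpu;

		local_irq_save(flags);		/* no preemption or migration past this point */
		cpu = smp_processor_id();	/* guaranteed to be the CPU we stay on */
		per_cpu_queue[cpu] = it;
		local_irq_restore(flags);
	}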
@@ -86,9 +86,9 @@ extern void get_page_state(struct page_state *ret);
 #define mod_page_state(member, delta)	\
 	do {	\
-		preempt_disable();	\
-		page_states[smp_processor_id()].member += (delta);	\
-		preempt_enable();	\
+		int cpu = get_cpu();	\
+		page_states[cpu].member += (delta);	\
+		put_cpu();	\
 	} while (0)

 #define inc_page_state(member)	mod_page_state(member, 1UL)
...
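The new mod_page_state() body is the standard way to touch per-CPU state on a preemptible kernel: get_cpu() disables preemption and returns the current CPU number, and put_cpu() re-enables preemption once the per-CPU slot has been updated. A minimal sketch of the same idiom with a hypothetical per-CPU counter (hit_count and count_hit are illustrative, not part of this commit):

	static unsigned long hit_count[NR_CPUS];

	static inline void count_hit(void)
	{
		int cpu = get_cpu();	/* disables preemption, returns this CPU's id */

		hit_count[cpu]++;	/* safe: the task cannot migrate while preemption is off */

		put_cpu();		/* re-enables preemption */
	}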
@@ -626,7 +626,7 @@ NORET_TYPE void do_exit(long code)
 	tsk->flags |= PF_EXITING;
 	del_timer_sync(&tsk->real_timer);

-	if (unlikely(preempt_count()))
+	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
 				preempt_count());
...
@@ -472,6 +472,7 @@ void check_highmem_ptes(void)
 {
 	int idx, type;

+	preempt_disable();
 	for (type = 0; type < KM_TYPE_NR; type++) {
 		idx = type + KM_TYPE_NR*smp_processor_id();
 		if (!pte_none(*(kmap_pte-idx))) {
@@ -479,6 +480,7 @@ void check_highmem_ptes(void)
 			BUG();
 		}
 	}
+	preempt_enable();
 }

 #endif
@@ -193,6 +193,11 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		if (error)
 			goto fail;
 	}

+	/*
+	 * Unless it returns an error, this function always sets *pprev to
+	 * the first vma for which vma->vm_end >= end.
+	 */
+	*pprev = vma;
+
 	if (end != vma->vm_end) {
 		error = split_vma(mm, vma, end, 0);
...
@@ -1357,11 +1357,7 @@ void* kmem_cache_alloc_batch(kmem_cache_t* cachep, int flags)
 		cc_entry(cc)[cc->avail++] =
 				kmem_cache_alloc_one_tail(cachep, slabp);
 	}
-	/*
-	 * CAREFUL: do not enable preemption yet, the per-CPU
-	 * entries rely on us being atomic.
-	 */
-	_raw_spin_unlock(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 	if (cc->avail)
 		return cc_entry(cc)[--cc->avail];
@@ -1389,8 +1385,6 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 		STATS_INC_ALLOCMISS(cachep);
 		objp = kmem_cache_alloc_batch(cachep,flags);
 		local_irq_restore(save_flags);
-		/* end of non-preemptible region */
-		preempt_enable();
 		if (!objp)
 			goto alloc_new_slab_nolock;
 		return objp;
...