Commit 2b5726db authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] sched: fix scheduling latencies for !PREEMPT kernels

This patch adds a handful of cond_resched() points to a number of key,
scheduling-latency related non-inlined functions.

This reduces preemption latency for !PREEMPT kernels.  These are scheduling
points complementary to PREEMPT_VOLUNTARY scheduling points (might_sleep()
places) - i.e.  these are all points where an explicit cond_resched() had
to be added.

Has been tested as part of the -VP patchset.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a0332406
......@@ -186,6 +186,7 @@ static int count(char __user * __user * argv, int max)
argv++;
if(++i > max)
return -E2BIG;
cond_resched();
}
}
return i;
......
......@@ -378,6 +378,7 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
list_move(&inode->i_list, &sb->s_dirty);
}
spin_unlock(&inode_lock);
cond_resched();
iput(inode);
spin_lock(&inode_lock);
if (wbc->nr_to_write <= 0)
......
......@@ -240,6 +240,7 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
retval++;
}
}
cond_resched();
}
if (res_in)
*rinp = res_in;
......
......@@ -284,6 +284,7 @@ int do_syslog(int type, char __user * buf, int len)
error = __put_user(c,buf);
buf++;
i++;
cond_resched();
spin_lock_irq(&logbuf_lock);
}
spin_unlock_irq(&logbuf_lock);
......@@ -325,6 +326,7 @@ int do_syslog(int type, char __user * buf, int len)
c = LOG_BUF(j);
spin_unlock_irq(&logbuf_lock);
error = __put_user(c,&buf[count-1-i]);
cond_resched();
spin_lock_irq(&logbuf_lock);
}
spin_unlock_irq(&logbuf_lock);
......@@ -340,6 +342,7 @@ int do_syslog(int type, char __user * buf, int len)
error = -EFAULT;
break;
}
cond_resched();
}
}
break;
......
......@@ -1742,6 +1742,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
smp_rmb(); /* Prevent CPU from reordering lock-free ->nopage() */
retry:
cond_resched();
new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
/* no page was available -- either SIGBUS or OOM */
......
......@@ -2836,7 +2836,7 @@ static void cache_reap(void *unused)
next_unlock:
spin_unlock_irq(&searchp->spinlock);
next:
;
cond_resched();
}
check_irq_on();
up(&cache_chain_sem);
......
......@@ -361,6 +361,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
int may_enter_fs;
int referenced;
cond_resched();
page = lru_to_page(page_list);
list_del(&page->lru);
......@@ -710,6 +712,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
reclaim_mapped = 1;
while (!list_empty(&l_hold)) {
cond_resched();
page = lru_to_page(&l_hold);
list_del(&page->lru);
if (page_mapped(page)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment