Commit 01bc15ed authored by Robert Love's avatar Robert Love Committed by Linus Torvalds

[PATCH] O(1) count_active_tasks

This is William Irwin's algorithmically O(1) version of
count_active_tasks (which is currently O(n) for n total tasks on the
system).

I like it a lot: we become O(1) because now we count uninterruptible
tasks, so we can return (nr_uninterruptible + nr_running).  It does not
introduce any overhead or hurt the case for small n, so I have no
complaints.

This copy has a small optimization over the original posting, but is
otherwise the same thing wli posted earlier.  I have tested to make sure
this returns accurate results and that the kernel profile improves.
parent 5ff8f2bb
...@@ -80,6 +80,7 @@ extern unsigned long avenrun[]; /* Load averages */ ...@@ -80,6 +80,7 @@ extern unsigned long avenrun[]; /* Load averages */
extern int nr_threads; extern int nr_threads;
extern int last_pid; extern int last_pid;
extern unsigned long nr_running(void); extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
#include <linux/time.h> #include <linux/time.h>
#include <linux/param.h> #include <linux/param.h>
......
...@@ -137,6 +137,7 @@ struct runqueue { ...@@ -137,6 +137,7 @@ struct runqueue {
spinlock_t lock; spinlock_t lock;
spinlock_t frozen; spinlock_t frozen;
unsigned long nr_running, nr_switches, expired_timestamp; unsigned long nr_running, nr_switches, expired_timestamp;
signed long nr_uninterruptible;
task_t *curr, *idle; task_t *curr, *idle;
prio_array_t *active, *expired, arrays[2]; prio_array_t *active, *expired, arrays[2];
int prev_nr_running[NR_CPUS]; int prev_nr_running[NR_CPUS];
...@@ -244,6 +245,8 @@ static inline void activate_task(task_t *p, runqueue_t *rq) ...@@ -244,6 +245,8 @@ static inline void activate_task(task_t *p, runqueue_t *rq)
/*
 * deactivate_task - take a task off its runqueue.
 *
 * Decrements the runqueue's runnable count and, when the task is
 * entering TASK_UNINTERRUPTIBLE sleep, bumps the per-runqueue
 * nr_uninterruptible counter so nr_uninterruptible() can report a
 * system-wide total without walking the task list.
 * NOTE(review): presumably called with the runqueue lock held — the
 * counters are not updated atomically here; confirm against callers.
 */
static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
	rq->nr_running--;
	if (p->state == TASK_UNINTERRUPTIBLE)
		rq->nr_uninterruptible++;
	dequeue_task(p, p->array);
	p->array = NULL;
}
...@@ -323,11 +326,15 @@ static int try_to_wake_up(task_t * p) ...@@ -323,11 +326,15 @@ static int try_to_wake_up(task_t * p)
{ {
unsigned long flags; unsigned long flags;
int success = 0; int success = 0;
long old_state;
runqueue_t *rq; runqueue_t *rq;
rq = task_rq_lock(p, &flags); rq = task_rq_lock(p, &flags);
old_state = p->state;
p->state = TASK_RUNNING; p->state = TASK_RUNNING;
if (!p->array) { if (!p->array) {
if (old_state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible--;
activate_task(p, rq); activate_task(p, rq);
if (p->prio < rq->curr->prio) if (p->prio < rq->curr->prio)
resched_task(rq->curr); resched_task(rq->curr);
...@@ -433,6 +440,16 @@ unsigned long nr_running(void) ...@@ -433,6 +440,16 @@ unsigned long nr_running(void)
return sum; return sum;
} }
/*
 * nr_uninterruptible - system-wide count of TASK_UNINTERRUPTIBLE tasks.
 *
 * Sums the per-runqueue counters maintained by deactivate_task() and
 * try_to_wake_up(), so the cost scales with the number of CPUs rather
 * than the number of tasks.
 */
unsigned long nr_uninterruptible(void)
{
	unsigned long cpu, total = 0;

	for (cpu = 0; cpu < smp_num_cpus; cpu++)
		total += cpu_rq(cpu_logical_map(cpu))->nr_uninterruptible;

	return total;
}
unsigned long nr_context_switches(void) unsigned long nr_context_switches(void)
{ {
unsigned long i, sum = 0; unsigned long i, sum = 0;
......
...@@ -597,17 +597,7 @@ void update_process_times(int user_tick) ...@@ -597,17 +597,7 @@ void update_process_times(int user_tick)
*/ */
static unsigned long count_active_tasks(void) static unsigned long count_active_tasks(void)
{ {
struct task_struct *p; return (nr_running() + nr_uninterruptible()) * FIXED_1;
unsigned long nr = 0;
read_lock(&tasklist_lock);
for_each_task(p) {
if ((p->state == TASK_RUNNING ||
(p->state & TASK_UNINTERRUPTIBLE)))
nr += FIXED_1;
}
read_unlock(&tasklist_lock);
return nr;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment