Commit c82513e5 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Change rq->nr_running to unsigned int

Since there's a PID space limit of 30 bits (see futex.h:FUTEX_TID_MASK)
and allocating that many tasks (assuming a lower bound of 2 pages per
task) would still take 8T of memory, it seems reasonable to say that
unsigned int is sufficient for rq->nr_running.

When we do get anywhere near that number of tasks I suspect other
things would go funny; load-balancer load computations would really
need to be hoisted to 128 bits, etc.

So save a few bytes and convert rq->nr_running and friends to
unsigned int.
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-y3tvyszjdmbibade5bw8zl81@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ad7687dd
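
A quick sanity check of the 8T figure in the changelog, as a standalone sketch (not part of the commit, and assuming the usual 4 KiB page size):

#include <stdio.h>

int main(void)
{
	unsigned long long max_tasks = 1ULL << 30;	/* FUTEX_TID_MASK limits PIDs to 30 bits */
	unsigned long long per_task  = 2ULL * 4096;	/* assumed lower bound: two 4 KiB pages  */
	unsigned long long total     = max_tasks * per_task;

	/* 2^30 tasks * 8 KiB/task = 2^43 bytes = 8 TiB; 2^30 itself also fits
	 * comfortably in a 32-bit unsigned int. */
	printf("%llu TiB\n", total >> 40);
	return 0;
}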
...
@@ -202,7 +202,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			SPLIT_NS(spread0));
 	SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
-	SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
+	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
...
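
The hunk above only keeps the debugfs printout in sync with the new field type: once nr_running is unsigned int, the old %ld specifier would trip gcc's -Wformat check, so the patch switches it to %d. A toy illustration (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int nr_running = 3;

	/* printf("%ld\n", nr_running);  -- would now warn: %ld expects long int */
	printf("%d\n", nr_running);	/* matches the int-sized field, as the patch does */
	return 0;
}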
@@ -4447,10 +4447,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	 * correctly treated as an imbalance.
 	 */
 	env.flags |= LBF_ALL_PINNED;
 	env.load_move = imbalance;
 	env.src_cpu = busiest->cpu;
 	env.src_rq = busiest;
-	env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
+	env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);

 more_balance:
 	local_irq_save(flags);
...
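
The min_t() -> min() change above falls out of the type change: the kernel's min() rejects mixed-type operands (via a dummy pointer comparison that warns), so while busiest->nr_running was unsigned long and sysctl_sched_nr_migrate was unsigned int, the code had to force a common type with min_t(unsigned long, ...). With both counters now unsigned int, plain min() compiles cleanly. A simplified, paraphrased sketch of the two macros (based on include/linux/kernel.h of that era; example_loop_max() and its variables are made up for illustration):

#define min(x, y) ({					\
	typeof(x) _min1 = (x);				\
	typeof(y) _min2 = (y);				\
	(void) (&_min1 == &_min2);	/* warns if x and y differ in type */	\
	_min1 < _min2 ? _min1 : _min2; })

#define min_t(type, x, y) ({				\
	type __min1 = (x);				\
	type __min2 = (y);				\
	__min1 < __min2 ? __min1 : __min2; })

unsigned int sysctl_sched_nr_migrate = 32;	/* unsigned int in the kernel          */
unsigned int nr_running;			/* was unsigned long before this patch */

unsigned int example_loop_max(void)
{
	/* Both operands are unsigned int now, so plain min() is enough. */
	return min(sysctl_sched_nr_migrate, nr_running);
}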
@@ -201,7 +201,7 @@ struct cfs_bandwidth { };
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
-	unsigned long nr_running, h_nr_running;
+	unsigned int nr_running, h_nr_running;

 	u64 exec_clock;
 	u64 min_vruntime;
...
@@ -279,7 +279,7 @@ static inline int rt_bandwidth_enabled(void)
 /* Real-Time classes' related field in a runqueue: */
 struct rt_rq {
 	struct rt_prio_array active;
-	unsigned long rt_nr_running;
+	unsigned int rt_nr_running;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	struct {
 		int curr; /* highest queued rt task prio */
...
@@ -353,7 +353,7 @@ struct rq {
 	 * nr_running and cpu_load should be in the same cacheline because
 	 * remote CPUs use both these fields when doing load calculation.
 	 */
-	unsigned long nr_running;
+	unsigned int nr_running;
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned long last_load_update_tick;
...
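
The three struct hunks above are where the "few bytes" are actually saved: on 64-bit kernels unsigned long is 8 bytes while unsigned int is 4, so each converted counter shrinks by half. A minimal illustration using stand-in structs rather than the real scheduler types:

#include <stdio.h>

/* Stand-in fragments, not the real cfs_rq/rt_rq/rq definitions. */
struct counters_before { unsigned long nr_running, h_nr_running; };	/* 16 bytes on 64-bit */
struct counters_after  { unsigned int  nr_running, h_nr_running; };	/*  8 bytes           */

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct counters_before), sizeof(struct counters_after));
	return 0;
}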