Commit 66fef08f, authored Mar 26, 2009 by Ingo Molnar

    Merge branch 'sched/balancing' into sched/core

Parents: b6d98422, b7bb4c9b

1 changed file with 515 additions and 250 deletions:

    kernel/sched.c  (+515, -250)
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3189,61 +3189,260 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
-/*
- * find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the amount of weighted load which
- * should be moved to restore balance via the imbalance parameter.
- */
-static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
-           unsigned long *imbalance, enum cpu_idle_type idle,
-           int *sd_idle, const struct cpumask *cpus, int *balance)
-{
-    struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
-    unsigned long max_load, avg_load, total_load, this_load, total_pwr;
-    unsigned long max_pull;
-    unsigned long busiest_load_per_task, busiest_nr_running;
-    unsigned long this_load_per_task, this_nr_running;
-    int load_idx, group_imb = 0;
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-    int power_savings_balance = 1;
-    unsigned long leader_nr_running = 0, min_load_per_task = 0;
-    unsigned long min_nr_running = ULONG_MAX;
-    struct sched_group *group_min = NULL, *group_leader = NULL;
-#endif
-
-    max_load = this_load = total_load = total_pwr = 0;
-    busiest_load_per_task = busiest_nr_running = 0;
-    this_load_per_task = this_nr_running = 0;
-
-    if (idle == CPU_NOT_IDLE)
-        load_idx = sd->busy_idx;
-    else if (idle == CPU_NEWLY_IDLE)
-        load_idx = sd->newidle_idx;
-    else
-        load_idx = sd->idle_idx;
-
-    do {
-        unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
-        int local_group;
-        int i;
-        int __group_imb = 0;
-        unsigned int balance_cpu = -1, first_idle_cpu = 0;
-        unsigned long sum_nr_running, sum_weighted_load;
-        unsigned long sum_avg_load_per_task;
-        unsigned long avg_load_per_task;
-
-        local_group = cpumask_test_cpu(this_cpu,
-                           sched_group_cpus(group));
-
-        if (local_group)
-            balance_cpu = cpumask_first(sched_group_cpus(group));
-
-        /* Tally up the load of all CPUs in the group */
-        sum_weighted_load = sum_nr_running = avg_load = 0;
-        sum_avg_load_per_task = avg_load_per_task = 0;
-
-        max_cpu_load = 0;
-        min_cpu_load = ~0UL;
+/********** Helpers for find_busiest_group ************************/
+/**
+ * sd_lb_stats - Structure to store the statistics of a sched_domain
+ *        during load balancing.
+ */
+struct sd_lb_stats {
+    struct sched_group *busiest; /* Busiest group in this sd */
+    struct sched_group *this;  /* Local group in this sd */
+    unsigned long total_load;  /* Total load of all groups in sd */
+    unsigned long total_pwr;   /* Total power of all groups in sd */
+    unsigned long avg_load;    /* Average load across all groups in sd */
+
+    /** Statistics of this group */
+    unsigned long this_load;
+    unsigned long this_load_per_task;
+    unsigned long this_nr_running;
+
+    /* Statistics of the busiest group */
+    unsigned long max_load;
+    unsigned long busiest_load_per_task;
+    unsigned long busiest_nr_running;
+
+    int group_imb; /* Is there imbalance in this sd */
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+    int power_savings_balance; /* Is powersave balance needed for this sd */
+    struct sched_group *group_min; /* Least loaded group in sd */
+    struct sched_group *group_leader; /* Group which relieves group_min */
+    unsigned long min_load_per_task; /* load_per_task in group_min */
+    unsigned long leader_nr_running; /* Nr running of group_leader */
+    unsigned long min_nr_running; /* Nr running of group_min */
+#endif
+};
+
+/**
+ * sg_lb_stats - stats of a sched_group required for load_balancing
+ */
+struct sg_lb_stats {
+    unsigned long avg_load; /* Avg load across the CPUs of the group */
+    unsigned long group_load; /* Total load over the CPUs of the group */
+    unsigned long sum_nr_running; /* Nr tasks running in the group */
+    unsigned long sum_weighted_load; /* Weighted load of group's tasks */
+    unsigned long group_capacity;
+    int group_imb; /* Is there an imbalance in the group ? */
+};
+
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+    return cpumask_first(sched_group_cpus(group));
+}
+
+/**
+ * get_sd_load_idx - Obtain the load index for a given sched domain.
+ * @sd: The sched_domain whose load_idx is to be obtained.
+ * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ */
+static inline int get_sd_load_idx(struct sched_domain *sd,
+                    enum cpu_idle_type idle)
+{
+    int load_idx;
+
+    switch (idle) {
+    case CPU_NOT_IDLE:
+        load_idx = sd->busy_idx;
+        break;
+
+    case CPU_NEWLY_IDLE:
+        load_idx = sd->newidle_idx;
+        break;
+    default:
+        load_idx = sd->idle_idx;
+        break;
+    }
+
+    return load_idx;
+}
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+/**
+ * init_sd_power_savings_stats - Initialize power savings statistics for
+ * the given sched_domain, during load balancing.
+ *
+ * @sd: Sched domain whose power-savings statistics are to be initialized.
+ * @sds: Variable containing the statistics for sd.
+ * @idle: Idle status of the CPU at which we're performing load-balancing.
+ */
+static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+    struct sd_lb_stats *sds, enum cpu_idle_type idle)
+{
+    /*
+     * Busy processors will not participate in power savings
+     * balance.
+     */
+    if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+        sds->power_savings_balance = 0;
+    else {
+        sds->power_savings_balance = 1;
+        sds->min_nr_running = ULONG_MAX;
+        sds->leader_nr_running = 0;
+    }
+}
+
+/**
+ * update_sd_power_savings_stats - Update the power saving stats for a
+ * sched_domain while performing load balancing.
+ *
+ * @group: sched_group belonging to the sched_domain under consideration.
+ * @sds: Variable containing the statistics of the sched_domain
+ * @local_group: Does group contain the CPU for which we're performing
+ *        load balancing ?
+ * @sgs: Variable containing the statistics of the group.
+ */
+static inline void update_sd_power_savings_stats(struct sched_group *group,
+    struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+{
+    if (!sds->power_savings_balance)
+        return;
+
+    /*
+     * If the local group is idle or completely loaded
+     * no need to do power savings balance at this domain
+     */
+    if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
+                !sds->this_nr_running))
+        sds->power_savings_balance = 0;
+
+    /*
+     * If a group is already running at full capacity or idle,
+     * don't include that group in power savings calculations
+     */
+    if (!sds->power_savings_balance ||
+        sgs->sum_nr_running >= sgs->group_capacity ||
+        !sgs->sum_nr_running)
+        return;
+
+    /*
+     * Calculate the group which has the least non-idle load.
+     * This is the group from where we need to pick up the load
+     * for saving power
+     */
+    if ((sgs->sum_nr_running < sds->min_nr_running) ||
+        (sgs->sum_nr_running == sds->min_nr_running &&
+         group_first_cpu(group) > group_first_cpu(sds->group_min))) {
+        sds->group_min = group;
+        sds->min_nr_running = sgs->sum_nr_running;
+        sds->min_load_per_task = sgs->sum_weighted_load /
+                        sgs->sum_nr_running;
+    }
+
+    /*
+     * Calculate the group which is almost near its
+     * capacity but still has some space to pick up some load
+     * from other group and save more power
+     */
+    if (sgs->sum_nr_running > sgs->group_capacity - 1)
+        return;
+
+    if (sgs->sum_nr_running > sds->leader_nr_running ||
+        (sgs->sum_nr_running == sds->leader_nr_running &&
+         group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
+        sds->group_leader = group;
+        sds->leader_nr_running = sgs->sum_nr_running;
+    }
+}
+
+/**
+ * check_power_save_busiest_group - Check if we have potential to perform
+ *    some power-savings balance. If yes, set the busiest group to be
+ *    the least loaded group in the sched_domain, so that it's CPUs can
+ *    be put to idle.
+ *
+ * @sds: Variable containing the statistics of the sched_domain
+ *    under consideration.
+ * @this_cpu: Cpu at which we're currently performing load-balancing.
+ * @imbalance: Variable to store the imbalance.
+ *
+ * Returns 1 if there is potential to perform power-savings balance.
+ * Else returns 0.
+ */
+static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+                    int this_cpu, unsigned long *imbalance)
+{
+    if (!sds->power_savings_balance)
+        return 0;
+
+    if (sds->this != sds->group_leader ||
+            sds->group_leader == sds->group_min)
+        return 0;
+
+    *imbalance = sds->min_load_per_task;
+    sds->busiest = sds->group_min;
+
+    if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
+        cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
+            group_first_cpu(sds->group_leader);
+    }
+
+    return 1;
+}
+#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+static inline void init_sd_power_savings_stats(struct sched_domain *sd,
+    struct sd_lb_stats *sds, enum cpu_idle_type idle)
+{
+    return;
+}
+
+static inline void update_sd_power_savings_stats(struct sched_group *group,
+    struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
+{
+    return;
+}
+
+static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
+                    int this_cpu, unsigned long *imbalance)
+{
+    return 0;
+}
+#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+
+/**
+ * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @group: sched_group whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @load_idx: Load index of sched_domain of this_cpu for load calc.
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @local_group: Does group contain this_cpu.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sgs: variable to hold the statistics for this group.
+ */
+static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+            enum cpu_idle_type idle, int load_idx, int *sd_idle,
+            int local_group, const struct cpumask *cpus,
+            int *balance, struct sg_lb_stats *sgs)
+{
+    unsigned long load, max_cpu_load, min_cpu_load;
+    int i;
+    unsigned int balance_cpu = -1, first_idle_cpu = 0;
+    unsigned long sum_avg_load_per_task;
+    unsigned long avg_load_per_task;
+
+    if (local_group)
+        balance_cpu = group_first_cpu(group);
+
+    /* Tally up the load of all CPUs in the group */
+    sum_avg_load_per_task = avg_load_per_task = 0;
+    max_cpu_load = 0;
+    min_cpu_load = ~0UL;
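The new sg_lb_stats / sd_lb_stats pair splits the bookkeeping that the old find_busiest_group() kept in local variables into two levels: per-group statistics filled for each sched_group, and domain-wide statistics into which each group's result is folded while tracking the busiest group seen so far. Below is a minimal standalone sketch of that accumulate-then-compare pattern; the group_stats/domain_stats types, the helper names, and the sample loads are illustrative stand-ins, not the kernel's structures or API.

    #include <stdio.h>

    /* Simplified stand-ins for sg_lb_stats / sd_lb_stats (illustrative only). */
    struct group_stats {
        unsigned long group_load;      /* total load of the group's CPUs   */
        unsigned long sum_nr_running;  /* rough count of busy CPUs         */
    };

    struct domain_stats {
        unsigned long total_load;      /* load summed over all groups      */
        unsigned long max_load;        /* load of the busiest group so far */
        int busiest;                   /* index of the busiest group       */
    };

    /* Per-group pass: tally the load of every CPU in one group. */
    static void update_group_stats(const unsigned long *cpu_load, int ncpus,
                                   struct group_stats *gs)
    {
        for (int i = 0; i < ncpus; i++) {
            gs->group_load += cpu_load[i];
            gs->sum_nr_running += cpu_load[i] ? 1 : 0;  /* crude proxy */
        }
    }

    /* Domain pass: fold each group's stats into the domain-wide summary. */
    static void update_domain_stats(unsigned long (*loads)[2], int ngroups,
                                    struct domain_stats *ds)
    {
        for (int g = 0; g < ngroups; g++) {
            struct group_stats gs = { 0, 0 };

            update_group_stats(loads[g], 2, &gs);
            ds->total_load += gs.group_load;
            if (gs.group_load > ds->max_load) {
                ds->max_load = gs.group_load;
                ds->busiest = g;
            }
        }
    }

    int main(void)
    {
        /* Hypothetical per-CPU loads: two groups of two CPUs each. */
        unsigned long loads[2][2] = { { 3, 1 }, { 5, 4 } };
        struct domain_stats ds = { 0, 0, -1 };

        update_domain_stats(loads, 2, &ds);
        printf("total=%lu max=%lu busiest group=%d\n",
               ds.total_load, ds.max_load, ds.busiest);
        return 0;
    }

In the diff itself the two roles are played by update_sg_lb_stats() and update_sd_lb_stats(), which appear in the later hunks.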
@@ -3269,9 +3468,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 				min_cpu_load = load;
 		}
 
-		avg_load += load;
-		sum_nr_running += rq->nr_running;
-		sum_weighted_load += weighted_cpuload(i);
+		sgs->group_load += load;
+		sgs->sum_nr_running += rq->nr_running;
+		sgs->sum_weighted_load += weighted_cpuload(i);
 		sum_avg_load_per_task += cpu_avg_load_per_task(i);
 	}
@@ -3285,15 +3484,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (idle != CPU_NEWLY_IDLE && local_group &&
 	    balance_cpu != this_cpu && balance) {
 		*balance = 0;
-		goto ret;
+		return;
 	}
 
-	total_load += avg_load;
-	total_pwr += group->__cpu_power;
-
 	/* Adjust by relative CPU power of the group */
-	avg_load = sg_div_cpu_power(group,
-			avg_load * SCHED_LOAD_SCALE);
+	sgs->avg_load = sg_div_cpu_power(group,
+			sgs->group_load * SCHED_LOAD_SCALE);
 
 	/*
@@ -3309,126 +3505,163 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 			sum_avg_load_per_task * SCHED_LOAD_SCALE);
 
 	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
-		__group_imb = 1;
-
-		group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
-
-		if (local_group) {
-			this_load = avg_load;
-			this = group;
-			this_nr_running = sum_nr_running;
-			this_load_per_task = sum_weighted_load;
-		} else if (avg_load > max_load &&
-			   (sum_nr_running > group_capacity || __group_imb)) {
-			max_load = avg_load;
-			busiest = group;
-			busiest_nr_running = sum_nr_running;
-			busiest_load_per_task = sum_weighted_load;
-			group_imb = __group_imb;
-		}
-
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-		/*
-		 * Busy processors will not participate in power savings
-		 * balance.
-		 */
-		if (idle == CPU_NOT_IDLE ||
-				!(sd->flags & SD_POWERSAVINGS_BALANCE))
-			goto group_next;
-
-		/*
-		 * If the local group is idle or completely loaded
-		 * no need to do power savings balance at this domain
-		 */
-		if (local_group && (this_nr_running >= group_capacity ||
-				    !this_nr_running))
-			power_savings_balance = 0;
-
-		/*
-		 * If a group is already running at full capacity or idle,
-		 * don't include that group in power savings calculations
-		 */
-		if (!power_savings_balance || sum_nr_running >= group_capacity
-		    || !sum_nr_running)
-			goto group_next;
-
-		/*
-		 * Calculate the group which has the least non-idle load.
-		 * This is the group from where we need to pick up the load
-		 * for saving power
-		 */
-		if ((sum_nr_running < min_nr_running) ||
-		    (sum_nr_running == min_nr_running &&
-		     cpumask_first(sched_group_cpus(group)) >
-		     cpumask_first(sched_group_cpus(group_min)))) {
-			group_min = group;
-			min_nr_running = sum_nr_running;
-			min_load_per_task = sum_weighted_load /
-						sum_nr_running;
-		}
-
-		/*
-		 * Calculate the group which is almost near its
-		 * capacity but still has some space to pick up some load
-		 * from other group and save more power
-		 */
-		if (sum_nr_running <= group_capacity - 1) {
-			if (sum_nr_running > leader_nr_running ||
-			    (sum_nr_running == leader_nr_running &&
-			     cpumask_first(sched_group_cpus(group)) <
-			     cpumask_first(sched_group_cpus(group_leader)))) {
-				group_leader = group;
-				leader_nr_running = sum_nr_running;
-			}
-		}
-group_next:
-#endif
-		group = group->next;
-	} while (group != sd->groups);
-
-	if (!busiest || this_load >= max_load || busiest_nr_running == 0)
-		goto out_balanced;
-
-	avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
-
-	if (this_load >= avg_load ||
-			100*max_load <= sd->imbalance_pct*this_load)
-		goto out_balanced;
-
-	busiest_load_per_task /= busiest_nr_running;
-	if (group_imb)
-		busiest_load_per_task = min(busiest_load_per_task, avg_load);
-
-	/*
-	 * We're trying to get all the cpus to the average_load, so we don't
-	 * want to push ourselves above the average load, nor do we wish to
-	 * reduce the max loaded cpu below the average load, as either of these
-	 * actions would just result in more rebalancing later, and ping-pong
-	 * tasks around. Thus we look for the minimum possible imbalance.
-	 * Negative imbalances (*we* are more loaded than anyone else) will
-	 * be counted as no imbalance for these purposes -- we can't fix that
-	 * by pulling tasks to us. Be careful of negative numbers as they'll
-	 * appear as very large values with unsigned longs.
-	 */
-	if (max_load <= busiest_load_per_task)
-		goto out_balanced;
-
-	/*
-	 * In the presence of smp nice balancing, certain scenarios can have
-	 * max load less than avg load(as we skip the groups at or below
-	 * its cpu_power, while calculating max_load..)
-	 */
-	if (max_load < avg_load) {
-		*imbalance = 0;
-		goto small_imbalance;
-	}
-
-	/* Don't want to pull so many tasks that a group would go idle */
-	max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
-
-	/* How much load to actually move to equalise the imbalance */
-	*imbalance = min(max_pull * busiest->__cpu_power,
-				(avg_load - this_load) * this->__cpu_power)
-			/ SCHED_LOAD_SCALE;
+		sgs->group_imb = 1;
+
+	sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
+}
+
+/**
+ * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: sched_domain whose statistics are to be updated.
+ * @this_cpu: Cpu for which load balance is currently performed.
+ * @idle: Idle status of this_cpu
+ * @sd_idle: Idle status of the sched_domain containing group.
+ * @cpus: Set of cpus considered for load balancing.
+ * @balance: Should we balance.
+ * @sds: variable to hold the statistics for this sched_domain.
+ */
+static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
+			enum cpu_idle_type idle, int *sd_idle,
+			const struct cpumask *cpus, int *balance,
+			struct sd_lb_stats *sds)
+{
+	struct sched_group *group = sd->groups;
+	struct sg_lb_stats sgs;
+	int load_idx;
+
+	init_sd_power_savings_stats(sd, sds, idle);
+	load_idx = get_sd_load_idx(sd, idle);
+
+	do {
+		int local_group;
+
+		local_group = cpumask_test_cpu(this_cpu,
+					sched_group_cpus(group));
+		memset(&sgs, 0, sizeof(sgs));
+		update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+				local_group, cpus, balance, &sgs);
+
+		if (local_group && balance && !(*balance))
+			return;
+
+		sds->total_load += sgs.group_load;
+		sds->total_pwr += group->__cpu_power;
+
+		if (local_group) {
+			sds->this_load = sgs.avg_load;
+			sds->this = group;
+			sds->this_nr_running = sgs.sum_nr_running;
+			sds->this_load_per_task = sgs.sum_weighted_load;
+		} else if (sgs.avg_load > sds->max_load &&
+			   (sgs.sum_nr_running > sgs.group_capacity ||
+				sgs.group_imb)) {
+			sds->max_load = sgs.avg_load;
+			sds->busiest = group;
+			sds->busiest_nr_running = sgs.sum_nr_running;
+			sds->busiest_load_per_task = sgs.sum_weighted_load;
+			sds->group_imb = sgs.group_imb;
+		}
+
+		update_sd_power_savings_stats(group, sds, local_group, &sgs);
+		group = group->next;
+	} while (group != sd->groups);
+}
+
+/**
+ * fix_small_imbalance - Calculate the minor imbalance that exists
+ *			amongst the groups of a sched_domain, during
+ *			load balancing.
+ * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
+ * @imbalance: Variable to store the imbalance.
+ */
+static inline void fix_small_imbalance(struct sd_lb_stats *sds,
+				int this_cpu, unsigned long *imbalance)
+{
+	unsigned long tmp, pwr_now = 0, pwr_move = 0;
+	unsigned int imbn = 2;
+
+	if (sds->this_nr_running) {
+		sds->this_load_per_task /= sds->this_nr_running;
+		if (sds->busiest_load_per_task >
+				sds->this_load_per_task)
+			imbn = 1;
+	} else
+		sds->this_load_per_task =
+			cpu_avg_load_per_task(this_cpu);
+
+	if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+			sds->busiest_load_per_task * imbn) {
+		*imbalance = sds->busiest_load_per_task;
+		return;
+	}
+
+	/*
+	 * OK, we don't have enough imbalance to justify moving tasks,
+	 * however we may be able to increase total CPU power used by
+	 * moving them.
+	 */
+
+	pwr_now += sds->busiest->__cpu_power *
+			min(sds->busiest_load_per_task, sds->max_load);
+	pwr_now += sds->this->__cpu_power *
+			min(sds->this_load_per_task, sds->this_load);
+	pwr_now /= SCHED_LOAD_SCALE;
+
+	/* Amount of load we'd subtract */
+	tmp = sg_div_cpu_power(sds->busiest,
+			sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+	if (sds->max_load > tmp)
+		pwr_move += sds->busiest->__cpu_power *
+			min(sds->busiest_load_per_task, sds->max_load - tmp);
+
+	/* Amount of load we'd add */
+	if (sds->max_load * sds->busiest->__cpu_power <
+		sds->busiest_load_per_task * SCHED_LOAD_SCALE)
+		tmp = sg_div_cpu_power(sds->this,
+			sds->max_load * sds->busiest->__cpu_power);
+	else
+		tmp = sg_div_cpu_power(sds->this,
+			sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+	pwr_move += sds->this->__cpu_power *
+			min(sds->this_load_per_task, sds->this_load + tmp);
+	pwr_move /= SCHED_LOAD_SCALE;
+
+	/* Move if we gain throughput */
+	if (pwr_move > pwr_now)
+		*imbalance = sds->busiest_load_per_task;
+}
+
+/**
+ * calculate_imbalance - Calculate the amount of imbalance present within the
+ *			 groups of a given sched_domain during load balance.
+ * @sds: statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: Cpu for which currently load balance is being performed.
+ * @imbalance: The variable to store the imbalance.
+ */
+static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+		unsigned long *imbalance)
+{
+	unsigned long max_pull;
+	/*
+	 * In the presence of smp nice balancing, certain scenarios can have
+	 * max load less than avg load(as we skip the groups at or below
+	 * its cpu_power, while calculating max_load..)
+	 */
+	if (sds->max_load < sds->avg_load) {
+		*imbalance = 0;
+		return fix_small_imbalance(sds, this_cpu, imbalance);
+	}
+
+	/* Don't want to pull so many tasks that a group would go idle */
+	max_pull = min(sds->max_load - sds->avg_load,
+			sds->max_load - sds->busiest_load_per_task);
+
+	/* How much load to actually move to equalise the imbalance */
+	*imbalance = min(max_pull * sds->busiest->__cpu_power,
+		(sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+			/ SCHED_LOAD_SCALE;
 
 	/*
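calculate_imbalance(), introduced at the end of the hunk above, keeps the old two-sided clamp on how much weighted load to move: never pull more than would drag the busiest group below the domain average, and never pull more than would leave it with less than one task's worth of load. The sketch below restates that arithmetic on plain numbers; SCALE, min_ul() and the figures in main() are assumptions chosen only for illustration, not kernel definitions.

    #include <stdio.h>

    #define SCALE 1024UL    /* stand-in for SCHED_LOAD_SCALE (illustrative) */

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    /*
     * Restatement of the clamp in calculate_imbalance(): bound the pull by
     * both the busiest group's excess over the average and the local
     * group's deficit below it, then rescale by group power.
     */
    static unsigned long imbalance(unsigned long max_load, unsigned long avg_load,
                                   unsigned long this_load,
                                   unsigned long busiest_load_per_task,
                                   unsigned long busiest_power,
                                   unsigned long this_power)
    {
        unsigned long max_pull;

        max_pull = min_ul(max_load - avg_load,
                          max_load - busiest_load_per_task);

        return min_ul(max_pull * busiest_power,
                      (avg_load - this_load) * this_power) / SCALE;
    }

    int main(void)
    {
        /* Hypothetical figures, purely to exercise the formula. */
        printf("imbalance = %lu\n",
               imbalance(3072, 2048, 1024, 512, SCALE, SCALE));
        return 0;
    }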
@@ -3437,78 +3670,110 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance < busiest_load_per_task) {
-		unsigned long tmp, pwr_now, pwr_move;
-		unsigned int imbn;
-
-small_imbalance:
-		pwr_move = pwr_now = 0;
-		imbn = 2;
-		if (this_nr_running) {
-			this_load_per_task /= this_nr_running;
-			if (busiest_load_per_task > this_load_per_task)
-				imbn = 1;
-		} else
-			this_load_per_task = cpu_avg_load_per_task(this_cpu);
-
-		if (max_load - this_load + busiest_load_per_task >=
-					busiest_load_per_task * imbn) {
-			*imbalance = busiest_load_per_task;
-			return busiest;
-		}
-
-		/*
-		 * OK, we don't have enough imbalance to justify moving tasks,
-		 * however we may be able to increase total CPU power used by
-		 * moving them.
-		 */
-
-		pwr_now += busiest->__cpu_power *
-				min(busiest_load_per_task, max_load);
-		pwr_now += this->__cpu_power *
-				min(this_load_per_task, this_load);
-		pwr_now /= SCHED_LOAD_SCALE;
-
-		/* Amount of load we'd subtract */
-		tmp = sg_div_cpu_power(busiest,
-				busiest_load_per_task * SCHED_LOAD_SCALE);
-		if (max_load > tmp)
-			pwr_move += busiest->__cpu_power *
-				min(busiest_load_per_task, max_load - tmp);
-
-		/* Amount of load we'd add */
-		if (max_load * busiest->__cpu_power <
-				busiest_load_per_task * SCHED_LOAD_SCALE)
-			tmp = sg_div_cpu_power(this,
-					max_load * busiest->__cpu_power);
-		else
-			tmp = sg_div_cpu_power(this,
-				busiest_load_per_task * SCHED_LOAD_SCALE);
-		pwr_move += this->__cpu_power *
-				min(this_load_per_task, this_load + tmp);
-		pwr_move /= SCHED_LOAD_SCALE;
-
-		/* Move if we gain throughput */
-		if (pwr_move > pwr_now)
-			*imbalance = busiest_load_per_task;
-	}
-
-	return busiest;
-
-out_balanced:
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
-		goto ret;
-
-	if (this == group_leader && group_leader != group_min) {
-		*imbalance = min_load_per_task;
-		if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
-			cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
-				cpumask_first(sched_group_cpus(group_leader));
-		}
-		return group_min;
-	}
-#endif
+	if (*imbalance < sds->busiest_load_per_task)
+		return fix_small_imbalance(sds, this_cpu, imbalance);
+
+}
+/******* find_busiest_group() helpers end here *********************/
+
+/**
+ * find_busiest_group - Returns the busiest group within the sched_domain
+ * if there is an imbalance. If there isn't an imbalance, and
+ * the user has opted for power-savings, it returns a group whose
+ * CPUs can be put to idle by rebalancing those tasks elsewhere, if
+ * such a group exists.
+ *
+ * Also calculates the amount of weighted load which should be moved
+ * to restore balance.
+ *
+ * @sd: The sched_domain whose busiest group is to be returned.
+ * @this_cpu: The cpu for which load balancing is currently being performed.
+ * @imbalance: Variable which stores amount of weighted load which should
+ *		be moved to restore balance/put a group to idle.
+ * @idle: The idle status of this_cpu.
+ * @sd_idle: The idleness of sd
+ * @cpus: The set of CPUs under consideration for load-balancing.
+ * @balance: Pointer to a variable indicating if this_cpu
+ *	is the appropriate cpu to perform load balancing at this_level.
+ *
+ * Returns:	- the busiest group if imbalance exists.
+ *		- If no imbalance and user has opted for power-savings balance,
+ *		   return the least loaded group whose CPUs can be
+ *		   put to idle by rebalancing its tasks onto our group.
+ */
+static struct sched_group *
+find_busiest_group(struct sched_domain *sd, int this_cpu,
+		   unsigned long *imbalance, enum cpu_idle_type idle,
+		   int *sd_idle, const struct cpumask *cpus, int *balance)
+{
+	struct sd_lb_stats sds;
+
+	memset(&sds, 0, sizeof(sds));
+
+	/*
+	 * Compute the various statistics relavent for load balancing at
+	 * this level.
+	 */
+	update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
+					balance, &sds);
+
+	/* Cases where imbalance does not exist from POV of this_cpu */
+	/* 1) this_cpu is not the appropriate cpu to perform load balancing
+	 *    at this level.
+	 * 2) There is no busy sibling group to pull from.
+	 * 3) This group is the busiest group.
+	 * 4) This group is more busy than the avg busieness at this
+	 *    sched_domain.
+	 * 5) The imbalance is within the specified limit.
+	 * 6) Any rebalance would lead to ping-pong
+	 */
+	if (balance && !(*balance))
+		goto ret;
+
+	if (!sds.busiest || sds.busiest_nr_running == 0)
+		goto out_balanced;
+
+	if (sds.this_load >= sds.max_load)
+		goto out_balanced;
+
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
+	if (sds.this_load >= sds.avg_load)
+		goto out_balanced;
+
+	if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+		goto out_balanced;
+
+	sds.busiest_load_per_task /= sds.busiest_nr_running;
+	if (sds.group_imb)
+		sds.busiest_load_per_task =
+			min(sds.busiest_load_per_task, sds.avg_load);
+
+	/*
+	 * We're trying to get all the cpus to the average_load, so we don't
+	 * want to push ourselves above the average load, nor do we wish to
+	 * reduce the max loaded cpu below the average load, as either of these
+	 * actions would just result in more rebalancing later, and ping-pong
+	 * tasks around. Thus we look for the minimum possible imbalance.
+	 * Negative imbalances (*we* are more loaded than anyone else) will
+	 * be counted as no imbalance for these purposes -- we can't fix that
+	 * by pulling tasks to us. Be careful of negative numbers as they'll
+	 * appear as very large values with unsigned longs.
+	 */
+	if (sds.max_load <= sds.busiest_load_per_task)
+		goto out_balanced;
+
+	/* Looks like there is an imbalance. Compute it */
+	calculate_imbalance(&sds, this_cpu, imbalance);
+	return sds.busiest;
+
+out_balanced:
+	/*
+	 * There is no obvious imbalance. But check if we can do some balancing
+	 * to save power.
+	 */
+	if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
+		return sds.busiest;
 ret:
 	*imbalance = 0;
 	return NULL;
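With this final hunk, find_busiest_group() is reduced to a driver: collect statistics with update_sd_lb_stats(), walk a cascade of "no imbalance" early exits, and only then call calculate_imbalance(). The standalone sketch below mirrors that early-exit cascade on a simplified stats structure; lb_stats, pick_busiest(), IMBALANCE_PCT and the sample numbers are hypothetical stand-ins used only to illustrate the control flow, not kernel interfaces.

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for sd_lb_stats (illustrative only). */
    struct lb_stats {
        unsigned long total_load;
        unsigned long total_pwr;
        unsigned long avg_load;
        unsigned long this_load;
        unsigned long max_load;
        unsigned long busiest_nr_running;
        int busiest;                 /* index of busiest group, -1 if none */
    };

    #define SCALE         1024UL     /* stand-in for SCHED_LOAD_SCALE       */
    #define IMBALANCE_PCT 125UL      /* stand-in for sd->imbalance_pct      */

    /*
     * Mirrors the early-exit cascade of the rewritten find_busiest_group():
     * return the busiest group index, or -1 when the domain looks balanced.
     */
    static int pick_busiest(struct lb_stats *s)
    {
        if (s->busiest < 0 || s->busiest_nr_running == 0)
            return -1;                      /* no busy sibling group    */
        if (s->this_load >= s->max_load)
            return -1;                      /* we are the busiest       */

        s->avg_load = (SCALE * s->total_load) / s->total_pwr;

        if (s->this_load >= s->avg_load)
            return -1;                      /* busier than the average  */
        if (100 * s->max_load <= IMBALANCE_PCT * s->this_load)
            return -1;                      /* within the allowed slack */

        return s->busiest;                  /* imbalance worth fixing   */
    }

    int main(void)
    {
        struct lb_stats s;

        memset(&s, 0, sizeof(s));
        /* Hypothetical numbers: group 1 carries most of the load. */
        s.total_load = 4096;
        s.total_pwr = 2 * SCALE;
        s.this_load = 1024;
        s.max_load = 3072;
        s.busiest_nr_running = 3;
        s.busiest = 1;

        printf("busiest group: %d\n", pick_busiest(&s));
        return 0;
    }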