Commit 6092478b authored by Dietmar Eggemann, committed by Ingo Molnar

sched/deadline: Make dl_cpuset_cpumask_can_shrink() capacity-aware

dl_cpuset_cpumask_can_shrink() is used to validate whether there is
still enough CPU capacity for DL tasks in the reduced cpuset.

Currently it still operates on `# remaining CPUs in the cpuset` (1).
Change this to use the already capacity-aware DL admission control
__dl_overflow() for the `cpumask can shrink` test.

  dl_b->bw = sched_rt_runtime << BW_SHIFT / sched_rt_period

  dl_b->bw * (1) >= currently allocated bandwidth in root_domain (rd)

  Replace (1) w/ `\Sum CPU capacity in rd >> SCHED_CAPACITY_SHIFT`

Adapt __dl_bw_capacity() to take a cpumask instead of a CPU number
argument so that `rd->span` and `cpumask of the reduced cpuset` can
be used here.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20220729111305.1275158-3-dietmar.eggemann@arm.com
parent 740cf8a7
...@@ -124,15 +124,12 @@ static inline int dl_bw_cpus(int i) ...@@ -124,15 +124,12 @@ static inline int dl_bw_cpus(int i)
return cpus; return cpus;
} }
static inline unsigned long __dl_bw_capacity(int i) static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
{ {
struct root_domain *rd = cpu_rq(i)->rd;
unsigned long cap = 0; unsigned long cap = 0;
int i;
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), for_each_cpu_and(i, mask, cpu_active_mask)
"sched RCU must be held");
for_each_cpu_and(i, rd->span, cpu_active_mask)
cap += capacity_orig_of(i); cap += capacity_orig_of(i);
return cap; return cap;
...@@ -148,7 +145,10 @@ static inline unsigned long dl_bw_capacity(int i) ...@@ -148,7 +145,10 @@ static inline unsigned long dl_bw_capacity(int i)
capacity_orig_of(i) == SCHED_CAPACITY_SCALE) { capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT; return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
} else { } else {
return __dl_bw_capacity(i); RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
"sched RCU must be held");
return __dl_bw_capacity(cpu_rq(i)->rd->span);
} }
} }
...@@ -3007,17 +3007,15 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) ...@@ -3007,17 +3007,15 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial) const struct cpumask *trial)
{ {
int ret = 1, trial_cpus; unsigned long flags, cap;
struct dl_bw *cur_dl_b; struct dl_bw *cur_dl_b;
unsigned long flags; int ret = 1;
rcu_read_lock_sched(); rcu_read_lock_sched();
cur_dl_b = dl_bw_of(cpumask_any(cur)); cur_dl_b = dl_bw_of(cpumask_any(cur));
trial_cpus = cpumask_weight(trial); cap = __dl_bw_capacity(trial);
raw_spin_lock_irqsave(&cur_dl_b->lock, flags); raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
if (cur_dl_b->bw != -1 && if (__dl_overflow(cur_dl_b, cap, 0, 0))
cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
ret = 0; ret = 0;
raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
rcu_read_unlock_sched(); rcu_read_unlock_sched();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment