Commit 89c572e2 authored by Linus Torvalds

Merge tag 'sched-core-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:

 - Fix inconsistency in misfit task load-balancing

 - Fix CPU isolation bugs in the task-wakeup logic

 - Rework and unify the sched_use_asym_prio() and sched_asym_prefer()
   logic

 - Clean up and simplify ->avg_* accesses

 - Misc cleanups and fixes

* tag 'sched-core-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/topology: Rename SD_SHARE_PKG_RESOURCES to SD_SHARE_LLC
  sched/fair: Check the SD_ASYM_PACKING flag in sched_use_asym_prio()
  sched/fair: Rework sched_use_asym_prio() and sched_asym_prefer()
  sched/fair: Remove unused parameter from sched_asym()
  sched/topology: Remove duplicate descriptions from TOPOLOGY_SD_FLAGS
  sched/fair: Simplify the update_sd_pick_busiest() logic
  sched/fair: Do strict inequality check for busiest misfit task group
  sched/fair: Remove unnecessary goto in update_sd_lb_stats()
  sched/fair: Take the scheduling domain into account in select_idle_core()
  sched/fair: Take the scheduling domain into account in select_idle_smt()
  sched/fair: Add READ_ONCE() and use existing helper function to access ->avg_irq
  sched/fair: Use existing helper functions to access ->avg_rt and ->avg_dl
  sched/core: Simplify code by removing duplicate #ifdefs
parents a5b1a017 54de4427
@@ -984,7 +984,7 @@ static bool shared_caches __ro_after_init;
 /* cpumask of CPUs with asymmetric SMT dependency */
 static int powerpc_smt_flags(void)
 {
-	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
 
 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
@@ -1010,9 +1010,9 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
 static int powerpc_shared_cache_flags(void)
 {
 	if (static_branch_unlikely(&splpar_asym_pack))
-		return SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING;
+		return SD_SHARE_LLC | SD_ASYM_PACKING;
 
-	return SD_SHARE_PKG_RESOURCES;
+	return SD_SHARE_LLC;
 }
 
 static int powerpc_shared_proc_flags(void)
...
@@ -117,13 +117,13 @@ SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
 
 /*
- * Domain members share CPU package resources (i.e. caches)
+ * Domain members share CPU Last Level Caches
  *
  * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
  *               the same cache(s).
  * NEEDS_GROUPS: Caches are shared between groups.
  */
-SD_FLAG(SD_SHARE_PKG_RESOURCES, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+SD_FLAG(SD_SHARE_LLC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 
 /*
  * Only a single load balancing instance
...
@@ -38,21 +38,21 @@ extern const struct sd_flag_debug sd_flag_debug[];
 #ifdef CONFIG_SCHED_SMT
 static inline int cpu_smt_flags(void)
 {
-	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+	return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
 }
 #endif
 
 #ifdef CONFIG_SCHED_CLUSTER
 static inline int cpu_cluster_flags(void)
 {
-	return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
+	return SD_CLUSTER | SD_SHARE_LLC;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
 static inline int cpu_core_flags(void)
 {
-	return SD_SHARE_PKG_RESOURCES;
+	return SD_SHARE_LLC;
 }
 #endif
...
@@ -1792,7 +1792,6 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css);
 #endif
 
 #ifdef CONFIG_SYSCTL
-#ifdef CONFIG_UCLAMP_TASK
 #ifdef CONFIG_UCLAMP_TASK_GROUP
 static void uclamp_update_root_tg(void)
 {
@@ -1898,7 +1897,6 @@ static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
 	return result;
 }
 #endif
-#endif
 
 static int uclamp_validate(struct task_struct *p,
 			   const struct sched_attr *attr)
@@ -2065,7 +2063,7 @@ static void __init init_uclamp(void)
 	}
 }
 
-#else /* CONFIG_UCLAMP_TASK */
+#else /* !CONFIG_UCLAMP_TASK */
 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
 static inline int uclamp_validate(struct task_struct *p,
...
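This hunk, like the idle_cpu_without() hunk further down in the fair.c diff, drops a duplicate #ifdef (the "Simplify code by removing duplicate #ifdefs" entry in the shortlog): the removed guard repeats a condition the enclosing code is already compiled under, so deleting it does not change what gets built. A minimal standalone illustration of the pattern (plain C, not the actual kernel context):

#include <stdio.h>

#define CONFIG_SMP 1	/* stand-in for a Kconfig option */

#if CONFIG_SMP
/* ... a large region that is only built when the option is on ... */
static int idle_cpu_example(int ttwu_pending)
{
#if CONFIG_SMP		/* redundant: this region is already under the same guard */
	if (ttwu_pending)
		return 0;
#endif
	return 1;
}
#endif

int main(void)
{
	/* prints 1 with or without the inner guard */
	printf("%d\n", idle_cpu_example(0));
	return 0;
}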
@@ -7289,7 +7289,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 		if (!available_idle_cpu(cpu)) {
 			idle = false;
 			if (*idle_cpu == -1) {
-				if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
+				if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) {
 					*idle_cpu = cpu;
 					break;
 				}
@@ -7297,7 +7297,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 			}
 			break;
 		}
-		if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
+		if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus))
 			*idle_cpu = cpu;
 	}
@@ -7311,13 +7311,19 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 /*
  * Scan the local SMT mask for idle CPUs.
  */
-static int select_idle_smt(struct task_struct *p, int target)
+static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	int cpu;
 
 	for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
 		if (cpu == target)
 			continue;
+		/*
+		 * Check if the CPU is in the LLC scheduling domain of @target.
+		 * Due to isolcpus, there is no guarantee that all the siblings are in the domain.
+		 */
+		if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
+			continue;
 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
 			return cpu;
 	}
@@ -7341,7 +7347,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
 	return __select_idle_cpu(core, p);
 }
 
-static inline int select_idle_smt(struct task_struct *p, int target)
+static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	return -1;
 }
@@ -7591,7 +7597,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		has_idle_core = test_idle_cores(target);
 
 		if (!has_idle_core && cpus_share_cache(prev, target)) {
-			i = select_idle_smt(p, prev);
+			i = select_idle_smt(p, sd, prev);
 			if ((unsigned int)i < nr_cpumask_bits)
 				return i;
 		}
@@ -9237,19 +9243,17 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 static inline bool others_have_blocked(struct rq *rq)
 {
-	if (READ_ONCE(rq->avg_rt.util_avg))
+	if (cpu_util_rt(rq))
 		return true;
 
-	if (READ_ONCE(rq->avg_dl.util_avg))
+	if (cpu_util_dl(rq))
 		return true;
 
 	if (thermal_load_avg(rq))
 		return true;
 
-#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
-	if (READ_ONCE(rq->avg_irq.util_avg))
+	if (cpu_util_irq(rq))
 		return true;
-#endif
 
 	return false;
 }
@@ -9506,8 +9510,8 @@ static unsigned long scale_rt_capacity(int cpu)
 	 * avg_thermal.load_avg tracks thermal pressure and the weighted
 	 * average uses the actual delta max capacity(load).
 	 */
-	used = READ_ONCE(rq->avg_rt.util_avg);
-	used += READ_ONCE(rq->avg_dl.util_avg);
+	used = cpu_util_rt(rq);
+	used += cpu_util_dl(rq);
 	used += thermal_load_avg(rq);
 
 	if (unlikely(used >= max))
@@ -9740,51 +9744,49 @@ group_type group_classify(unsigned int imbalance_pct,
  */
 static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
 {
+	if (!(sd->flags & SD_ASYM_PACKING))
+		return false;
+
 	if (!sched_smt_active())
 		return true;
 
 	return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
 }
 
+static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
+{
+	/*
+	 * First check if @dst_cpu can do asym_packing load balance. Only do it
+	 * if it has higher priority than @src_cpu.
+	 */
+	return sched_use_asym_prio(sd, dst_cpu) &&
+	       sched_asym_prefer(dst_cpu, src_cpu);
+}
+
 /**
- * sched_asym - Check if the destination CPU can do asym_packing load balance
+ * sched_group_asym - Check if the destination CPU can do asym_packing balance
  * @env: The load balancing environment
- * @sds: Load-balancing data with statistics of the local group
  * @sgs: Load-balancing statistics of the candidate busiest group
  * @group: The candidate busiest group
  *
  * @env::dst_cpu can do asym_packing if it has higher priority than the
  * preferred CPU of @group.
  *
- * SMT is a special case. If we are balancing load between cores, @env::dst_cpu
- * can do asym_packing balance only if all its SMT siblings are idle. Also, it
- * can only do it if @group is an SMT group and has exactly on busy CPU. Larger
- * imbalances in the number of CPUS are dealt with in find_busiest_group().
- *
- * If we are balancing load within an SMT core, or at PKG domain level, always
- * proceed.
- *
  * Return: true if @env::dst_cpu can do with asym_packing load balance. False
  * otherwise.
  */
 static inline bool
-sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
-	   struct sched_group *group)
+sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
 {
-	/* Ensure that the whole local core is idle, if applicable. */
-	if (!sched_use_asym_prio(env->sd, env->dst_cpu))
-		return false;
-
 	/*
-	 * CPU priorities does not make sense for SMT cores with more than one
+	 * CPU priorities do not make sense for SMT cores with more than one
 	 * busy sibling.
 	 */
-	if (group->flags & SD_SHARE_CPUCAPACITY) {
-		if (sgs->group_weight - sgs->idle_cpus != 1)
-			return false;
-	}
+	if ((group->flags & SD_SHARE_CPUCAPACITY) &&
+	    (sgs->group_weight - sgs->idle_cpus != 1))
+		return false;
 
-	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
+	return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
 }
 
 /* One group has more than one SMT CPU while the other group does not */
@@ -9938,11 +9940,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_weight = group->group_weight;
 
 	/* Check if dst CPU is idle and preferred to this group */
-	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
-	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
-	    sched_asym(env, sds, sgs, group)) {
+	if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
+	    sched_group_asym(env, sgs, group))
 		sgs->group_asym_packing = 1;
-	}
 
 	/* Check for loaded SMT group to be balanced to dst CPU */
 	if (!local_group && smt_balance(env, sgs, group))
@@ -10006,9 +10006,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	switch (sgs->group_type) {
 	case group_overloaded:
 		/* Select the overloaded group with highest avg_load. */
-		if (sgs->avg_load <= busiest->avg_load)
-			return false;
-		break;
+		return sgs->avg_load > busiest->avg_load;
 
 	case group_imbalanced:
 		/*
@@ -10019,18 +10017,14 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	case group_asym_packing:
 		/* Prefer to move from lowest priority CPU's work */
-		if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
-			return false;
-		break;
+		return sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu);
 
 	case group_misfit_task:
 		/*
 		 * If we have more than one misfit sg go with the biggest
 		 * misfit.
 		 */
-		if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
-			return false;
-		break;
+		return sgs->group_misfit_task_load > busiest->group_misfit_task_load;
 
 	case group_smt_balance:
 		/*
@@ -10182,10 +10176,8 @@ static int idle_cpu_without(int cpu, struct task_struct *p)
 	 * be computed and tested before calling idle_cpu_without().
 	 */
-#ifdef CONFIG_SMP
 	if (rq->ttwu_pending)
 		return 0;
-#endif
 
 	return 1;
 }
@@ -10578,16 +10570,11 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
 
-		if (local_group)
-			goto next_group;
-
-		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
+		if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
 			sds->busiest_stat = *sgs;
 		}
 
-next_group:
 		/* Now, start updating sd_lb_stats */
 		sds->total_load += sgs->group_load;
 		sds->total_capacity += sgs->group_capacity;
@@ -10691,7 +10678,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 */
 	if (local->group_type == group_has_spare) {
 		if ((busiest->group_type > group_fully_busy) &&
-		    !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
+		    !(env->sd->flags & SD_SHARE_LLC)) {
 			/*
 			 * If busiest is overloaded, try to fill spare
 			 * capacity. This might end up creating spare capacity
@@ -11038,10 +11025,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		 * If balancing between cores, let lower priority CPUs help
 		 * SMT cores with more than one busy sibling.
 		 */
-		if ((env->sd->flags & SD_ASYM_PACKING) &&
-		    sched_use_asym_prio(env->sd, i) &&
-		    sched_asym_prefer(i, env->dst_cpu) &&
-		    nr_running == 1)
+		if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
 			continue;
 
 		switch (env->migration_type) {
@@ -11137,8 +11121,7 @@ asym_active_balance(struct lb_env *env)
 	 * the lower priority @env::dst_cpu help it. Do not follow
 	 * CPU priority.
 	 */
-	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
-	       sched_use_asym_prio(env->sd, env->dst_cpu) &&
+	return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
 	       (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
 	        !sched_use_asym_prio(env->sd, env->src_cpu));
 }
@@ -11910,8 +11893,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 * preferred CPU must be idle.
 		 */
 		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
-			if (sched_use_asym_prio(sd, i) &&
-			    sched_asym_prefer(i, cpu)) {
+			if (sched_asym(sd, i, cpu)) {
 				flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 				goto unlock;
 			}
...
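Taken together, the fair.c hunks above converge the asym_packing checks on two small helpers. A condensed view of how they read after this merge (paraphrased from the new side of the diff, with comments added here; not a complete listing):

static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
{
	/* Asym packing priorities only apply if the domain has the flag set. */
	if (!(sd->flags & SD_ASYM_PACKING))
		return false;

	/* Without SMT, CPU priority can always be used. */
	if (!sched_smt_active())
		return true;

	/* With SMT, use priority inside an SMT core, or when the whole core is idle. */
	return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
}

static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
{
	/* @dst_cpu may pull only if it can do asym_packing and has higher priority. */
	return sched_use_asym_prio(sd, dst_cpu) &&
	       sched_asym_prefer(dst_cpu, src_cpu);
}

Call sites such as find_busiest_queue() and nohz_balancer_kick() now make a single sched_asym() call, and update_sg_lb_stats() goes through sched_group_asym(), instead of open-coding the SD_ASYM_PACKING, sched_use_asym_prio() and sched_asym_prefer() checks; this is the "rework and unify" item in the pull summary.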
@@ -3136,7 +3136,7 @@ static inline bool uclamp_rq_is_idle(struct rq *rq)
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 static inline unsigned long cpu_util_irq(struct rq *rq)
 {
-	return rq->avg_irq.util_avg;
+	return READ_ONCE(rq->avg_irq.util_avg);
 }
 
 static inline
...
@@ -657,13 +657,13 @@ static void destroy_sched_domains(struct sched_domain *sd)
 }
 
 /*
- * Keep a special pointer to the highest sched_domain that has
- * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
- * allows us to avoid some pointer chasing select_idle_sibling().
+ * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set
+ * (Last Level Cache Domain) for this allows us to avoid some pointer chasing
+ * select_idle_sibling().
  *
- * Also keep a unique ID per domain (we use the first CPU number in
- * the cpumask of the domain), this allows us to quickly tell if
- * two CPUs are in the same cache domain, see cpus_share_cache().
+ * Also keep a unique ID per domain (we use the first CPU number in the cpumask
+ * of the domain), this allows us to quickly tell if two CPUs are in the same
+ * cache domain, see cpus_share_cache().
  */
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
@@ -684,7 +684,7 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 	int size = 1;
 
-	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
+	sd = highest_flag_domain(cpu, SD_SHARE_LLC);
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
@@ -1551,11 +1551,12 @@ static struct cpumask ***sched_domains_numa_masks;
  *
  * These flags are purely descriptive of the topology and do not prescribe
  * behaviour. Behaviour is artificial and mapped in the below sd_init()
- * function:
+ * function. For details, see include/linux/sched/sd_flags.h.
  *
- * SD_SHARE_CPUCAPACITY - describes SMT topologies
- * SD_SHARE_PKG_RESOURCES - describes shared caches
- * SD_NUMA - describes NUMA topologies
+ *   SD_SHARE_CPUCAPACITY
+ *   SD_SHARE_LLC
+ *   SD_CLUSTER
+ *   SD_NUMA
  *
  * Odd one out, which beside describing the topology has a quirk also
  * prescribes the desired behaviour that goes along with it:
@@ -1565,7 +1566,7 @@ static struct cpumask ***sched_domains_numa_masks;
 #define TOPOLOGY_SD_FLAGS		\
 	(SD_SHARE_CPUCAPACITY	|	\
 	 SD_CLUSTER		|	\
-	 SD_SHARE_PKG_RESOURCES	|	\
+	 SD_SHARE_LLC		|	\
 	 SD_NUMA		|	\
 	 SD_ASYM_PACKING)
@@ -1608,7 +1609,7 @@ sd_init(struct sched_domain_topology_level *tl,
 			| 0*SD_BALANCE_WAKE
 			| 1*SD_WAKE_AFFINE
 			| 0*SD_SHARE_CPUCAPACITY
-			| 0*SD_SHARE_PKG_RESOURCES
+			| 0*SD_SHARE_LLC
 			| 0*SD_SERIALIZE
 			| 1*SD_PREFER_SIBLING
 			| 0*SD_NUMA
@@ -1645,7 +1646,7 @@ sd_init(struct sched_domain_topology_level *tl,
 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
 		sd->imbalance_pct = 110;
 
-	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+	} else if (sd->flags & SD_SHARE_LLC) {
 		sd->imbalance_pct = 117;
 		sd->cache_nice_tries = 1;
@@ -1670,7 +1671,7 @@ sd_init(struct sched_domain_topology_level *tl,
 	 * For all levels sharing cache; connect a sched_domain_shared
 	 * instance.
 	 */
-	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+	if (sd->flags & SD_SHARE_LLC) {
 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
 		atomic_inc(&sd->shared->ref);
 		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
@@ -2445,8 +2446,8 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
 			struct sched_domain *child = sd->child;
 
-			if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
-			    (child->flags & SD_SHARE_PKG_RESOURCES)) {
+			if (!(sd->flags & SD_SHARE_LLC) && child &&
+			    (child->flags & SD_SHARE_LLC)) {
 				struct sched_domain __rcu *top_p;
 				unsigned int nr_llcs;
...
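The SD_SHARE_PKG_RESOURCES to SD_SHARE_LLC change running through all of the files above is a pure rename of the topology flag (the "Rename SD_SHARE_PKG_RESOURCES to SD_SHARE_LLC" entry in the shortlog), with no behavioural change. Out-of-tree topology code would follow the same one-line substitution; a hypothetical sketch (the my_arch_* names are made up for illustration):

static int my_arch_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;	/* was: ... | SD_SHARE_PKG_RESOURCES */
}

static int my_arch_core_flags(void)
{
	return SD_SHARE_LLC;				/* was: SD_SHARE_PKG_RESOURCES */
}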