Commit cfe7ddcb authored by Valentin Schneider's avatar Valentin Schneider Committed by Ingo Molnar

ARM, sched/topology: Remove SD_SHARE_POWERDOMAIN

This flag was introduced in 2014 by commit:

  d77b3ed5 ("sched: Add a new SD_SHARE_POWERDOMAIN for sched_domain")

but AFAIA it was never leveraged by the scheduler. The closest thing I can
think of is EAS caring about frequency domains, and it does that by
leveraging performance domains.

Remove the flag. No change in functionality is expected.
Suggested-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: https://lore.kernel.org/r/20200817113003.20802-2-valentin.schneider@arm.com
parent 18445bf4
@@ -243,7 +243,7 @@ void store_cpu_topology(unsigned int cpuid)
 static inline int cpu_corepower_flags(void)
 {
-	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+	return SD_SHARE_PKG_RESOURCES;
 }

 static struct sched_domain_topology_level arm_topology[] = {
@@ -18,13 +18,12 @@
 #define SD_WAKE_AFFINE		0x0010	/* Wake task to waking CPU */
 #define SD_ASYM_CPUCAPACITY	0x0020	/* Domain members have different CPU capacities */
 #define SD_SHARE_CPUCAPACITY	0x0040	/* Domain members share CPU capacity */
-#define SD_SHARE_POWERDOMAIN	0x0080	/* Domain members share power domain */
-#define SD_SHARE_PKG_RESOURCES	0x0100	/* Domain members share CPU pkg resources */
-#define SD_SERIALIZE		0x0200	/* Only a single load balancing instance */
-#define SD_ASYM_PACKING		0x0400	/* Place busy groups earlier in the domain */
-#define SD_PREFER_SIBLING	0x0800	/* Prefer to place tasks in a sibling domain */
-#define SD_OVERLAP		0x1000	/* sched_domains of this level overlap */
-#define SD_NUMA			0x2000	/* cross-node balancing */
+#define SD_SHARE_PKG_RESOURCES	0x0080	/* Domain members share CPU pkg resources */
+#define SD_SERIALIZE		0x0100	/* Only a single load balancing instance */
+#define SD_ASYM_PACKING		0x0200	/* Place busy groups earlier in the domain */
+#define SD_PREFER_SIBLING	0x0400	/* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP		0x0800	/* sched_domains of this level overlap */
+#define SD_NUMA			0x1000	/* cross-node balancing */

 #ifdef CONFIG_SCHED_SMT
 static inline int cpu_smt_flags(void)
@@ -148,8 +148,7 @@ static int sd_degenerate(struct sched_domain *sd)
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
-			 SD_SHARE_PKG_RESOURCES |
-			 SD_SHARE_POWERDOMAIN)) {
+			 SD_SHARE_PKG_RESOURCES)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}
@@ -180,8 +179,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
				SD_ASYM_CPUCAPACITY |
				SD_SHARE_CPUCAPACITY |
				SD_SHARE_PKG_RESOURCES |
-				SD_PREFER_SIBLING |
-				SD_SHARE_POWERDOMAIN);
+				SD_PREFER_SIBLING);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
@@ -1292,7 +1290,6 @@ int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
  * SD_SHARE_CPUCAPACITY   - describes SMT topologies
  * SD_SHARE_PKG_RESOURCES - describes shared caches
  * SD_NUMA                - describes NUMA topologies
- * SD_SHARE_POWERDOMAIN   - describes shared power domain
  *
  * Odd one out, which beside describing the topology has a quirk also
  * prescribes the desired behaviour that goes along with it:
@@ -1303,8 +1300,7 @@ int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
	(SD_SHARE_CPUCAPACITY	|	\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA		|	\
-	 SD_ASYM_PACKING	|	\
-	 SD_SHARE_POWERDOMAIN)
+	 SD_ASYM_PACKING)

 static struct sched_domain *
 sd_init(struct sched_domain_topology_level *tl,
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment