Commit 83a24e32 authored by Heiko Carstens, committed by Martin Schwidefsky

[S390] topology: get rid of ifdefs

Remove all ifdefs from the topology code and compile it only for the
CONFIG_SCHED_BOOK case. The new code selects SCHED_MC if SCHED_BOOK is
selected; SCHED_MC without SCHED_BOOK is no longer possible.
Furthermore, various sysfs attributes are no longer available for the
!SCHED_BOOK case, in particular all attributes that correspond to
CPU polarization.
But since all real-world kernels have SCHED_BOOK selected anyway, this
doesn't matter much.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 3931723f
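The core of the cleanup shows up in the topology header hunk below: the few remaining #ifdefs are concentrated in two inline accessors, so callers in smp.c and topology.c stay ifdef-free. A condensed sketch of that accessor pattern, taken from the hunks in this diff with comments added:

/* Polarization state lives in topology code; smp.c only uses accessors. */
#define POLARIZATION_UNKNOWN	(-1)
#define POLARIZATION_HRZ	(0)	/* horizontal */
#define POLARIZATION_VL		(1)	/* vertical:low */
#define POLARIZATION_VM		(2)	/* vertical:medium */
#define POLARIZATION_VH		(3)	/* vertical:high */

extern int cpu_polarization[];	/* defined in topology.c, NR_CPUS entries */

static inline void cpu_set_polarization(int cpu, int val)
{
#ifdef CONFIG_SCHED_BOOK
	cpu_polarization[cpu] = val;	/* topology code is built in */
#endif					/* !SCHED_BOOK: a no-op */
}

static inline int cpu_read_polarization(int cpu)
{
#ifdef CONFIG_SCHED_BOOK
	return cpu_polarization[cpu];
#else
	return POLARIZATION_HRZ;	/* !SCHED_BOOK: always horizontal */
#endif
}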
@@ -191,18 +191,13 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
config SCHED_MC
def_bool y
prompt "Multi-core scheduler support"
depends on SMP
help
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places.
def_bool n
config SCHED_BOOK
def_bool y
prompt "Book scheduler support"
depends on SMP && SCHED_MC
depends on SMP
select SCHED_MC
help
Book scheduler support improves the CPU scheduler's decision making
when dealing with machines that have several books.
@@ -23,7 +23,6 @@ extern void __cpu_die (unsigned int cpu);
extern int __cpu_up (unsigned int cpu);
extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[];
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -4,6 +4,10 @@
#include <linux/cpumask.h>
#include <asm/sysinfo.h>
struct cpu;
#ifdef CONFIG_SCHED_BOOK
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
@@ -16,8 +20,6 @@ static inline const struct cpumask *cpu_coregroup_mask(int cpu)
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define mc_capable() (1)
#ifdef CONFIG_SCHED_BOOK
extern unsigned char cpu_book_id[NR_CPUS];
extern cpumask_t cpu_book_map[NR_CPUS];
@@ -29,19 +31,43 @@ static inline const struct cpumask *cpu_book_mask(int cpu)
#define topology_book_id(cpu) (cpu_book_id[cpu])
#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
#endif /* CONFIG_SCHED_BOOK */
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
void store_topology(struct sysinfo_15_1_x *info);
#define POLARIZATION_UNKNWN (-1)
#else /* CONFIG_SCHED_BOOK */
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
#endif /* CONFIG_SCHED_BOOK */
#define POLARIZATION_UNKNOWN (-1)
#define POLARIZATION_HRZ (0)
#define POLARIZATION_VL (1)
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
#ifdef CONFIG_SMP
extern int cpu_polarization[];
static inline void cpu_set_polarization(int cpu, int val)
{
#ifdef CONFIG_SCHED_BOOK
cpu_polarization[cpu] = val;
#endif
}
static inline int cpu_read_polarization(int cpu)
{
#ifdef CONFIG_SCHED_BOOK
return cpu_polarization[cpu];
#else
return POLARIZATION_HRZ;
#endif
}
#ifdef CONFIG_SCHED_BOOK
void s390_init_cpu_topology(void);
#else
static inline void s390_init_cpu_topology(void)
@@ -32,7 +32,8 @@ extra-y += head.o init_task.o vmlinux.lds
extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o topology.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_BOOK) += topology.o
obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \
switch_cpu.o)
obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
@@ -69,9 +69,7 @@ enum s390_cpu_state {
};
DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;
static DEFINE_PER_CPU(struct cpu, cpu_devices);
@@ -369,7 +367,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
if (!cpu_stopped(logical_cpu))
continue;
set_cpu_present(logical_cpu, true);
@@ -403,7 +401,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
set_cpu_present(logical_cpu, true);
if (cpu >= info->configured)
smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
@@ -806,7 +804,7 @@ void __init smp_prepare_boot_cpu(void)
S390_lowcore.percpu_offset = __per_cpu_offset[0];
current_set[0] = current;
smp_cpu_state[0] = CPU_STATE_CONFIGURED;
smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -868,7 +866,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
if (!rc) {
smp_cpu_state[cpu] = CPU_STATE_STANDBY;
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
}
}
break;
@@ -877,7 +875,7 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
if (!rc) {
smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
}
}
break;
@@ -892,35 +890,6 @@ static ssize_t cpu_configure_store(struct sys_device *dev,
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t cpu_polarization_show(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
int cpu = dev->id;
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
switch (smp_cpu_polarization[cpu]) {
case POLARIZATION_HRZ:
count = sprintf(buf, "horizontal\n");
break;
case POLARIZATION_VL:
count = sprintf(buf, "vertical:low\n");
break;
case POLARIZATION_VM:
count = sprintf(buf, "vertical:medium\n");
break;
case POLARIZATION_VH:
count = sprintf(buf, "vertical:high\n");
break;
default:
count = sprintf(buf, "unknown\n");
break;
}
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
static ssize_t show_cpu_address(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
@@ -928,13 +897,11 @@ static ssize_t show_cpu_address(struct sys_device *dev,
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
&attr_configure.attr,
#endif
&attr_address.attr,
&attr_polarization.attr,
NULL,
};
@@ -1055,11 +1022,20 @@ static int __devinit smp_add_present_cpu(int cpu)
rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
if (rc)
goto out_cpu;
if (!cpu_online(cpu))
goto out;
rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
if (!rc)
return 0;
if (cpu_online(cpu)) {
rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
if (rc)
goto out_online;
}
rc = topology_cpu_init(c);
if (rc)
goto out_topology;
return 0;
out_topology:
if (cpu_online(cpu))
sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
@@ -1111,61 +1087,16 @@ static ssize_t __ref rescan_store(struct sysdev_class *class,
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t dispatching_show(struct sysdev_class *class,
struct sysdev_class_attribute *attr,
char *buf)
static int __init s390_smp_init(void)
{
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
count = sprintf(buf, "%d\n", cpu_management);
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static ssize_t dispatching_store(struct sysdev_class *dev,
struct sysdev_class_attribute *attr,
const char *buf,
size_t count)
{
int val, rc;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
rc = 0;
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
if (cpu_management == val)
goto out;
rc = topology_set_cpu_management(val);
if (!rc)
cpu_management = val;
out:
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
dispatching_store);
static int __init topology_init(void)
{
int cpu;
int rc;
int cpu, rc;
register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
if (rc)
return rc;
#endif
rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
if (rc)
return rc;
for_each_present_cpu(cpu) {
rc = smp_add_present_cpu(cpu);
if (rc)
@@ -1173,4 +1104,4 @@ static int __init topology_init(void)
}
return 0;
}
subsys_initcall(topology_init);
subsys_initcall(s390_smp_init);
@@ -6,17 +6,17 @@
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <linux/mm.h>
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
@@ -41,11 +41,12 @@ static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
#ifdef CONFIG_SCHED_BOOK
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
#endif
/* smp_cpu_state_mutex must be held when accessing this array */
int cpu_polarization[NR_CPUS];
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
@@ -85,10 +86,8 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
for_each_present_cpu(lcpu) {
if (cpu_logical_map(lcpu) != rcpu)
continue;
#ifdef CONFIG_SCHED_BOOK
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
#endif
cpumask_set_cpu(lcpu, &core->mask);
if (z10) {
cpu_core_id[lcpu] = rcpu;
@@ -96,7 +95,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
} else {
cpu_core_id[lcpu] = core->id;
}
smp_cpu_polarization[lcpu] = tl_cpu->pp;
cpu_set_polarization(lcpu, tl_cpu->pp);
}
}
return core;
@@ -111,13 +110,11 @@ static void clear_masks(void)
cpumask_clear(&info->mask);
info = info->next;
}
#ifdef CONFIG_SCHED_BOOK
info = &book_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
}
#endif
}
static union topology_entry *next_tle(union topology_entry *tle)
@@ -129,26 +126,19 @@ static union topology_entry *next_tle(union topology_entry *tle)
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
struct mask_info *book = &book_info;
struct cpuid cpu_id;
#else
struct mask_info *book = NULL;
#endif
struct mask_info *core = &core_info;
struct mask_info *book = &book_info;
union topology_entry *tle, *end;
struct cpuid cpu_id;
int z10 = 0;
#ifdef CONFIG_SCHED_BOOK
get_cpu_id(&cpu_id);
z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
#endif
spin_lock_irq(&topology_lock);
clear_masks();
tle = info->tle;
end = (union topology_entry *)((unsigned long)info + info->length);
while (tle < end) {
#ifdef CONFIG_SCHED_BOOK
if (z10) {
switch (tle->nl) {
case 1:
@@ -165,14 +155,11 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
tle = next_tle(tle);
continue;
}
#endif
switch (tle->nl) {
#ifdef CONFIG_SCHED_BOOK
case 2:
book = book->next;
book->id = tle->container.id;
break;
#endif
case 1:
core = core->next;
core->id = tle->container.id;
@@ -196,7 +183,7 @@ static void topology_update_polarization_simple(void)
mutex_lock(&smp_cpu_state_mutex);
for_each_possible_cpu(cpu)
smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
cpu_set_polarization(cpu, POLARIZATION_HRZ);
mutex_unlock(&smp_cpu_state_mutex);
}
@@ -215,8 +202,7 @@ static int ptf(unsigned long fc)
int topology_set_cpu_management(int fc)
{
int cpu;
int rc;
int cpu, rc;
if (!MACHINE_HAS_TOPOLOGY)
return -EOPNOTSUPP;
@@ -227,7 +213,7 @@ int topology_set_cpu_management(int fc)
if (rc)
return -EBUSY;
for_each_possible_cpu(cpu)
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
return rc;
}
@@ -239,22 +225,18 @@ static void update_cpu_core_map(void)
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
#ifdef CONFIG_SCHED_BOOK
cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
#endif
}
spin_unlock_irqrestore(&topology_lock, flags);
}
void store_topology(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
int rc;
rc = stsi(info, 15, 1, 3);
if (rc != -ENOSYS)
return;
#endif
stsi(info, 15, 1, 2);
}
@@ -313,23 +295,6 @@ static int __init early_parse_topology(char *p)
}
early_param("topology", early_parse_topology);
static int __init init_topology_update(void)
{
int rc;
rc = 0;
if (!MACHINE_HAS_TOPOLOGY) {
topology_update_polarization_simple();
goto out;
}
init_timer_deferrable(&topology_timer);
set_topology_timer();
out:
update_cpu_core_map();
return rc;
}
__initcall(init_topology_update);
static void __init alloc_masks(struct sysinfo_15_1_x *info,
struct mask_info *mask, int offset)
{
@@ -357,10 +322,107 @@ void __init s390_init_cpu_topology(void)
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest);
printk(KERN_CONT " %d", info->mag[i]);
printk(KERN_CONT " / %d\n", info->mnest);
alloc_masks(info, &core_info, 1);
#ifdef CONFIG_SCHED_BOOK
alloc_masks(info, &book_info, 2);
#endif
}
static int cpu_management;
static ssize_t dispatching_show(struct sysdev_class *class,
struct sysdev_class_attribute *attr,
char *buf)
{
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
count = sprintf(buf, "%d\n", cpu_management);
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static ssize_t dispatching_store(struct sysdev_class *dev,
struct sysdev_class_attribute *attr,
const char *buf,
size_t count)
{
int val, rc;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
rc = 0;
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
if (cpu_management == val)
goto out;
rc = topology_set_cpu_management(val);
if (!rc)
cpu_management = val;
out:
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
dispatching_store);
static ssize_t cpu_polarization_show(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
int cpu = dev->id;
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
switch (cpu_read_polarization(cpu)) {
case POLARIZATION_HRZ:
count = sprintf(buf, "horizontal\n");
break;
case POLARIZATION_VL:
count = sprintf(buf, "vertical:low\n");
break;
case POLARIZATION_VM:
count = sprintf(buf, "vertical:medium\n");
break;
case POLARIZATION_VH:
count = sprintf(buf, "vertical:high\n");
break;
default:
count = sprintf(buf, "unknown\n");
break;
}
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
static struct attribute *topology_cpu_attrs[] = {
&attr_polarization.attr,
NULL,
};
static struct attribute_group topology_cpu_attr_group = {
.attrs = topology_cpu_attrs,
};
int topology_cpu_init(struct cpu *cpu)
{
return sysfs_create_group(&cpu->sysdev.kobj, &topology_cpu_attr_group);
}
static int __init topology_init(void)
{
if (!MACHINE_HAS_TOPOLOGY) {
topology_update_polarization_simple();
goto out;
}
init_timer_deferrable(&topology_timer);
set_topology_timer();
out:
update_cpu_core_map();
return sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
}
device_initcall(topology_init);
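For reference, a hypothetical user-space sketch of the sysfs interface left in place by this patch, assuming the conventional /sys/devices/system/cpu location of the cpu sysdev class: the per-CPU polarization attribute registered via topology_cpu_init() and the class-wide dispatching attribute registered by topology_init(). The paths and semantics are assumptions based on the attribute names in the diff, not part of the commit.

/* Hypothetical user-space sketch; paths assume the standard cpu sysdev class. */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f;

	/* Read the per-CPU polarization attribute (see cpu_polarization_show()). */
	f = fopen("/sys/devices/system/cpu/cpu0/polarization", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("cpu0 polarization: %s", buf);
		fclose(f);
	}

	/* Write the class-wide dispatching attribute (see dispatching_store()):
	 * "0" keeps horizontal polarization, "1" requests vertical polarization. */
	f = fopen("/sys/devices/system/cpu/dispatching", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}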