Commit 2fdf66b4 authored by Rusty Russell, committed by Ingo Molnar

cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t

Impact: Reduce memory usage, use new API.

This is part of an effort to reduce structure sizes for machines
configured with large NR_CPUS.  cpumask_t gets replaced by
cpumask_var_t, which is either struct cpumask[1] (small NR_CPUS) or
struct cpumask * (large NR_CPUS).

(Changes to powernow-k* by <travis>.)
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ee943a82
...@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) ...@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
} }
} }
/*
 * free_acpi_perf_data - release the per-CPU performance data.
 *
 * Frees each possible CPU's shared_cpu_map cpumask_var_t and then the
 * per-CPU acpi_perf_data allocation itself.  Safe to call on a partially
 * initialized acpi_perf_data: alloc_percpu() zero-fills the structures,
 * and free_cpumask_var(NULL) is a no-op, so CPUs whose cpumask allocation
 * never happened (or failed) are handled correctly.
 */
static void free_acpi_perf_data(void)
{
unsigned int i;
/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
for_each_possible_cpu(i)
free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
->shared_cpu_map);
free_percpu(acpi_perf_data);
}
/* /*
* acpi_cpufreq_early_init - initialize ACPI P-States library * acpi_cpufreq_early_init - initialize ACPI P-States library
* *
...@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) ...@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
*/ */
static int __init acpi_cpufreq_early_init(void) static int __init acpi_cpufreq_early_init(void)
{ {
unsigned int i;
dprintk("acpi_cpufreq_early_init\n"); dprintk("acpi_cpufreq_early_init\n");
acpi_perf_data = alloc_percpu(struct acpi_processor_performance); acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
...@@ -534,6 +546,15 @@ static int __init acpi_cpufreq_early_init(void) ...@@ -534,6 +546,15 @@ static int __init acpi_cpufreq_early_init(void)
dprintk("Memory allocation error for acpi_perf_data.\n"); dprintk("Memory allocation error for acpi_perf_data.\n");
return -ENOMEM; return -ENOMEM;
} }
for_each_possible_cpu(i) {
if (!alloc_cpumask_var(&per_cpu_ptr(acpi_perf_data, i)
->shared_cpu_map, GFP_KERNEL)) {
/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
free_acpi_perf_data();
return -ENOMEM;
}
}
/* Do initialization in ACPI core */ /* Do initialization in ACPI core */
acpi_processor_preregister_performance(acpi_perf_data); acpi_processor_preregister_performance(acpi_perf_data);
...@@ -604,9 +625,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -604,9 +625,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
*/ */
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
policy->cpus = perf->shared_cpu_map; cpumask_copy(&policy->cpus, perf->shared_cpu_map);
} }
policy->related_cpus = perf->shared_cpu_map; cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table); dmi_check_system(sw_any_bug_dmi_table);
...@@ -795,7 +816,7 @@ static int __init acpi_cpufreq_init(void) ...@@ -795,7 +816,7 @@ static int __init acpi_cpufreq_init(void)
ret = cpufreq_register_driver(&acpi_cpufreq_driver); ret = cpufreq_register_driver(&acpi_cpufreq_driver);
if (ret) if (ret)
free_percpu(acpi_perf_data); free_acpi_perf_data();
return ret; return ret;
} }
......
...@@ -310,6 +310,12 @@ static int powernow_acpi_init(void) ...@@ -310,6 +310,12 @@ static int powernow_acpi_init(void)
goto err0; goto err0;
} }
if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
GFP_KERNEL)) {
retval = -ENOMEM;
goto err05;
}
if (acpi_processor_register_performance(acpi_processor_perf, 0)) { if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
retval = -EIO; retval = -EIO;
goto err1; goto err1;
...@@ -412,6 +418,8 @@ static int powernow_acpi_init(void) ...@@ -412,6 +418,8 @@ static int powernow_acpi_init(void)
err2: err2:
acpi_processor_unregister_performance(acpi_processor_perf, 0); acpi_processor_unregister_performance(acpi_processor_perf, 0);
err1: err1:
free_cpumask_var(acpi_processor_perf->shared_cpu_map);
err05:
kfree(acpi_processor_perf); kfree(acpi_processor_perf);
err0: err0:
printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
...@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) { ...@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
#ifdef CONFIG_X86_POWERNOW_K7_ACPI #ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (acpi_processor_perf) { if (acpi_processor_perf) {
acpi_processor_unregister_performance(acpi_processor_perf, 0); acpi_processor_unregister_performance(acpi_processor_perf, 0);
free_cpumask_var(acpi_processor_perf->shared_cpu_map);
kfree(acpi_processor_perf); kfree(acpi_processor_perf);
} }
#endif #endif
......
...@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned ...@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
{ {
struct cpufreq_frequency_table *powernow_table; struct cpufreq_frequency_table *powernow_table;
int ret_val; int ret_val = -ENODEV;
if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
dprintk("register performance failed: bad ACPI data\n"); dprintk("register performance failed: bad ACPI data\n");
...@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) ...@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
/* notify BIOS that we exist */ /* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE); acpi_processor_notify_smm(THIS_MODULE);
if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
printk(KERN_ERR PFX
"unable to alloc powernow_k8_data cpumask\n");
ret_val = -ENOMEM;
goto err_out_mem;
}
return 0; return 0;
err_out_mem: err_out_mem:
...@@ -826,7 +833,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) ...@@ -826,7 +833,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
data->acpi_data.state_count = 0; data->acpi_data.state_count = 0;
return -ENODEV; return ret_val;
} }
static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
...@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) ...@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{ {
if (data->acpi_data.state_count) if (data->acpi_data.state_count)
acpi_processor_unregister_performance(&data->acpi_data, data->cpu); acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
free_cpumask_var(data->acpi_data.shared_cpu_map);
} }
#else #else
...@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) ...@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
data->cpu = pol->cpu; data->cpu = pol->cpu;
data->currpstate = HW_PSTATE_INVALID; data->currpstate = HW_PSTATE_INVALID;
if (powernow_k8_cpu_init_acpi(data)) { rc = powernow_k8_cpu_init_acpi(data);
if (rc) {
/* /*
* Use the PSB BIOS structure. This is only availabe on * Use the PSB BIOS structure. This is only availabe on
* an UP version, and is deprecated by AMD. * an UP version, and is deprecated by AMD.
...@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) ...@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
"ACPI maintainers and complain to your BIOS " "ACPI maintainers and complain to your BIOS "
"vendor.\n"); "vendor.\n");
#endif #endif
kfree(data); goto err_out;
return -ENODEV;
} }
if (pol->cpu != 0) { if (pol->cpu != 0) {
printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
"CPU other than CPU0. Complain to your BIOS " "CPU other than CPU0. Complain to your BIOS "
"vendor.\n"); "vendor.\n");
kfree(data); goto err_out;
return -ENODEV;
} }
rc = find_psb_table(data); rc = find_psb_table(data);
if (rc) { if (rc) {
kfree(data); goto err_out;
return -ENODEV;
} }
} }
......
...@@ -826,6 +826,11 @@ static int acpi_processor_add(struct acpi_device *device) ...@@ -826,6 +826,11 @@ static int acpi_processor_add(struct acpi_device *device)
if (!pr) if (!pr)
return -ENOMEM; return -ENOMEM;
if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
kfree(pr);
return -ENOMEM;
}
pr->handle = device->handle; pr->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
...@@ -845,10 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type) ...@@ -845,10 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
pr = acpi_driver_data(device); pr = acpi_driver_data(device);
if (pr->id >= nr_cpu_ids) { if (pr->id >= nr_cpu_ids)
kfree(pr); goto free;
return 0;
}
if (type == ACPI_BUS_REMOVAL_EJECT) { if (type == ACPI_BUS_REMOVAL_EJECT) {
if (acpi_processor_handle_eject(pr)) if (acpi_processor_handle_eject(pr))
...@@ -873,6 +876,9 @@ static int acpi_processor_remove(struct acpi_device *device, int type) ...@@ -873,6 +876,9 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
per_cpu(processors, pr->id) = NULL; per_cpu(processors, pr->id) = NULL;
per_cpu(processor_device_array, pr->id) = NULL; per_cpu(processor_device_array, pr->id) = NULL;
free:
free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr); kfree(pr);
return 0; return 0;
......
...@@ -588,12 +588,15 @@ int acpi_processor_preregister_performance( ...@@ -588,12 +588,15 @@ int acpi_processor_preregister_performance(
int count, count_target; int count, count_target;
int retval = 0; int retval = 0;
unsigned int i, j; unsigned int i, j;
cpumask_t covered_cpus; cpumask_var_t covered_cpus;
struct acpi_processor *pr; struct acpi_processor *pr;
struct acpi_psd_package *pdomain; struct acpi_psd_package *pdomain;
struct acpi_processor *match_pr; struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain; struct acpi_psd_package *match_pdomain;
if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&performance_mutex); mutex_lock(&performance_mutex);
retval = 0; retval = 0;
...@@ -617,7 +620,7 @@ int acpi_processor_preregister_performance( ...@@ -617,7 +620,7 @@ int acpi_processor_preregister_performance(
} }
pr->performance = percpu_ptr(performance, i); pr->performance = percpu_ptr(performance, i);
cpu_set(i, pr->performance->shared_cpu_map); cpumask_set_cpu(i, pr->performance->shared_cpu_map);
if (acpi_processor_get_psd(pr)) { if (acpi_processor_get_psd(pr)) {
retval = -EINVAL; retval = -EINVAL;
continue; continue;
...@@ -650,18 +653,18 @@ int acpi_processor_preregister_performance( ...@@ -650,18 +653,18 @@ int acpi_processor_preregister_performance(
} }
} }
cpus_clear(covered_cpus); cpumask_clear(covered_cpus);
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
pr = per_cpu(processors, i); pr = per_cpu(processors, i);
if (!pr) if (!pr)
continue; continue;
if (cpu_isset(i, covered_cpus)) if (cpumask_test_cpu(i, covered_cpus))
continue; continue;
pdomain = &(pr->performance->domain_info); pdomain = &(pr->performance->domain_info);
cpu_set(i, pr->performance->shared_cpu_map); cpumask_set_cpu(i, pr->performance->shared_cpu_map);
cpu_set(i, covered_cpus); cpumask_set_cpu(i, covered_cpus);
if (pdomain->num_processors <= 1) if (pdomain->num_processors <= 1)
continue; continue;
...@@ -699,8 +702,8 @@ int acpi_processor_preregister_performance( ...@@ -699,8 +702,8 @@ int acpi_processor_preregister_performance(
goto err_ret; goto err_ret;
} }
cpu_set(j, covered_cpus); cpumask_set_cpu(j, covered_cpus);
cpu_set(j, pr->performance->shared_cpu_map); cpumask_set_cpu(j, pr->performance->shared_cpu_map);
count++; count++;
} }
...@@ -718,8 +721,8 @@ int acpi_processor_preregister_performance( ...@@ -718,8 +721,8 @@ int acpi_processor_preregister_performance(
match_pr->performance->shared_type = match_pr->performance->shared_type =
pr->performance->shared_type; pr->performance->shared_type;
match_pr->performance->shared_cpu_map = cpumask_copy(match_pr->performance->shared_cpu_map,
pr->performance->shared_cpu_map; pr->performance->shared_cpu_map);
} }
} }
...@@ -731,14 +734,15 @@ int acpi_processor_preregister_performance( ...@@ -731,14 +734,15 @@ int acpi_processor_preregister_performance(
/* Assume no coordination on any error parsing domain info */ /* Assume no coordination on any error parsing domain info */
if (retval) { if (retval) {
cpus_clear(pr->performance->shared_cpu_map); cpumask_clear(pr->performance->shared_cpu_map);
cpu_set(i, pr->performance->shared_cpu_map); cpumask_set_cpu(i, pr->performance->shared_cpu_map);
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
} }
pr->performance = NULL; /* Will be set for real in register */ pr->performance = NULL; /* Will be set for real in register */
} }
mutex_unlock(&performance_mutex); mutex_unlock(&performance_mutex);
free_cpumask_var(covered_cpus);
return retval; return retval;
} }
EXPORT_SYMBOL(acpi_processor_preregister_performance); EXPORT_SYMBOL(acpi_processor_preregister_performance);
......
...@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void) ...@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void)
int count, count_target; int count, count_target;
int retval = 0; int retval = 0;
unsigned int i, j; unsigned int i, j;
cpumask_t covered_cpus; cpumask_var_t covered_cpus;
struct acpi_processor *pr, *match_pr; struct acpi_processor *pr, *match_pr;
struct acpi_tsd_package *pdomain, *match_pdomain; struct acpi_tsd_package *pdomain, *match_pdomain;
struct acpi_processor_throttling *pthrottling, *match_pthrottling; struct acpi_processor_throttling *pthrottling, *match_pthrottling;
if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
return -ENOMEM;
/* /*
* Now that we have _TSD data from all CPUs, lets setup T-state * Now that we have _TSD data from all CPUs, lets setup T-state
* coordination between all CPUs. * coordination between all CPUs.
...@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void) ...@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void)
if (retval) if (retval)
goto err_ret; goto err_ret;
cpus_clear(covered_cpus); cpumask_clear(covered_cpus);
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
pr = per_cpu(processors, i); pr = per_cpu(processors, i);
if (!pr) if (!pr)
continue; continue;
if (cpu_isset(i, covered_cpus)) if (cpumask_test_cpu(i, covered_cpus))
continue; continue;
pthrottling = &pr->throttling; pthrottling = &pr->throttling;
pdomain = &(pthrottling->domain_info); pdomain = &(pthrottling->domain_info);
cpu_set(i, pthrottling->shared_cpu_map); cpumask_set_cpu(i, pthrottling->shared_cpu_map);
cpu_set(i, covered_cpus); cpumask_set_cpu(i, covered_cpus);
/* /*
* If the number of processor in the TSD domain is 1, it is * If the number of processor in the TSD domain is 1, it is
* unnecessary to parse the coordination for this CPU. * unnecessary to parse the coordination for this CPU.
...@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void) ...@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void)
goto err_ret; goto err_ret;
} }
cpu_set(j, covered_cpus); cpumask_set_cpu(j, covered_cpus);
cpu_set(j, pthrottling->shared_cpu_map); cpumask_set_cpu(j, pthrottling->shared_cpu_map);
count++; count++;
} }
for_each_possible_cpu(j) { for_each_possible_cpu(j) {
...@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void) ...@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void)
* If some CPUS have the same domain, they * If some CPUS have the same domain, they
* will have the same shared_cpu_map. * will have the same shared_cpu_map.
*/ */
match_pthrottling->shared_cpu_map = cpumask_copy(match_pthrottling->shared_cpu_map,
pthrottling->shared_cpu_map; pthrottling->shared_cpu_map);
} }
} }
err_ret: err_ret:
free_cpumask_var(covered_cpus);
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
pr = per_cpu(processors, i); pr = per_cpu(processors, i);
if (!pr) if (!pr)
...@@ -182,8 +187,8 @@ static int acpi_processor_update_tsd_coord(void) ...@@ -182,8 +187,8 @@ static int acpi_processor_update_tsd_coord(void)
*/ */
if (retval) { if (retval) {
pthrottling = &(pr->throttling); pthrottling = &(pr->throttling);
cpus_clear(pthrottling->shared_cpu_map); cpumask_clear(pthrottling->shared_cpu_map);
cpu_set(i, pthrottling->shared_cpu_map); cpumask_set_cpu(i, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
} }
} }
...@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr) ...@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
pthrottling = &pr->throttling; pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 1; pthrottling->tsd_valid_flag = 1;
pthrottling->shared_type = pdomain->coord_type; pthrottling->shared_type = pdomain->coord_type;
cpu_set(pr->id, pthrottling->shared_cpu_map); cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
/* /*
* If the coordination type is not defined in ACPI spec, * If the coordination type is not defined in ACPI spec,
* the tsd_valid_flag will be clear and coordination type * the tsd_valid_flag will be clear and coordination type
...@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) ...@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
static int acpi_processor_get_throttling(struct acpi_processor *pr) static int acpi_processor_get_throttling(struct acpi_processor *pr)
{ {
cpumask_t saved_mask; cpumask_var_t saved_mask;
int ret; int ret;
if (!pr) if (!pr)
...@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) ...@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
if (!pr->flags.throttling) if (!pr->flags.throttling)
return -ENODEV; return -ENODEV;
if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
return -ENOMEM;
/* /*
* Migrate task to the cpu pointed by pr. * Migrate task to the cpu pointed by pr.
*/ */
saved_mask = current->cpus_allowed; cpumask_copy(saved_mask, &current->cpus_allowed);
set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); /* FIXME: use work_on_cpu() */
set_cpus_allowed_ptr(current, cpumask_of(pr->id));
ret = pr->throttling.acpi_processor_get_throttling(pr); ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */ /* restore the previous state */
set_cpus_allowed_ptr(current, &saved_mask); set_cpus_allowed_ptr(current, saved_mask);
free_cpumask_var(saved_mask);
return ret; return ret;
} }
...@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, ...@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state) int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{ {
cpumask_t saved_mask; cpumask_var_t saved_mask;
int ret = 0; int ret = 0;
unsigned int i; unsigned int i;
struct acpi_processor *match_pr; struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling; struct acpi_processor_throttling *p_throttling;
struct throttling_tstate t_state; struct throttling_tstate t_state;
cpumask_t online_throttling_cpus; cpumask_var_t online_throttling_cpus;
if (!pr) if (!pr)
return -EINVAL; return -EINVAL;
...@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) ...@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
if ((state < 0) || (state > (pr->throttling.state_count - 1))) if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL; return -EINVAL;
saved_mask = current->cpus_allowed; if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
return -ENOMEM;
if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
free_cpumask_var(saved_mask);
return -ENOMEM;
}
cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state; t_state.target_state = state;
p_throttling = &(pr->throttling); p_throttling = &(pr->throttling);
cpus_and(online_throttling_cpus, cpu_online_map, cpumask_and(online_throttling_cpus, cpu_online_mask,
p_throttling->shared_cpu_map); p_throttling->shared_cpu_map);
/* /*
* The throttling notifier will be called for every * The throttling notifier will be called for every
* affected cpu in order to get one proper T-state. * affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE. * The notifier event is THROTTLING_PRECHANGE.
*/ */
for_each_cpu_mask_nr(i, online_throttling_cpus) { for_each_cpu(i, online_throttling_cpus) {
t_state.cpu = i; t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state); &t_state);
...@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) ...@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr. * it can be called only for the cpu pointed by pr.
*/ */
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); /* FIXME: use work_on_cpu() */
set_cpus_allowed_ptr(current, cpumask_of(pr->id));
ret = p_throttling->acpi_processor_set_throttling(pr, ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state); t_state.target_state);
} else { } else {
...@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) ...@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it is necessary to set T-state for every affected * it is necessary to set T-state for every affected
* cpus. * cpus.
*/ */
for_each_cpu_mask_nr(i, online_throttling_cpus) { for_each_cpu(i, online_throttling_cpus) {
match_pr = per_cpu(processors, i); match_pr = per_cpu(processors, i);
/* /*
* If the pointer is invalid, we will report the * If the pointer is invalid, we will report the
...@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) ...@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue; continue;
} }
t_state.cpu = i; t_state.cpu = i;
set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); /* FIXME: use work_on_cpu() */
set_cpus_allowed_ptr(current, cpumask_of(i));
ret = match_pr->throttling. ret = match_pr->throttling.
acpi_processor_set_throttling( acpi_processor_set_throttling(
match_pr, t_state.target_state); match_pr, t_state.target_state);
...@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) ...@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* affected cpu to update the T-states. * affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE * The notifier event is THROTTLING_POSTCHANGE
*/ */
for_each_cpu_mask_nr(i, online_throttling_cpus) { for_each_cpu(i, online_throttling_cpus) {
t_state.cpu = i; t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state); &t_state);
} }
/* restore the previous state */ /* restore the previous state */
set_cpus_allowed_ptr(current, &saved_mask); /* FIXME: use work_on_cpu() */
set_cpus_allowed_ptr(current, saved_mask);
free_cpumask_var(online_throttling_cpus);
free_cpumask_var(saved_mask);
return ret; return ret;
} }
...@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) ...@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
if (acpi_processor_get_tsd(pr)) { if (acpi_processor_get_tsd(pr)) {
pthrottling = &pr->throttling; pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 0; pthrottling->tsd_valid_flag = 0;
cpu_set(pr->id, pthrottling->shared_cpu_map); cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
} }
......
...@@ -127,7 +127,7 @@ struct acpi_processor_performance { ...@@ -127,7 +127,7 @@ struct acpi_processor_performance {
unsigned int state_count; unsigned int state_count;
struct acpi_processor_px *states; struct acpi_processor_px *states;
struct acpi_psd_package domain_info; struct acpi_psd_package domain_info;
cpumask_t shared_cpu_map; cpumask_var_t shared_cpu_map;
unsigned int shared_type; unsigned int shared_type;
}; };
...@@ -172,7 +172,7 @@ struct acpi_processor_throttling { ...@@ -172,7 +172,7 @@ struct acpi_processor_throttling {
unsigned int state_count; unsigned int state_count;
struct acpi_processor_tx_tss *states_tss; struct acpi_processor_tx_tss *states_tss;
struct acpi_tsd_package domain_info; struct acpi_tsd_package domain_info;
cpumask_t shared_cpu_map; cpumask_var_t shared_cpu_map;
int (*acpi_processor_get_throttling) (struct acpi_processor * pr); int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
int (*acpi_processor_set_throttling) (struct acpi_processor * pr, int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
int state); int state);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment