Commit fc0e4748 authored by Mike Travis, committed by Ingo Molnar

x86: use new set_cpus_allowed_ptr function

  * Use the new set_cpus_allowed_ptr() function added by the previous patch,
    which, instead of passing the "newly allowed cpus" cpumask_t argument
    by value, passes it by pointer (see the sketch after this list):

    -int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
    +int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)

  * Clean up uses of CPU_MASK_ALL.

  * Collapse other NR_CPUS changes to arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c.
    Use pointers to cpumask_t arguments whenever possible.
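
    A minimal sketch of the resulting calling pattern, assuming the
    2.6.25-era cpumask API (cpumask_t, cpumask_of_cpu(),
    current->cpus_allowed); run_on_cpu_example() is an illustrative
    helper, not code from this patch:

    #include <linux/sched.h>
    #include <linux/cpumask.h>

    /* Pin the current task to one CPU, do some work there, then
     * restore the original affinity. Sketch only. */
    static int run_on_cpu_example(unsigned int cpu)
    {
            cpumask_t saved_mask = current->cpus_allowed;   /* remember old affinity */
            int retval;

            /* old: retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
             * new: pass the mask by pointer, avoiding an NR_CPUS-sized copy */
            retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
            if (retval)
                    return retval;

            /* ... per-CPU work runs here, pinned to 'cpu' ... */

            set_cpus_allowed_ptr(current, &saved_mask);     /* restore, again by pointer */
            return 0;
    }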

Depends on:
	[sched-devel]: sched: add new set_cpus_allowed_ptr function

Cc: Len Brown <len.brown@intel.com>
Cc: Dave Jones <davej@codemonkey.org.uk>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 434d53b0
@@ -93,7 +93,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 	/* Make sure we are running on right CPU */
 	saved_mask = current->cpus_allowed;
-	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		return -1;
@@ -130,7 +130,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 		 cx->address);
 out:
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
......
@@ -192,9 +192,9 @@ static void drv_read(struct drv_cmd *cmd)
 	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
-	set_cpus_allowed(current, cmd->mask);
+	set_cpus_allowed_ptr(current, &cmd->mask);
 	do_drv_read(cmd);
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 }
 
 static void drv_write(struct drv_cmd *cmd)
@@ -203,30 +203,30 @@ static void drv_write(struct drv_cmd *cmd)
 	unsigned int i;
 	for_each_cpu_mask(i, cmd->mask) {
-		set_cpus_allowed(current, cpumask_of_cpu(i));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
 		do_drv_write(cmd);
 	}
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return;
 }
 
-static u32 get_cur_val(cpumask_t mask)
+static u32 get_cur_val(const cpumask_t *mask)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
-	if (unlikely(cpus_empty(mask)))
+	if (unlikely(cpus_empty(*mask)))
 		return 0;
-	switch (per_cpu(drv_data, first_cpu(mask))->cpu_feature) {
+	switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, first_cpu(mask))->acpi_data;
+		perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -234,7 +234,7 @@ static u32 get_cur_val(cpumask_t mask)
 		return 0;
 	}
-	cmd.mask = mask;
+	cmd.mask = *mask;
 	drv_read(&cmd);
@@ -271,7 +271,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	unsigned int retval;
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (get_cpu() != cpu) {
 		/* We were not able to run on requested processor */
 		put_cpu();
@@ -329,7 +329,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 	retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
 	put_cpu();
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
 	return retval;
@@ -347,13 +347,13 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 		return 0;
 	}
-	freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
+	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
 	dprintk("cur freq = %u\n", freq);
 	return freq;
 }
 
-static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
+static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
 				struct acpi_cpufreq_data *data)
 {
 	unsigned int cur_freq;
@@ -449,7 +449,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	drv_write(&cmd);
 	if (acpi_pstate_strict) {
-		if (!check_freqs(cmd.mask, freqs.new, data)) {
+		if (!check_freqs(&cmd.mask, freqs.new, data)) {
 			dprintk("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
 			return -EAGAIN;
......
@@ -478,12 +478,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
-	cpumask_t oldmask = CPU_MASK_ALL;
+	cpumask_t oldmask;
 	u32 eax, ebx, ecx, edx;
 	unsigned int rc = 0;
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -528,7 +528,7 @@ static int check_supported_cpu(unsigned int cpu)
 	rc = 1;
 out:
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	return rc;
 }
@@ -1015,7 +1015,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 /* Driver entry point to switch to the target frequency */
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
-	cpumask_t oldmask = CPU_MASK_ALL;
+	cpumask_t oldmask;
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
@@ -1030,7 +1030,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1085,7 +1085,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 	ret = 0;
 err_out:
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	return ret;
 }
@@ -1104,7 +1104,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
-	cpumask_t oldmask = CPU_MASK_ALL;
+	cpumask_t oldmask;
 	int rc;
 	if (!cpu_online(pol->cpu))
@@ -1145,7 +1145,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1164,7 +1164,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	fidvid_msr_init();
 	/* run on any CPU again */
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cpus = cpumask_of_cpu(pol->cpu);
@@ -1205,7 +1205,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	return 0;
 err_out:
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	powernow_k8_cpu_exit_acpi(data);
 	kfree(data);
@@ -1242,10 +1242,11 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	if (!data)
 		return -EINVAL;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu) {
-		printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);
-		set_cpus_allowed(current, oldmask);
+		printk(KERN_ERR PFX
+			"limiting to CPU %d failed in powernowk8_get\n", cpu);
+		set_cpus_allowed_ptr(current, &oldmask);
 		return 0;
 	}
@@ -1253,13 +1254,14 @@ static unsigned int powernowk8_get (unsigned int cpu)
 		goto out;
 	if (cpu_family == CPU_HW_PSTATE)
-		khz = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+		khz = find_khz_freq_from_pstate(data->powernow_table,
+						data->currpstate);
 	else
 		khz = find_khz_freq_from_fid(data->currfid);
 out:
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	return khz;
 }
......
@@ -315,7 +315,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 	cpumask_t saved_mask;
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (smp_processor_id() != cpu)
 		return 0;
@@ -333,7 +333,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 		clock_freq = extract_clock(l, cpu, 1);
 	}
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return clock_freq;
 }
@@ -487,7 +487,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		else
 			cpu_set(j, set_mask);
-		set_cpus_allowed(current, set_mask);
+		set_cpus_allowed_ptr(current, &set_mask);
 		preempt_disable();
 		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
 			dprintk("couldn't limit to CPUs in this domain\n");
@@ -555,7 +555,8 @@ static int centrino_target (struct cpufreq_policy *policy,
 		if (!cpus_empty(covered_cpus)) {
 			for_each_cpu_mask(j, covered_cpus) {
-				set_cpus_allowed(current, cpumask_of_cpu(j));
+				set_cpus_allowed_ptr(current,
+						     &cpumask_of_cpu(j));
 				wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
 			}
 		}
@@ -569,12 +570,12 @@ static int centrino_target (struct cpufreq_policy *policy,
 			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 		}
 	}
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return 0;
 migrate_end:
 	preempt_enable();
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return 0;
 }
......
@@ -229,22 +229,22 @@ static unsigned int speedstep_detect_chipset (void)
 	return 0;
 }
 
-static unsigned int _speedstep_get(cpumask_t cpus)
+static unsigned int _speedstep_get(const cpumask_t *cpus)
 {
 	unsigned int speed;
 	cpumask_t cpus_allowed;
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, cpus);
+	set_cpus_allowed_ptr(current, cpus);
 	speed = speedstep_get_processor_frequency(speedstep_processor);
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 	dprintk("detected %u kHz as current frequency\n", speed);
 	return speed;
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	return _speedstep_get(cpumask_of_cpu(cpu));
+	return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
@@ -267,7 +267,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
 		return -EINVAL;
-	freqs.old = _speedstep_get(policy->cpus);
+	freqs.old = _speedstep_get(&policy->cpus);
 	freqs.new = speedstep_freqs[newstate].frequency;
 	freqs.cpu = policy->cpu;
@@ -285,12 +285,12 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	}
 	/* switch to physical CPU where state is to be changed */
-	set_cpus_allowed(current, policy->cpus);
+	set_cpus_allowed_ptr(current, &policy->cpus);
 	speedstep_set_state(newstate);
 	/* allow to be run on all CPUs */
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 	for_each_cpu_mask(i, policy->cpus) {
 		freqs.cpu = i;
@@ -326,7 +326,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 #endif
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed(current, policy->cpus);
+	set_cpus_allowed_ptr(current, &policy->cpus);
 	/* detect low and high frequency and transition latency */
 	result = speedstep_get_freqs(speedstep_processor,
@@ -334,12 +334,12 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 				     &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
 				     &policy->cpuinfo.transition_latency,
 				     &speedstep_set_state);
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 	if (result)
 		return result;
 	/* get current speed setting */
-	speed = _speedstep_get(policy->cpus);
+	speed = _speedstep_get(&policy->cpus);
 	if (!speed)
 		return -EIO;
......
@@ -525,7 +525,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		return -ENOMEM;
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	if (retval)
 		goto out;
@@ -542,7 +542,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		}
 		cache_shared_cpu_map_setup(cpu, j);
 	}
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 out:
 	if (retval) {
......
@@ -402,7 +402,7 @@ static int do_microcode_update (void)
 		if (!uci->valid)
 			continue;
-		set_cpus_allowed(current, cpumask_of_cpu(cpu));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		error = get_maching_microcode(new_mc, cpu);
 		if (error < 0)
 			goto out;
@@ -416,7 +416,7 @@ static int do_microcode_update (void)
 		vfree(new_mc);
 	if (cursor < 0)
 		error = cursor;
-	set_cpus_allowed(current, old);
+	set_cpus_allowed_ptr(current, &old);
 	return error;
 }
@@ -579,7 +579,7 @@ static int apply_microcode_check_cpu(int cpu)
 		return 0;
 	old = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	/* Check if the microcode we have in memory matches the CPU */
 	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -610,7 +610,7 @@ static int apply_microcode_check_cpu(int cpu)
 		       " sig=0x%x, pf=0x%x, rev=0x%x\n",
 		       cpu, uci->sig, uci->pf, uci->rev);
-	set_cpus_allowed(current, old);
+	set_cpus_allowed_ptr(current, &old);
 	return err;
 }
@@ -621,13 +621,13 @@ static void microcode_init_cpu(int cpu, int resume)
 	old = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
 	if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
 		cpu_request_microcode(cpu);
 	mutex_unlock(&microcode_mutex);
-	set_cpus_allowed(current, old);
+	set_cpus_allowed_ptr(current, &old);
 }
 
 static void microcode_fini_cpu(int cpu)
@@ -657,14 +657,14 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
 		old = current->cpus_allowed;
 		get_online_cpus();
-		set_cpus_allowed(current, cpumask_of_cpu(cpu));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		mutex_lock(&microcode_mutex);
 		if (uci->valid)
 			err = cpu_request_microcode(cpu);
 		mutex_unlock(&microcode_mutex);
 		put_online_cpus();
-		set_cpus_allowed(current, old);
+		set_cpus_allowed_ptr(current, &old);
 	}
 	if (err)
 		return err;
......
@@ -420,7 +420,7 @@ static void native_machine_shutdown(void)
 		reboot_cpu_id = smp_processor_id();
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));
+	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
......