Commit 13df635f authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  kvm: fix kvm reboot crash when MAXSMP is used
  cpumask: alloc zeroed cpumask for static cpumask_var_ts
  cpumask: introduce zalloc_cpumask_var
parents 9cdba302 8437a617
...@@ -550,7 +550,7 @@ static int __init acpi_cpufreq_early_init(void) ...@@ -550,7 +550,7 @@ static int __init acpi_cpufreq_early_init(void)
return -ENOMEM; return -ENOMEM;
} }
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
if (!alloc_cpumask_var_node( if (!zalloc_cpumask_var_node(
&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
GFP_KERNEL, cpu_to_node(i))) { GFP_KERNEL, cpu_to_node(i))) {
......
...@@ -322,7 +322,7 @@ static int powernow_acpi_init(void) ...@@ -322,7 +322,7 @@ static int powernow_acpi_init(void)
goto err0; goto err0;
} }
if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
GFP_KERNEL)) { GFP_KERNEL)) {
retval = -ENOMEM; retval = -ENOMEM;
goto err05; goto err05;
......
...@@ -887,7 +887,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) ...@@ -887,7 +887,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
/* notify BIOS that we exist */ /* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE); acpi_processor_notify_smm(THIS_MODULE);
if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
printk(KERN_ERR PFX printk(KERN_ERR PFX
"unable to alloc powernow_k8_data cpumask\n"); "unable to alloc powernow_k8_data cpumask\n");
ret_val = -ENOMEM; ret_val = -ENOMEM;
......
...@@ -471,7 +471,7 @@ static int centrino_target (struct cpufreq_policy *policy, ...@@ -471,7 +471,7 @@ static int centrino_target (struct cpufreq_policy *policy,
if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
return -ENOMEM; return -ENOMEM;
if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
free_cpumask_var(saved_mask); free_cpumask_var(saved_mask);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -1163,7 +1163,7 @@ static __init int mce_init_device(void) ...@@ -1163,7 +1163,7 @@ static __init int mce_init_device(void)
if (!mce_available(&boot_cpu_data)) if (!mce_available(&boot_cpu_data))
return -EIO; return -EIO;
alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
err = mce_init_banks(); err = mce_init_banks();
if (err) if (err)
......
...@@ -832,7 +832,7 @@ static int __init uv_bau_init(void) ...@@ -832,7 +832,7 @@ static int __init uv_bau_init(void)
return 0; return 0;
for_each_possible_cpu(cur_cpu) for_each_possible_cpu(cur_cpu)
alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
GFP_KERNEL, cpu_to_node(cur_cpu)); GFP_KERNEL, cpu_to_node(cur_cpu));
uv_bau_retry_limit = 1; uv_bau_retry_limit = 1;
......
...@@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device) ...@@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device)
if (!pr) if (!pr)
return -ENOMEM; return -ENOMEM;
if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
kfree(pr); kfree(pr);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) ...@@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
ret = -ENOMEM; ret = -ENOMEM;
goto nomem_out; goto nomem_out;
} }
if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
free_cpumask_var(policy->cpus); free_cpumask_var(policy->cpus);
kfree(policy); kfree(policy);
ret = -ENOMEM; ret = -ENOMEM;
......
...@@ -1022,6 +1022,8 @@ typedef struct cpumask *cpumask_var_t; ...@@ -1022,6 +1022,8 @@ typedef struct cpumask *cpumask_var_t;
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask); void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask);
...@@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, ...@@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
return true; return true;
} }
/*
 * Zeroing variant of alloc_cpumask_var for the no-allocation configuration.
 * NOTE(review): presumably the !CONFIG_CPUMASK_OFFSTACK branch, where
 * cpumask_var_t is a fixed-size array and no heap allocation occurs —
 * confirm against the surrounding #ifdef (not visible in this hunk).
 * The gfp flags are accepted for interface parity but unused; the mask is
 * simply cleared and success is always reported.
 */
static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
/*
 * NUMA-node-aware zeroing variant for the no-allocation configuration.
 * Both the gfp flags and the node hint are ignored here — there is nothing
 * to allocate, so placement is moot; the mask is cleared and the call
 * always succeeds, mirroring the allocating variant's contract.
 */
static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{ {
} }
......
...@@ -165,7 +165,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) ...@@ -165,7 +165,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
vec->count = 0; vec->count = 0;
if (bootmem) if (bootmem)
alloc_bootmem_cpumask_var(&vec->mask); alloc_bootmem_cpumask_var(&vec->mask);
else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
goto cleanup; goto cleanup;
} }
......
...@@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void) ...@@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void)
unsigned int i; unsigned int i;
for_each_possible_cpu(i) for_each_possible_cpu(i)
alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
GFP_KERNEL, cpu_to_node(i)); GFP_KERNEL, cpu_to_node(i));
} }
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
......
...@@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) ...@@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
switch (action) { switch (action) {
case CPU_UP_PREPARE: case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN: case CPU_UP_PREPARE_FROZEN:
if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
cpu_to_node(cpu))) cpu_to_node(cpu)))
return NOTIFY_BAD; return NOTIFY_BAD;
break; break;
......
...@@ -119,6 +119,12 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) ...@@ -119,6 +119,12 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
} }
EXPORT_SYMBOL(alloc_cpumask_var_node); EXPORT_SYMBOL(alloc_cpumask_var_node);
/**
 * zalloc_cpumask_var_node - allocate a zeroed cpumask on a given NUMA node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags for the allocation
 * @node: memory node from which to allocate
 *
 * Thin wrapper around alloc_cpumask_var_node() that ORs in __GFP_ZERO so
 * the returned mask starts out cleared.  Returns %true on success, %false
 * if the allocation failed.
 */
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);
/** /**
* alloc_cpumask_var - allocate a struct cpumask * alloc_cpumask_var - allocate a struct cpumask
* @mask: pointer to cpumask_var_t where the cpumask is returned * @mask: pointer to cpumask_var_t where the cpumask is returned
...@@ -135,6 +141,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) ...@@ -135,6 +141,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
} }
EXPORT_SYMBOL(alloc_cpumask_var); EXPORT_SYMBOL(alloc_cpumask_var);
/**
 * zalloc_cpumask_var - allocate a zeroed cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags for the allocation
 *
 * Thin wrapper around alloc_cpumask_var() that ORs in __GFP_ZERO so the
 * returned mask starts out cleared.  Returns %true on success, %false if
 * the allocation failed.
 */
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
/** /**
* alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
* @mask: pointer to cpumask_var_t where the cpumask is returned * @mask: pointer to cpumask_var_t where the cpumask is returned
......
...@@ -2301,7 +2301,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size, ...@@ -2301,7 +2301,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
bad_pfn = page_to_pfn(bad_page); bad_pfn = page_to_pfn(bad_page);
if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
r = -ENOMEM; r = -ENOMEM;
goto out_free_0; goto out_free_0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment