Commit 937e26c0 authored by KOSAKI Motohiro's avatar KOSAKI Motohiro Committed by Linus Torvalds

m32r: convert cpumask api

We plan to remove the old cpus_xx() cpumask APIs later.  We also plan to
change the mm_cpumask() implementation to allocate only nr_cpu_ids bits,
which makes dereferencing the full *mm_cpumask() a dangerous operation.

This patch therefore converts the remaining users to the new cpumask API.
Signed-off-by: default avatarKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent ba7328b2
...@@ -81,11 +81,11 @@ static __inline__ int cpu_number_map(int cpu) ...@@ -81,11 +81,11 @@ static __inline__ int cpu_number_map(int cpu)
static __inline__ unsigned int num_booting_cpus(void) static __inline__ unsigned int num_booting_cpus(void)
{ {
return cpus_weight(cpu_callout_map); return cpumask_weight(&cpu_callout_map);
} }
extern void smp_send_timer(void); extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
......
...@@ -87,7 +87,6 @@ void smp_local_timer_interrupt(void); ...@@ -87,7 +87,6 @@ void smp_local_timer_interrupt(void);
static void send_IPI_allbutself(int, int); static void send_IPI_allbutself(int, int);
static void send_IPI_mask(const struct cpumask *, int, int); static void send_IPI_mask(const struct cpumask *, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines */ /* Rescheduling request Routines */
...@@ -162,10 +161,10 @@ void smp_flush_cache_all(void) ...@@ -162,10 +161,10 @@ void smp_flush_cache_all(void)
unsigned long *mask; unsigned long *mask;
preempt_disable(); preempt_disable();
cpumask = cpu_online_map; cpumask_copy(&cpumask, cpu_online_mask);
cpu_clear(smp_processor_id(), cpumask); cpumask_clear_cpu(smp_processor_id(), &cpumask);
spin_lock(&flushcache_lock); spin_lock(&flushcache_lock);
mask=cpus_addr(cpumask); mask=cpumask_bits(&cpumask);
atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all(); _flush_cache_copyback_all();
...@@ -263,8 +262,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm) ...@@ -263,8 +262,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
preempt_disable(); preempt_disable();
cpu_id = smp_processor_id(); cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id]; mmc = &mm->context[cpu_id];
cpu_mask = *mm_cpumask(mm); cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpu_clear(cpu_id, cpu_mask); cpumask_clear_cpu(cpu_id, &cpu_mask);
if (*mmc != NO_CONTEXT) { if (*mmc != NO_CONTEXT) {
local_irq_save(flags); local_irq_save(flags);
...@@ -275,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm) ...@@ -275,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
local_irq_restore(flags); local_irq_restore(flags);
} }
if (!cpus_empty(cpu_mask)) if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
preempt_enable(); preempt_enable();
...@@ -333,8 +332,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) ...@@ -333,8 +332,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
preempt_disable(); preempt_disable();
cpu_id = smp_processor_id(); cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id]; mmc = &mm->context[cpu_id];
cpu_mask = *mm_cpumask(mm); cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpu_clear(cpu_id, cpu_mask); cpumask_clear_cpu(cpu_id, &cpu_mask);
#ifdef DEBUG_SMP #ifdef DEBUG_SMP
if (!mm) if (!mm)
...@@ -348,7 +347,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) ...@@ -348,7 +347,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
__flush_tlb_page(va); __flush_tlb_page(va);
local_irq_restore(flags); local_irq_restore(flags);
} }
if (!cpus_empty(cpu_mask)) if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, vma, va); flush_tlb_others(cpu_mask, mm, vma, va);
preempt_enable(); preempt_enable();
...@@ -395,14 +394,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, ...@@ -395,14 +394,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
* - current CPU must not be in mask * - current CPU must not be in mask
* - mask must exist :) * - mask must exist :)
*/ */
BUG_ON(cpus_empty(cpumask)); BUG_ON(cpumask_empty(&cpumask));
BUG_ON(cpu_isset(smp_processor_id(), cpumask)); BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
BUG_ON(!mm); BUG_ON(!mm);
/* If a CPU which we ran on has gone down, OK. */ /* If a CPU which we ran on has gone down, OK. */
cpus_and(cpumask, cpumask, cpu_online_map); cpumask_and(&cpumask, &cpumask, cpu_online_mask);
if (cpus_empty(cpumask)) if (cpumask_empty(&cpumask))
return; return;
/* /*
...@@ -416,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, ...@@ -416,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
flush_mm = mm; flush_mm = mm;
flush_vma = vma; flush_vma = vma;
flush_va = va; flush_va = va;
mask=cpus_addr(cpumask); mask=cpumask_bits(&cpumask);
atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
/* /*
...@@ -425,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, ...@@ -425,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
*/ */
send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
while (!cpus_empty(flush_cpumask)) { while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
/* nothing. lockup detection does not belong here */ /* nothing. lockup detection does not belong here */
mb(); mb();
} }
...@@ -460,7 +459,7 @@ void smp_invalidate_interrupt(void) ...@@ -460,7 +459,7 @@ void smp_invalidate_interrupt(void)
int cpu_id = smp_processor_id(); int cpu_id = smp_processor_id();
unsigned long *mmc = &flush_mm->context[cpu_id]; unsigned long *mmc = &flush_mm->context[cpu_id];
if (!cpu_isset(cpu_id, flush_cpumask)) if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
return; return;
if (flush_va == FLUSH_ALL) { if (flush_va == FLUSH_ALL) {
...@@ -478,7 +477,7 @@ void smp_invalidate_interrupt(void) ...@@ -478,7 +477,7 @@ void smp_invalidate_interrupt(void)
__flush_tlb_page(va); __flush_tlb_page(va);
} }
} }
cpu_clear(cpu_id, flush_cpumask); cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
} }
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
...@@ -530,7 +529,7 @@ static void stop_this_cpu(void *dummy) ...@@ -530,7 +529,7 @@ static void stop_this_cpu(void *dummy)
/* /*
* Remove this CPU: * Remove this CPU:
*/ */
cpu_clear(cpu_id, cpu_online_map); set_cpu_online(cpu_id, false);
/* /*
* PSW IE = 1; * PSW IE = 1;
...@@ -725,8 +724,8 @@ static void send_IPI_allbutself(int ipi_num, int try) ...@@ -725,8 +724,8 @@ static void send_IPI_allbutself(int ipi_num, int try)
{ {
cpumask_t cpumask; cpumask_t cpumask;
cpumask = cpu_online_map; cpumask_copy(&cpumask, cpu_online_mask);
cpu_clear(smp_processor_id(), cpumask); cpumask_clear_cpu(smp_processor_id(), &cpumask);
send_IPI_mask(&cpumask, ipi_num, try); send_IPI_mask(&cpumask, ipi_num, try);
} }
...@@ -763,13 +762,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try) ...@@ -763,13 +762,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
cpumask_and(&tmp, cpumask, cpu_online_mask); cpumask_and(&tmp, cpumask, cpu_online_mask);
BUG_ON(!cpumask_equal(cpumask, &tmp)); BUG_ON(!cpumask_equal(cpumask, &tmp));
physid_mask = CPU_MASK_NONE; cpumask_clear(&physid_mask);
for_each_cpu(cpu_id, cpumask) { for_each_cpu(cpu_id, cpumask) {
if ((phys_id = cpu_to_physid(cpu_id)) != -1) if ((phys_id = cpu_to_physid(cpu_id)) != -1)
cpu_set(phys_id, physid_mask); cpumask_set_cpu(phys_id, &physid_mask);
} }
send_IPI_mask_phys(physid_mask, ipi_num, try); send_IPI_mask_phys(&physid_mask, ipi_num, try);
} }
/*==========================================================================* /*==========================================================================*
...@@ -792,14 +791,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try) ...@@ -792,14 +791,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
* ---------- --- -------------------------------------------------------- * ---------- --- --------------------------------------------------------
* *
*==========================================================================*/ *==========================================================================*/
unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
int try) int try)
{ {
spinlock_t *ipilock; spinlock_t *ipilock;
volatile unsigned long *ipicr_addr; volatile unsigned long *ipicr_addr;
unsigned long ipicr_val; unsigned long ipicr_val;
unsigned long my_physid_mask; unsigned long my_physid_mask;
unsigned long mask = cpus_addr(physid_mask)[0]; unsigned long mask = cpumask_bits(physid_mask)[0];
if (mask & ~physids_coerce(phys_cpu_present_map)) if (mask & ~physids_coerce(phys_cpu_present_map))
......
...@@ -135,9 +135,9 @@ void __devinit smp_prepare_boot_cpu(void) ...@@ -135,9 +135,9 @@ void __devinit smp_prepare_boot_cpu(void)
{ {
bsp_phys_id = hard_smp_processor_id(); bsp_phys_id = hard_smp_processor_id();
physid_set(bsp_phys_id, phys_cpu_present_map); physid_set(bsp_phys_id, phys_cpu_present_map);
cpu_set(0, cpu_online_map); /* BSP's cpu_id == 0 */ set_cpu_online(0, true); /* BSP's cpu_id == 0 */
cpu_set(0, cpu_callout_map); cpumask_set_cpu(0, &cpu_callout_map);
cpu_set(0, cpu_callin_map); cpumask_set_cpu(0, &cpu_callin_map);
/* /*
* Initialize the logical to physical CPU number mapping * Initialize the logical to physical CPU number mapping
...@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++) for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
physid_set(phys_id, phys_cpu_present_map); physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU #ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(&cpu_possible_map); init_cpu_present(cpu_possible_mask);
#endif #endif
show_mp_info(nr_cpu); show_mp_info(nr_cpu);
...@@ -294,10 +294,10 @@ static void __init do_boot_cpu(int phys_id) ...@@ -294,10 +294,10 @@ static void __init do_boot_cpu(int phys_id)
send_status = 0; send_status = 0;
boot_status = 0; boot_status = 0;
cpu_set(phys_id, cpu_bootout_map); cpumask_set_cpu(phys_id, &cpu_bootout_map);
/* Send Startup IPI */ /* Send Startup IPI */
send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0); send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
Dprintk("Waiting for send to finish...\n"); Dprintk("Waiting for send to finish...\n");
timeout = 0; timeout = 0;
...@@ -306,7 +306,7 @@ static void __init do_boot_cpu(int phys_id) ...@@ -306,7 +306,7 @@ static void __init do_boot_cpu(int phys_id)
do { do {
Dprintk("+"); Dprintk("+");
udelay(1000); udelay(1000);
send_status = !cpu_isset(phys_id, cpu_bootin_map); send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
} while (send_status && (timeout++ < 100)); } while (send_status && (timeout++ < 100));
Dprintk("After Startup.\n"); Dprintk("After Startup.\n");
...@@ -316,19 +316,19 @@ static void __init do_boot_cpu(int phys_id) ...@@ -316,19 +316,19 @@ static void __init do_boot_cpu(int phys_id)
* allow APs to start initializing. * allow APs to start initializing.
*/ */
Dprintk("Before Callout %d.\n", cpu_id); Dprintk("Before Callout %d.\n", cpu_id);
cpu_set(cpu_id, cpu_callout_map); cpumask_set_cpu(cpu_id, &cpu_callout_map);
Dprintk("After Callout %d.\n", cpu_id); Dprintk("After Callout %d.\n", cpu_id);
/* /*
* Wait 5s total for a response * Wait 5s total for a response
*/ */
for (timeout = 0; timeout < 5000; timeout++) { for (timeout = 0; timeout < 5000; timeout++) {
if (cpu_isset(cpu_id, cpu_callin_map)) if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
break; /* It has booted */ break; /* It has booted */
udelay(1000); udelay(1000);
} }
if (cpu_isset(cpu_id, cpu_callin_map)) { if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
/* number CPUs logically, starting from 1 (BSP is 0) */ /* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n"); Dprintk("OK.\n");
} else { } else {
...@@ -340,9 +340,9 @@ static void __init do_boot_cpu(int phys_id) ...@@ -340,9 +340,9 @@ static void __init do_boot_cpu(int phys_id)
if (send_status || boot_status) { if (send_status || boot_status) {
unmap_cpu_to_physid(cpu_id, phys_id); unmap_cpu_to_physid(cpu_id, phys_id);
cpu_clear(cpu_id, cpu_callout_map); cpumask_clear_cpu(cpu_id, &cpu_callout_map);
cpu_clear(cpu_id, cpu_callin_map); cpumask_clear_cpu(cpu_id, &cpu_callin_map);
cpu_clear(cpu_id, cpu_initialized); cpumask_clear_cpu(cpu_id, &cpu_initialized);
cpucount--; cpucount--;
} }
} }
...@@ -351,17 +351,17 @@ int __cpuinit __cpu_up(unsigned int cpu_id) ...@@ -351,17 +351,17 @@ int __cpuinit __cpu_up(unsigned int cpu_id)
{ {
int timeout; int timeout;
cpu_set(cpu_id, smp_commenced_mask); cpumask_set_cpu(cpu_id, &smp_commenced_mask);
/* /*
* Wait 5s total for a response * Wait 5s total for a response
*/ */
for (timeout = 0; timeout < 5000; timeout++) { for (timeout = 0; timeout < 5000; timeout++) {
if (cpu_isset(cpu_id, cpu_online_map)) if (cpu_online(cpu_id))
break; break;
udelay(1000); udelay(1000);
} }
if (!cpu_isset(cpu_id, cpu_online_map)) if (!cpu_online(cpu_id))
BUG(); BUG();
return 0; return 0;
...@@ -373,11 +373,11 @@ void __init smp_cpus_done(unsigned int max_cpus) ...@@ -373,11 +373,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
unsigned long bogosum = 0; unsigned long bogosum = 0;
for (timeout = 0; timeout < 5000; timeout++) { for (timeout = 0; timeout < 5000; timeout++) {
if (cpus_equal(cpu_callin_map, cpu_online_map)) if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
break; break;
udelay(1000); udelay(1000);
} }
if (!cpus_equal(cpu_callin_map, cpu_online_map)) if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
BUG(); BUG();
for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++) for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
...@@ -388,7 +388,7 @@ void __init smp_cpus_done(unsigned int max_cpus) ...@@ -388,7 +388,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
*/ */
Dprintk("Before bogomips.\n"); Dprintk("Before bogomips.\n");
if (cpucount) { if (cpucount) {
for_each_cpu_mask(cpu_id, cpu_online_map) for_each_cpu(cpu_id,cpu_online_mask)
bogosum += cpu_data[cpu_id].loops_per_jiffy; bogosum += cpu_data[cpu_id].loops_per_jiffy;
printk(KERN_INFO "Total of %d processors activated " \ printk(KERN_INFO "Total of %d processors activated " \
...@@ -425,7 +425,7 @@ int __init start_secondary(void *unused) ...@@ -425,7 +425,7 @@ int __init start_secondary(void *unused)
cpu_init(); cpu_init();
preempt_disable(); preempt_disable();
smp_callin(); smp_callin();
while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
cpu_relax(); cpu_relax();
smp_online(); smp_online();
...@@ -463,7 +463,7 @@ static void __init smp_callin(void) ...@@ -463,7 +463,7 @@ static void __init smp_callin(void)
int cpu_id = smp_processor_id(); int cpu_id = smp_processor_id();
unsigned long timeout; unsigned long timeout;
if (cpu_isset(cpu_id, cpu_callin_map)) { if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
printk("huh, phys CPU#%d, CPU#%d already present??\n", printk("huh, phys CPU#%d, CPU#%d already present??\n",
phys_id, cpu_id); phys_id, cpu_id);
BUG(); BUG();
...@@ -474,7 +474,7 @@ static void __init smp_callin(void) ...@@ -474,7 +474,7 @@ static void __init smp_callin(void)
timeout = jiffies + (2 * HZ); timeout = jiffies + (2 * HZ);
while (time_before(jiffies, timeout)) { while (time_before(jiffies, timeout)) {
/* Has the boot CPU finished it's STARTUP sequence ? */ /* Has the boot CPU finished it's STARTUP sequence ? */
if (cpu_isset(cpu_id, cpu_callout_map)) if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
break; break;
cpu_relax(); cpu_relax();
} }
...@@ -486,7 +486,7 @@ static void __init smp_callin(void) ...@@ -486,7 +486,7 @@ static void __init smp_callin(void)
} }
/* Allow the master to continue. */ /* Allow the master to continue. */
cpu_set(cpu_id, cpu_callin_map); cpumask_set_cpu(cpu_id, &cpu_callin_map);
} }
static void __init smp_online(void) static void __init smp_online(void)
...@@ -503,7 +503,7 @@ static void __init smp_online(void) ...@@ -503,7 +503,7 @@ static void __init smp_online(void)
/* Save our processor parameters */ /* Save our processor parameters */
smp_store_cpu_info(cpu_id); smp_store_cpu_info(cpu_id);
cpu_set(cpu_id, cpu_online_map); set_cpu_online(cpu_id, true);
} }
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment