Commit 0b5f9c00 authored by Rusty Russell

remove references to cpu_*_map in arch/

This has been obsolescent for a while; time for the final push.

In adjacent context, replaced old cpus_* with cpumask_*.
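The conversion is mechanical throughout: the exported cpumask_t variables
(cpu_online_map, cpu_possible_map, cpu_present_map) give way to the const
struct cpumask pointers (cpu_online_mask, cpu_possible_mask, cpu_present_mask),
and the old cpus_*/cpu_set/cpu_clear helpers to their cpumask_* equivalents.
A sketch of the typical before/after shape, mirroring the arch/arm
smp_send_stop() hunk below:

	/* Before: cpumask_t copied by value, old bitmap helpers. */
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);

	/* After: copy from the const mask pointer, use cpumask_* helpers. */
	struct cpumask mask;
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);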
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: David S. Miller <davem@davemloft.net> (arch/sparc)
Acked-by: Chris Metcalf <cmetcalf@tilera.com> (arch/tile)
Cc: user-mode-linux-devel@lists.sourceforge.net
Cc: Russell King <linux@arm.linux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: linux-hexagon@vger.kernel.org
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Helge Deller <deller@gmx.de>
Cc: sparclinux@vger.kernel.org
parent b5174fa3
@@ -127,7 +127,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 		flush_insns(addr, sizeof(u16));
 	} else if (addr & 2) {
 		/* A 32-bit instruction spanning two words needs special care */
-		stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
+		stop_machine(set_t32_breakpoint, (void *)addr, cpu_online_mask);
 	} else {
 		/* Word aligned 32-bit instruction can be written atomically */
 		u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
@@ -190,7 +190,7 @@ int __kprobes __arch_disarm_kprobe(void *p)
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
+	stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
...
@@ -354,7 +354,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 * re-initialize the map in platform_smp_prepare_cpus() if
 	 * present != possible (e.g. physical hotplug).
 	 */
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 
 	/*
 	 * Initialise the SCU if there are more than one CPU
@@ -586,8 +586,9 @@ void smp_send_stop(void)
 	unsigned long timeout;
 
 	if (num_online_cpus() > 1) {
-		cpumask_t mask = cpu_online_map;
-		cpu_clear(smp_processor_id(), mask);
+		struct cpumask mask;
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
 		smp_cross_call(&mask, IPI_CPU_STOP);
 	}
...
@@ -35,7 +35,7 @@
 #define BASE_IPI_IRQ 26
 
 /*
- * cpu_possible_map needs to be filled out prior to setup_per_cpu_areas
+ * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
  * (which is prior to any of our smp_prepare_cpu crap), in order to set
  * up the... per_cpu areas.
  */
@@ -208,7 +208,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	stack_start = ((void *) thread) + THREAD_SIZE;
 	__vmstart(start_secondary, stack_start);
 
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_online(cpu))
 		barrier();
 
 	return 0;
@@ -229,7 +229,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Right now, let's just fake it. */
 	for (i = 0; i < max_cpus; i++)
-		cpu_set(i, cpu_present_map);
+		set_cpu_present(i, true);
 
 	/* Also need to register the interrupts for IPI */
 	if (max_cpus > 1)
@@ -269,5 +269,5 @@ void smp_start_cpus(void)
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 }
@@ -268,7 +268,7 @@ static int octeon_cpu_disable(void)
 	spin_lock(&smp_reserve_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 	local_irq_disable();
 	fixup_irqs();
...
@@ -173,7 +173,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 	if (retval)
 		goto out_unlock;
 
-	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
...
@@ -25,7 +25,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	int i;
 
 #ifdef CONFIG_SMP
-	if (!cpu_isset(n, cpu_online_map))
+	if (!cpu_online(n))
 		return 0;
 #endif
...
@@ -317,7 +317,7 @@ static int bmips_cpu_disable(void)
 	pr_info("SMP: CPU%d is offline\n", cpu);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 
 	local_flush_tlb_all();
...
@@ -148,7 +148,7 @@ static void stop_this_cpu(void *dummy)
 	/*
 	 * Remove this CPU:
 	 */
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 	for (;;) {
 		if (cpu_wait)
 			(*cpu_wait)();	/* Wait if available. */
@@ -174,7 +174,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-	init_cpu_present(&cpu_possible_map);
+	init_cpu_present(cpu_possible_mask);
 #endif
 }
@@ -248,7 +248,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
 	return 0;
 }
@@ -320,13 +320,12 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_mm(mm);
@@ -360,13 +359,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
 				cpu_context(cpu, mm) = 0;
+		}
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
@@ -407,13 +405,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
 	} else {
-		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
 
-		cpu_clear(smp_processor_id(), mask);
-		for_each_cpu_mask(cpu, mask)
-			if (cpu_context(cpu, vma->vm_mm))
+		for_each_online_cpu(cpu) {
+			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
 				cpu_context(cpu, vma->vm_mm) = 0;
+		}
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
...
@@ -291,7 +291,7 @@ static void smtc_configure_tlb(void)
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  */
 
 int __init smtc_build_cpu_map(int start_cpu_slot)
...
@@ -80,9 +80,9 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
 	if (vma)
 		mask = *mm_cpumask(vma->vm_mm);
 	else
-		mask = cpu_online_map;
+		mask = *cpu_online_mask;
 
-	cpu_clear(cpu, mask);
-	for_each_cpu_mask(cpu, mask)
+	cpumask_clear_cpu(cpu, &mask);
+	for_each_cpu(cpu, &mask)
 		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
 
 	preempt_enable();
...
@@ -165,7 +165,7 @@ void __init nlm_smp_setup(void)
 	cpu_set(boot_cpu, phys_cpu_present_map);
 	__cpu_number_map[boot_cpu] = 0;
 	__cpu_logical_map[0] = boot_cpu;
-	cpu_set(0, cpu_possible_map);
+	set_cpu_possible(0, true);
 
 	num_cpus = 1;
 	for (i = 0; i < NR_CPUS; i++) {
@@ -177,14 +177,14 @@ void __init nlm_smp_setup(void)
 			cpu_set(i, phys_cpu_present_map);
 			__cpu_number_map[i] = num_cpus;
 			__cpu_logical_map[num_cpus] = i;
-			cpu_set(num_cpus, cpu_possible_map);
+			set_cpu_possible(num_cpus, true);
 			++num_cpus;
 		}
 	}
 
 	pr_info("Phys CPU present map: %lx, possible map %lx\n",
 		(unsigned long)phys_cpu_present_map.bits[0],
-		(unsigned long)cpu_possible_map.bits[0]);
+		(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
 	pr_info("Detected %i Slave CPU(s)\n", num_cpus);
 	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
...
@@ -155,10 +155,10 @@ static void __init yos_smp_setup(void)
 {
 	int i;
 
-	cpus_clear(cpu_possible_map);
+	init_cpu_possible(cpu_none_mask);
 
 	for (i = 0; i < 2; i++) {
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 		__cpu_number_map[i] = i;
 		__cpu_logical_map[i] = i;
 	}
@@ -169,7 +169,7 @@ static void __init yos_prepare_cpus(unsigned int max_cpus)
 	/*
 	 * Be paranoid. Enable the IPI only if we're really about to go SMP.
 	 */
-	if (cpus_weight(cpu_possible_map))
+	if (num_possible_cpus())
 		set_c0_status(STATUSF_IP5);
 }
...
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
 		/* Only let it join in if it's marked enabled */
 		if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
 		    (tot_cpus_found != NR_CPUS)) {
-			cpu_set(cpuid, cpu_possible_map);
+			set_cpu_possible(cpuid, true);
 			alloc_cpupda(cpuid, tot_cpus_found);
 			cpus_found++;
 			tot_cpus_found++;
...
@@ -147,14 +147,13 @@ static void __init bcm1480_smp_setup(void)
 {
 	int i, num;
 
-	cpus_clear(cpu_possible_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_possible(cpumask_of(0));
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
...
@@ -126,7 +126,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * cpu_possible_map and the logical/physical mappings.
+ * cpu_possible_mask and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -135,14 +135,13 @@ static void __init sb1250_smp_setup(void)
 {
 	int i, num;
 
-	cpus_clear(cpu_possible_map);
-	cpu_set(0, cpu_possible_map);
+	init_cpu_possible(cpumask_of(0));
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}
...
@@ -104,11 +104,11 @@ static int irq_choose_cpu(const struct cpumask *affinity)
 {
 	cpumask_t mask;
 
-	cpus_and(mask, cpu_online_map, *affinity);
-	if (cpus_equal(mask, cpu_online_map) || cpus_empty(mask))
+	cpumask_and(&mask, cpu_online_mask, affinity);
+	if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
 		return boot_cpu_id;
 	else
-		return first_cpu(mask);
+		return cpumask_first(&mask);
 }
 
 #else
 #define irq_choose_cpu(affinity) boot_cpu_id
...
@@ -1186,7 +1186,7 @@ static void __init setup_cpu_maps(void)
 			  sizeof(cpu_lotar_map));
 	if (rc < 0) {
 		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
-		cpu_lotar_map = cpu_possible_map;
+		cpu_lotar_map = *cpu_possible_mask;
 	}
 
 #if CHIP_HAS_CBOX_HOME_MAP()
@@ -1196,9 +1196,9 @@ static void __init setup_cpu_maps(void)
 			  sizeof(hash_for_home_map));
 	if (rc < 0)
 		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
-	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
+	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
 #else
-	cpu_cacheable_map = cpu_possible_map;
+	cpu_cacheable_map = *cpu_possible_mask;
 #endif
 }
...
@@ -41,7 +41,7 @@ static int __init start_kernel_proc(void *unused)
 	cpu_tasks[0].pid = pid;
 	cpu_tasks[0].task = current;
 #ifdef CONFIG_SMP
-	cpu_online_map = cpumask_of_cpu(0);
+	init_cpu_online(get_cpu_mask(0));
 #endif
 	start_kernel();
 	return 0;
...
@@ -76,7 +76,7 @@ static int idle_proc(void *cpup)
 		cpu_relax();
 
 	notify_cpu_starting(cpu);
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	default_idle();
 	return 0;
 }
@@ -110,8 +110,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
 	for (i = 0; i < ncpus; ++i)
 		set_cpu_possible(i, true);
 
-	cpu_clear(me, cpu_online_map);
-	cpu_set(me, cpu_online_map);
+	set_cpu_online(me, true);
 	cpu_set(me, cpu_callin_map);
 
 	err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
@@ -138,13 +137,13 @@ void smp_prepare_cpus(unsigned int maxcpus)
 void smp_prepare_boot_cpu(void)
 {
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 }
 
 int __cpu_up(unsigned int cpu)
 {
 	cpu_set(cpu, smp_commenced_mask);
-	while (!cpu_isset(cpu, cpu_online_map))
+	while (!cpu_online(cpu))
 		mb();
 	return 0;
 }
...