Commit 9eb0dcc1 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] cpumask: remove obsolete cpumask macro uses - i386 arch

From: Paul Jackson <pj@sgi.com>

Remove the i386 arch's uses of the obsolete cpumask const, coerce and promote
macros by recoding them with their current equivalents.
Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent ed880528
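
For reference, the conversion applied throughout this patch is mechanical: the *_const cpumask types and accessors go away, and the old coerce/promote word accessors become plain reads and writes of cpus_addr(mask)[0]. A minimal sketch of the mapping follows; the example_* helpers are hypothetical (they are not part of the patch), and the [0] access assumes the mask's low bits live in the first word of the bitmap, exactly as the converted i386 call sites assume.

	/* Sketch only -- assumes the <linux/cpumask.h> of this kernel vintage,
	 * where cpus_addr(mask) yields the mask's underlying unsigned long
	 * array.  The example_* helpers are illustrative, not from the patch.
	 */

	/* was:  unsigned long w = cpus_coerce(mask);
	 * or:   unsigned long w = cpus_coerce_const(mask);
	 */
	static inline unsigned long example_mask_low_word(cpumask_t mask)
	{
		return cpus_addr(mask)[0];	/* low NR_CPUS bits as one word */
	}

	/* was:  *mask = cpus_promote(bits); */
	static inline void example_mask_from_word(cpumask_t *mask, unsigned long bits)
	{
		cpus_addr(*mask)[0] = bits;	/* write the low word in place */
	}

	/* was:  a cpumask_const_t argument plus first_cpu_const(),
	 * cpus_weight_const(), cpu_isset_const(); now a plain cpumask_t
	 * and the unsuffixed helpers.
	 */
	static inline int example_first_set_cpu(cpumask_t mask)
	{
		return first_cpu(mask);
	}

The signature change from cpumask_const_t to cpumask_t means cpu_mask_to_apicid() and friends now take the mask by value, which is why the mk_cpumask_const() wrappers at their call sites disappear as well.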
@@ -224,7 +224,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
 	struct irq_pin_list *entry = irq_2_pin + irq;
 	unsigned int apicid_value;
-	apicid_value = cpu_mask_to_apicid(mk_cpumask_const(cpumask));
+	apicid_value = cpu_mask_to_apicid(cpumask);
 	/* Prepare to do the io_apic_write */
 	apicid_value = apicid_value << 24;
 	spin_lock_irqsave(&ioapic_lock, flags);

@@ -159,7 +159,7 @@ void fastcall send_IPI_self(int vector)
  */
 inline void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
 {
-	unsigned long mask = cpus_coerce(cpumask);
+	unsigned long mask = cpus_addr(cpumask)[0];
 	unsigned long cfg;
 	unsigned long flags;

@@ -153,7 +153,7 @@ static inline void
 send_CPI_allbutself(__u8 cpi)
 {
 	__u8 cpu = smp_processor_id();
-	__u32 mask = cpus_coerce(cpu_online_map) & ~(1 << cpu);
+	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
 	send_CPI(mask, cpi);
 }
@@ -402,11 +402,11 @@ find_smp_config(void)
 	/* set up everything for just this CPU, we can alter
	 * this as we start the other CPUs later */
 	/* now get the CPU disposition from the extended CMOS */
-	phys_cpu_present_map = cpus_promote(voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK));
-	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
-	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
-	cpus_coerce(phys_cpu_present_map) |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
-	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_coerce(phys_cpu_present_map));
+	cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
+	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
+	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
+	cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
+	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
 	/* enable the CPIs by writing the base vector to their register */
 	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
@@ -706,12 +706,12 @@ smp_boot_cpus(void)
 		/* now that the cat has probed the Voyager System Bus, sanity
		 * check the cpu map */
 		if( ((voyager_quad_processors | voyager_extended_vic_processors)
-		     & cpus_coerce(phys_cpu_present_map)) != cpus_coerce(phys_cpu_present_map)) {
+		     & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
 			/* should panic */
 			printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
 		}
 	} else if(voyager_level == 4)
-		voyager_extended_vic_processors = cpus_coerce(phys_cpu_present_map);
+		voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];
 	/* this sets up the idle task to run on the current cpu */
 	voyager_extended_cpus = 1;
@@ -909,7 +909,7 @@ flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
 	if (!cpumask)
 		BUG();
-	if ((cpumask & cpus_coerce(cpu_online_map)) != cpumask)
+	if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
 		BUG();
 	if (cpumask & (1 << smp_processor_id()))
 		BUG();
@@ -952,7 +952,7 @@ flush_tlb_current_task(void)
 	preempt_disable();
-	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
@@ -968,7 +968,7 @@ flush_tlb_mm (struct mm_struct * mm)
 	preempt_disable();
-	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	if (current->active_mm == mm) {
 		if (current->mm)
@@ -989,7 +989,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 	preempt_disable();
-	cpu_mask = cpus_coerce(mm->cpu_vm_mask) & ~(1 << smp_processor_id());
+	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	if (current->active_mm == mm) {
 		if(current->mm)
 			__flush_tlb_one(va);
@@ -1098,7 +1098,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
 		   int wait)
 {
 	struct call_data_struct data;
-	__u32 mask = cpus_coerce(cpu_online_map);
+	__u32 mask = cpus_addr(cpu_online_map)[0];
 	mask &= ~(1<<smp_processor_id());
@@ -1789,9 +1789,9 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
 	unsigned long irq_mask = 1 << irq;
 	int cpu;
-	real_mask = cpus_coerce(mask) & voyager_extended_vic_processors;
-	if(cpus_coerce(mask) == 0)
+	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	if(cpus_addr(mask)[0] == 0)
 		/* can't have no cpu's to accept the interrupt -- extremely
		 * bad things will happen */
 		return;

@@ -62,7 +62,7 @@ struct genapic {
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_const_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
 	/* ipi */
 	void (*send_IPI_mask)(cpumask_t mask, int vector);

@@ -28,11 +28,11 @@ static inline cpumask_t target_cpus(void)
 	static unsigned long cpu = NR_CPUS;
 	do {
 		if (cpu >= NR_CPUS)
-			cpu = first_cpu_const(cpu_online_map);
+			cpu = first_cpu(cpu_online_map);
 		else
-			cpu = next_cpu_const(cpu, cpu_online_map);
+			cpu = next_cpu(cpu, cpu_online_map);
 	} while (cpu >= NR_CPUS);
-	return mk_cpumask_const(cpumask_of_cpu(cpu));
+	return cpumask_of_cpu(cpu);
 }
 #define TARGET_CPUS (target_cpus())
@@ -149,12 +149,12 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 {
 	int cpu;
 	int apicid;
-	cpu = first_cpu_const(cpumask);
+	cpu = first_cpu(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }

@@ -6,12 +6,12 @@
 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
-static inline cpumask_const_t target_cpus(void)
+static inline cpumask_t target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return mk_cpumask_const(cpu_online_map);
+	return cpu_online_map;
 #else
-	return mk_cpumask_const(cpumask_of_cpu(0));
+	return cpumask_of_cpu(0);
 #endif
 }
 #define TARGET_CPUS (target_cpus())
@@ -116,9 +116,9 @@ static inline int apic_id_registered(void)
 	return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map);
 }
-static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 {
-	return cpus_coerce_const(cpumask);
+	return cpus_addr(cpumask)[0];
 }
 static inline void enable_apic_mode(void)

@@ -88,7 +88,7 @@ static inline void clustered_apic_check(void)
 	int apic = bios_cpu_apicid[smp_processor_id()];
 	printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_coerce(TARGET_CPUS));
+		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
 }
 static inline int multi_timer_check(int apic, int irq)
@@ -158,14 +158,14 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }
-static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
-	num_bits_set = cpus_weight_const(cpumask);
+	num_bits_set = cpus_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 #if defined CONFIG_ES7000_CLUSTERED_APIC
@@ -177,10 +177,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 */
-	cpu = first_cpu_const(cpumask);
+	cpu = first_cpu(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset_const(cpu, cpumask)) {
+		if (cpu_isset(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){

@@ -135,7 +135,7 @@ static inline void enable_apic_mode(void)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 {
 	return (int) 0xF;
 }

@@ -139,14 +139,14 @@ static inline void enable_apic_mode(void)
 {
 }
-static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
-	num_bits_set = cpus_weight_const(cpumask);
+	num_bits_set = cpus_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
@@ -154,10 +154,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of TARGET_CPUS.
	 */
-	cpu = first_cpu_const(cpumask);
+	cpu = first_cpu(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset_const(cpu, cpumask)) {
+		if (cpu_isset(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){

@@ -86,9 +86,9 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
 }
-static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 {
-	return cpus_coerce_const(cpumask);
+	return cpus_addr(cpumask)[0];
 }
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)