Commit 363ab6f1 authored by Mike Travis, committed by Thomas Gleixner

core: use performance variant for_each_cpu_mask_nr

Change references from for_each_cpu_mask to for_each_cpu_mask_nr where appropriate.
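
Both iterators walk the same cpumask; the difference is the upper bound.
for_each_cpu_mask() scans bits up to the compile-time constant NR_CPUS,
while for_each_cpu_mask_nr() stops at nr_cpu_ids, the number of CPU ids
actually usable on the booted system. On kernels configured with a large
NR_CPUS but booted on small machines this avoids walking thousands of
bits that can never be set. Roughly (a simplified sketch, not the exact
include/linux/cpumask.h macro bodies):

        #define for_each_cpu_mask(cpu, mask)                    \
                for ((cpu) = first_cpu(mask);                   \
                     (cpu) < NR_CPUS;                           \
                     (cpu) = next_cpu((cpu), (mask)))

        #define for_each_cpu_mask_nr(cpu, mask)                 \
                for ((cpu) = first_cpu(mask);                   \
                     (cpu) < nr_cpu_ids;                        \
                     (cpu) = next_cpu_nr((cpu), (mask)))
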
Reviewed-by: Paul Jackson <pj@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 068b1277
@@ -390,7 +390,7 @@ void __ref enable_nonboot_cpus(void)
                 goto out;
         printk("Enabling non-boot CPUs ...\n");
-        for_each_cpu_mask(cpu, frozen_cpus) {
+        for_each_cpu_mask_nr(cpu, frozen_cpus) {
                 error = _cpu_up(cpu, 1);
                 if (!error) {
                         printk("CPU%d is up\n", cpu);
...
@@ -92,7 +92,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
                  */
                 cpumask = rcp->cpumask;
                 cpu_clear(rdp->cpu, cpumask);
-                for_each_cpu_mask(cpu, cpumask)
+                for_each_cpu_mask_nr(cpu, cpumask)
                         smp_send_reschedule(cpu);
         }
 }
...
@@ -657,7 +657,7 @@ rcu_try_flip_idle(void)
         /* Now ask each CPU for acknowledgement of the flip. */
-        for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+        for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
                 per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
                 dyntick_save_progress_counter(cpu);
         }
@@ -675,7 +675,7 @@ rcu_try_flip_waitack(void)
         int cpu;
         RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-        for_each_cpu_mask(cpu, rcu_cpu_online_map)
+        for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                 if (rcu_try_flip_waitack_needed(cpu) &&
                     per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
                         RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -707,7 +707,7 @@ rcu_try_flip_waitzero(void)
         /* Check to see if the sum of the "last" counters is zero. */
         RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-        for_each_cpu_mask(cpu, rcu_cpu_online_map)
+        for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
         if (sum != 0) {
                 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -722,7 +722,7 @@ rcu_try_flip_waitzero(void)
         smp_mb();  /*  ^^^^^^^^^^^^ */
         /* Call for a memory barrier from each CPU. */
-        for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+        for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
                 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
                 dyntick_save_progress_counter(cpu);
         }
@@ -742,7 +742,7 @@ rcu_try_flip_waitmb(void)
         int cpu;
         RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-        for_each_cpu_mask(cpu, rcu_cpu_online_map)
+        for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                 if (rcu_try_flip_waitmb_needed(cpu) &&
                     per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
                         RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
...
@@ -2271,7 +2271,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                 /* Tally up the load of all CPUs in the group */
                 avg_load = 0;
-                for_each_cpu_mask(i, group->cpumask) {
+                for_each_cpu_mask_nr(i, group->cpumask) {
                         /* Bias balancing toward cpus of our domain */
                         if (local_group)
                                 load = source_load(i, load_idx);
@@ -2313,7 +2313,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
         /* Traverse only the allowed CPUs */
         cpus_and(*tmp, group->cpumask, p->cpus_allowed);
-        for_each_cpu_mask(i, *tmp) {
+        for_each_cpu_mask_nr(i, *tmp) {
                 load = weighted_cpuload(i);
                 if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3296,7 +3296,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                 max_cpu_load = 0;
                 min_cpu_load = ~0UL;
-                for_each_cpu_mask(i, group->cpumask) {
+                for_each_cpu_mask_nr(i, group->cpumask) {
                         struct rq *rq;
                         if (!cpu_isset(i, *cpus))
@@ -3560,7 +3560,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
         unsigned long max_load = 0;
         int i;
-        for_each_cpu_mask(i, group->cpumask) {
+        for_each_cpu_mask_nr(i, group->cpumask) {
                 unsigned long wl;
                 if (!cpu_isset(i, *cpus))
@@ -4100,7 +4100,7 @@ static void run_rebalance_domains(struct softirq_action *h)
                 int balance_cpu;
                 cpu_clear(this_cpu, cpus);
-                for_each_cpu_mask(balance_cpu, cpus) {
+                for_each_cpu_mask_nr(balance_cpu, cpus) {
                         /*
                          * If this cpu gets work to do, stop the load balancing
                          * work being done for other cpus. Next load
@@ -6832,7 +6832,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
         cpus_clear(*covered);
-        for_each_cpu_mask(i, *span) {
+        for_each_cpu_mask_nr(i, *span) {
                 struct sched_group *sg;
                 int group = group_fn(i, cpu_map, &sg, tmpmask);
                 int j;
@@ -6843,7 +6843,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
                 cpus_clear(sg->cpumask);
                 sg->__cpu_power = 0;
-                for_each_cpu_mask(j, *span) {
+                for_each_cpu_mask_nr(j, *span) {
                         if (group_fn(j, cpu_map, NULL, tmpmask) != group)
                                 continue;
@@ -7043,7 +7043,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
         if (!sg)
                 return;
         do {
-                for_each_cpu_mask(j, sg->cpumask) {
+                for_each_cpu_mask_nr(j, sg->cpumask) {
                         struct sched_domain *sd;
                         sd = &per_cpu(phys_domains, j);
@@ -7068,7 +7068,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
         int cpu, i;
-        for_each_cpu_mask(cpu, *cpu_map) {
+        for_each_cpu_mask_nr(cpu, *cpu_map) {
                 struct sched_group **sched_group_nodes
                         = sched_group_nodes_bycpu[cpu];
@@ -7302,7 +7302,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
         /*
          * Set up domains for cpus specified by the cpu_map.
          */
-        for_each_cpu_mask(i, *cpu_map) {
+        for_each_cpu_mask_nr(i, *cpu_map) {
                 struct sched_domain *sd = NULL, *p;
                 SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7374,7 +7374,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #ifdef CONFIG_SCHED_SMT
         /* Set up CPU (sibling) groups */
-        for_each_cpu_mask(i, *cpu_map) {
+        for_each_cpu_mask_nr(i, *cpu_map) {
                 SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
                 SCHED_CPUMASK_VAR(send_covered, allmasks);
@@ -7391,7 +7391,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #ifdef CONFIG_SCHED_MC
         /* Set up multi-core groups */
-        for_each_cpu_mask(i, *cpu_map) {
+        for_each_cpu_mask_nr(i, *cpu_map) {
                 SCHED_CPUMASK_VAR(this_core_map, allmasks);
                 SCHED_CPUMASK_VAR(send_covered, allmasks);
@@ -7458,7 +7458,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
                         goto error;
                 }
                 sched_group_nodes[i] = sg;
-                for_each_cpu_mask(j, *nodemask) {
+                for_each_cpu_mask_nr(j, *nodemask) {
                         struct sched_domain *sd;
                         sd = &per_cpu(node_domains, j);
@@ -7504,21 +7504,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
         /* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-        for_each_cpu_mask(i, *cpu_map) {
+        for_each_cpu_mask_nr(i, *cpu_map) {
                 struct sched_domain *sd = &per_cpu(cpu_domains, i);
                 init_sched_groups_power(i, sd);
         }
 #endif
 #ifdef CONFIG_SCHED_MC
-        for_each_cpu_mask(i, *cpu_map) {
+        for_each_cpu_mask_nr(i, *cpu_map) {
                 struct sched_domain *sd = &per_cpu(core_domains, i);
                 init_sched_groups_power(i, sd);
         }
 #endif
-        for_each_cpu_mask(i, *cpu_map) {
+        for_each_cpu_mask_nr(i, *cpu_map) {
                 struct sched_domain *sd = &per_cpu(phys_domains, i);
                 init_sched_groups_power(i, sd);
@@ -7538,7 +7538,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
         /* Attach the domains */
-        for_each_cpu_mask(i, *cpu_map) {
+        for_each_cpu_mask_nr(i, *cpu_map) {
                 struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                 sd = &per_cpu(cpu_domains, i);
@@ -7621,7 +7621,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
         unregister_sched_domain_sysctl();
-        for_each_cpu_mask(i, *cpu_map)
+        for_each_cpu_mask_nr(i, *cpu_map)
                 cpu_attach_domain(NULL, &def_root_domain, i);
         synchronize_sched();
         arch_destroy_sched_domains(cpu_map, &tmpmask);
...
@@ -1022,7 +1022,7 @@ static int wake_idle(int cpu, struct task_struct *p)
                     || ((sd->flags & SD_WAKE_IDLE_FAR)
                         && !task_hot(p, task_rq(p)->clock, sd))) {
                         cpus_and(tmp, sd->span, p->cpus_allowed);
-                        for_each_cpu_mask(i, tmp) {
+                        for_each_cpu_mask_nr(i, tmp) {
                                 if (idle_cpu(i)) {
                                         if (i != task_cpu(p)) {
                                                 schedstat_inc(p,
...
@@ -231,7 +231,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                 return 1;
         span = sched_rt_period_mask();
-        for_each_cpu_mask(i, span) {
+        for_each_cpu_mask_nr(i, span) {
                 int enqueue = 0;
                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                 struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -272,7 +272,7 @@ static int balance_runtime(struct rt_rq *rt_rq)
         spin_lock(&rt_b->rt_runtime_lock);
         rt_period = ktime_to_ns(rt_b->rt_period);
-        for_each_cpu_mask(i, rd->span) {
+        for_each_cpu_mask_nr(i, rd->span) {
                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                 s64 diff;
@@ -1000,7 +1000,7 @@ static int pull_rt_task(struct rq *this_rq)
         next = pick_next_task_rt(this_rq);
-        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+        for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
                 if (this_cpu == cpu)
                         continue;
...
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
                 return -EINVAL;
         if (isadd == REGISTER) {
-                for_each_cpu_mask(cpu, mask) {
+                for_each_cpu_mask_nr(cpu, mask) {
                         s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
                                          cpu_to_node(cpu));
                         if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
         /* Deregister or cleanup */
 cleanup:
-        for_each_cpu_mask(cpu, mask) {
+        for_each_cpu_mask_nr(cpu, mask) {
                 listeners = &per_cpu(listener_array, cpu);
                 down_write(&listeners->sem);
                 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
...
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
         might_sleep();
         lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
         lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-        for_each_cpu_mask(cpu, *cpu_map)
+        for_each_cpu_mask_nr(cpu, *cpu_map)
                 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
         wq = cwq->wq;
         cpu_map = wq_cpu_map(wq);
-        for_each_cpu_mask(cpu, *cpu_map)
+        for_each_cpu_mask_nr(cpu, *cpu_map)
                 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
         list_del(&wq->list);
         spin_unlock(&workqueue_lock);
-        for_each_cpu_mask(cpu, *cpu_map)
+        for_each_cpu_mask_nr(cpu, *cpu_map)
                 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
         put_online_cpus();
...
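
For callers the conversion is mechanical: only the iteration bound changes,
the loop body is untouched. A hypothetical caller (not part of this commit)
reads the same before and after, for example:

        int cpu;

        /* visit every online CPU; stops at nr_cpu_ids instead of NR_CPUS */
        for_each_cpu_mask_nr(cpu, cpu_online_map)
                printk(KERN_INFO "CPU%d is online\n", cpu);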