Commit a953e459 authored by Mike Travis, committed by Thomas Gleixner

sched: replace MAX_NUMNODES with nr_node_ids in kernel/sched.c

  * Replace usages of MAX_NUMNODES with nr_node_ids in kernel/sched.c,
    where appropriate.  This saves some allocated space as well as many
    wasted cycles spent iterating over node entries that do not exist.
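
[Editor's note: to make the space/time saving concrete, below is a minimal
userspace sketch of the pattern this commit applies.  It is not kernel code:
MAX_NUMNODES here stands in for the compile-time constant derived from
CONFIG_NODES_SHIFT, nr_node_ids for the node count discovered at boot, and
the values (1024 possible node IDs, 4 actual nodes) are illustrative
assumptions, not taken from the commit.]

	/*
	 * Userspace sketch, not kernel code.  MAX_NUMNODES is the
	 * compile-time maximum; nr_node_ids is what the running
	 * machine actually has.  Values below are assumptions.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_NUMNODES (1 << 10)	/* e.g. CONFIG_NODES_SHIFT=10 -> 1024 */

	static int nr_node_ids = 4;	/* runtime: a 4-node machine */

	int main(void)
	{
		/* Before: one pointer per possible node ID, most unused. */
		void **before = calloc(MAX_NUMNODES, sizeof(void *));
		/* After: only as many slots as node IDs that can exist. */
		void **after  = calloc(nr_node_ids, sizeof(void *));

		printf("before: %zu bytes, after: %zu bytes\n",
		       MAX_NUMNODES * sizeof(void *),
		       (size_t)nr_node_ids * sizeof(void *));

		/* A loop bounded by nr_node_ids touches 4 entries; a
		 * MAX_NUMNODES bound would walk all 1024. */
		for (int i = 0; i < nr_node_ids; i++)
			after[i] = NULL;

		free(before);
		free(after);
		return 0;
	}

With 8-byte pointers and the assumed values, the per-node array shrinks from
8 KB to 32 bytes, and every loop bounded by nr_node_ids does 4 iterations
instead of 1024.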

For inclusion into sched-devel/latest tree.

Based on:
	git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
    +   sched-devel/latest  .../mingo/linux-2.6-sched-devel.git
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 75d3bce2
@@ -6879,9 +6879,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 	min_val = INT_MAX;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;
 
 		if (!nr_cpus_node(n))
 			continue;
@@ -7075,7 +7075,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 		if (!sched_group_nodes)
 			continue;
 
-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
 			*nodemask = node_to_cpumask(i);
@@ -7263,7 +7263,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7407,7 +7407,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
@@ -7431,7 +7431,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 					send_covered, tmpmask);
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7470,9 +7470,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(*notcovered, *covered);
@@ -7525,7 +7525,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);
 
 	if (sd_allnodes) {