Commit 8c4ea5db authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] Remove __ from topology macros

Patch from Matthew Dobson <colpatch@us.ibm.com>

When I originally wrote the patches implementing the in-kernel topology
macros, they were meant to be called as a second layer of functions,
sans underbars.  This additional layer was deemed unnecessary and
summarily dropped.  As such, carrying around (and typing!) all these
extra underbars is quite pointless.  Here's a patch to nip this in the
(sorta) bud.  The macros only appear in 16 files so far, most of them
being the definitions themselves.
parent 85e81b27
...@@ -35,7 +35,7 @@ struct device_driver cpu_driver = { ...@@ -35,7 +35,7 @@ struct device_driver cpu_driver = {
*/ */
int __init register_cpu(struct cpu *cpu, int num, struct node *root) int __init register_cpu(struct cpu *cpu, int num, struct node *root)
{ {
cpu->node_id = __cpu_to_node(num); cpu->node_id = cpu_to_node(num);
cpu->sysdev.name = "cpu"; cpu->sysdev.name = "cpu";
cpu->sysdev.id = num; cpu->sysdev.id = num;
if (root) if (root)
......
...@@ -36,7 +36,7 @@ struct device_driver memblk_driver = { ...@@ -36,7 +36,7 @@ struct device_driver memblk_driver = {
*/ */
int __init register_memblk(struct memblk *memblk, int num, struct node *root) int __init register_memblk(struct memblk *memblk, int num, struct node *root)
{ {
memblk->node_id = __memblk_to_node(num); memblk->node_id = memblk_to_node(num);
memblk->sysdev.name = "memblk"; memblk->sysdev.name = "memblk";
memblk->sysdev.id = num; memblk->sysdev.id = num;
if (root) if (root)
......
...@@ -72,7 +72,7 @@ int __init register_node(struct node *node, int num, struct node *parent) ...@@ -72,7 +72,7 @@ int __init register_node(struct node *node, int num, struct node *parent)
{ {
int error; int error;
node->cpumap = __node_to_cpu_mask(num); node->cpumap = node_to_cpumask(num);
node->sysroot.id = num; node->sysroot.id = num;
if (parent) if (parent)
node->sysroot.dev.parent = &parent->sysroot.sysdev; node->sysroot.dev.parent = &parent->sysroot.sysdev;
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#include <asm/machvec.h> #include <asm/machvec.h>
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
static inline int __cpu_to_node(int cpu) static inline int cpu_to_node(int cpu)
{ {
int node; int node;
...@@ -23,13 +23,13 @@ static inline int __cpu_to_node(int cpu) ...@@ -23,13 +23,13 @@ static inline int __cpu_to_node(int cpu)
return node; return node;
} }
static inline int __node_to_cpu_mask(int node) static inline int node_to_cpumask(int node)
{ {
unsigned long node_cpu_mask = 0; unsigned long node_cpu_mask = 0;
int cpu; int cpu;
for(cpu = 0; cpu < NR_CPUS; cpu++) { for(cpu = 0; cpu < NR_CPUS; cpu++) {
if (cpu_online(cpu) && (__cpu_to_node(cpu) == node)) if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
node_cpu_mask |= 1UL << cpu; node_cpu_mask |= 1UL << cpu;
} }
...@@ -40,8 +40,8 @@ static inline int __node_to_cpu_mask(int node) ...@@ -40,8 +40,8 @@ static inline int __node_to_cpu_mask(int node)
return node_cpu_mask; return node_cpu_mask;
} }
# define __node_to_memblk(node) (node) # define node_to_memblk(node) (node)
# define __memblk_to_node(memblk) (memblk) # define memblk_to_node(memblk) (memblk)
#else /* CONFIG_NUMA */ #else /* CONFIG_NUMA */
# include <asm-generic/topology.h> # include <asm-generic/topology.h>
......
...@@ -29,23 +29,23 @@ ...@@ -29,23 +29,23 @@
/* Other architectures wishing to use this simple topology API should fill /* Other architectures wishing to use this simple topology API should fill
in the below functions as appropriate in their own <asm/topology.h> file. */ in the below functions as appropriate in their own <asm/topology.h> file. */
#ifndef __cpu_to_node #ifndef cpu_to_node
#define __cpu_to_node(cpu) (0) #define cpu_to_node(cpu) (0)
#endif #endif
#ifndef __memblk_to_node #ifndef memblk_to_node
#define __memblk_to_node(memblk) (0) #define memblk_to_node(memblk) (0)
#endif #endif
#ifndef __parent_node #ifndef parent_node
#define __parent_node(node) (0) #define parent_node(node) (0)
#endif #endif
#ifndef __node_to_first_cpu #ifndef node_to_cpumask
#define __node_to_first_cpu(node) (0) #define node_to_cpumask(node) (cpu_online_map)
#endif #endif
#ifndef __node_to_cpu_mask #ifndef node_to_first_cpu
#define __node_to_cpu_mask(node) (cpu_online_map) #define node_to_first_cpu(node) (0)
#endif #endif
#ifndef __node_to_memblk #ifndef node_to_memblk
#define __node_to_memblk(node) (0) #define node_to_memblk(node) (0)
#endif #endif
/* Cross-node load balancing interval. */ /* Cross-node load balancing interval. */
......
...@@ -17,7 +17,7 @@ static inline int arch_register_cpu(int num){ ...@@ -17,7 +17,7 @@ static inline int arch_register_cpu(int num){
struct node *parent = NULL; struct node *parent = NULL;
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
parent = &node_devices[__cpu_to_node(num)].node; parent = &node_devices[cpu_to_node(num)].node;
#endif /* CONFIG_NUMA */ #endif /* CONFIG_NUMA */
return register_cpu(&cpu_devices[num].cpu, num, parent); return register_cpu(&cpu_devices[num].cpu, num, parent);
......
...@@ -14,7 +14,7 @@ struct i386_memblk { ...@@ -14,7 +14,7 @@ struct i386_memblk {
extern struct i386_memblk memblk_devices[MAX_NR_MEMBLKS]; extern struct i386_memblk memblk_devices[MAX_NR_MEMBLKS];
static inline int arch_register_memblk(int num){ static inline int arch_register_memblk(int num){
int p_node = __memblk_to_node(num); int p_node = memblk_to_node(num);
return register_memblk(&memblk_devices[num].memblk, num, return register_memblk(&memblk_devices[num].memblk, num,
&node_devices[p_node].node); &node_devices[p_node].node);
......
...@@ -13,7 +13,7 @@ struct i386_node { ...@@ -13,7 +13,7 @@ struct i386_node {
extern struct i386_node node_devices[MAX_NUMNODES]; extern struct i386_node node_devices[MAX_NUMNODES];
static inline int arch_register_node(int num){ static inline int arch_register_node(int num){
int p_node = __parent_node(num); int p_node = parent_node(num);
struct node *parent = NULL; struct node *parent = NULL;
if (p_node != num) if (p_node != num)
......
...@@ -34,32 +34,32 @@ extern volatile unsigned long node_2_cpu_mask[]; ...@@ -34,32 +34,32 @@ extern volatile unsigned long node_2_cpu_mask[];
extern volatile int cpu_2_node[]; extern volatile int cpu_2_node[];
/* Returns the number of the node containing CPU 'cpu' */ /* Returns the number of the node containing CPU 'cpu' */
static inline int __cpu_to_node(int cpu) static inline int cpu_to_node(int cpu)
{ {
return cpu_2_node[cpu]; return cpu_2_node[cpu];
} }
/* Returns the number of the node containing MemBlk 'memblk' */ /* Returns the number of the node containing MemBlk 'memblk' */
#define __memblk_to_node(memblk) (memblk) #define memblk_to_node(memblk) (memblk)
/* Returns the number of the node containing Node 'node'. This architecture is flat, /* Returns the number of the node containing Node 'node'. This architecture is flat,
so it is a pretty simple function! */ so it is a pretty simple function! */
#define __parent_node(node) (node) #define parent_node(node) (node)
/* Returns a bitmask of CPUs on Node 'node'. */ /* Returns a bitmask of CPUs on Node 'node'. */
static inline unsigned long __node_to_cpu_mask(int node) static inline unsigned long node_to_cpumask(int node)
{ {
return node_2_cpu_mask[node]; return node_2_cpu_mask[node];
} }
/* Returns the number of the first CPU on Node 'node'. */ /* Returns the number of the first CPU on Node 'node'. */
static inline int __node_to_first_cpu(int node) static inline int node_to_first_cpu(int node)
{ {
return __ffs(__node_to_cpu_mask(node)); return __ffs(node_to_cpumask(node));
} }
/* Returns the number of the first MemBlk on Node 'node' */ /* Returns the number of the first MemBlk on Node 'node' */
#define __node_to_memblk(node) (node) #define node_to_memblk(node) (node)
/* Cross-node load balancing interval. */ /* Cross-node load balancing interval. */
#define NODE_BALANCE_RATE 100 #define NODE_BALANCE_RATE 100
......
...@@ -21,25 +21,25 @@ ...@@ -21,25 +21,25 @@
/* /*
* Returns the number of the node containing CPU 'cpu' * Returns the number of the node containing CPU 'cpu'
*/ */
#define __cpu_to_node(cpu) (int)(cpu_to_node_map[cpu]) #define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
/* /*
* Returns a bitmask of CPUs on Node 'node'. * Returns a bitmask of CPUs on Node 'node'.
*/ */
#define __node_to_cpu_mask(node) (node_to_cpu_mask[node]) #define node_to_cpumask(node) (node_to_cpumask[node])
#else #else
#define __cpu_to_node(cpu) (0) #define cpu_to_node(cpu) (0)
#define __node_to_cpu_mask(node) (phys_cpu_present_map) #define node_to_cpumask(node) (phys_cpu_present_map)
#endif #endif
/* /*
* Returns the number of the node containing MemBlk 'memblk' * Returns the number of the node containing MemBlk 'memblk'
*/ */
#ifdef CONFIG_ACPI_NUMA #ifdef CONFIG_ACPI_NUMA
#define __memblk_to_node(memblk) (node_memblk[memblk].nid) #define memblk_to_node(memblk) (node_memblk[memblk].nid)
#else #else
#define __memblk_to_node(memblk) (memblk) #define memblk_to_node(memblk) (memblk)
#endif #endif
/* /*
...@@ -47,18 +47,18 @@ ...@@ -47,18 +47,18 @@
* Not implemented here. Multi-level hierarchies detected with * Not implemented here. Multi-level hierarchies detected with
* the help of node_distance(). * the help of node_distance().
*/ */
#define __parent_node(nid) (nid) #define parent_node(nid) (nid)
/* /*
* Returns the number of the first CPU on Node 'node'. * Returns the number of the first CPU on Node 'node'.
*/ */
#define __node_to_first_cpu(node) (__ffs(__node_to_cpu_mask(node))) #define node_to_first_cpu(node) (__ffs(node_to_cpumask(node)))
/* /*
* Returns the number of the first MemBlk on Node 'node' * Returns the number of the first MemBlk on Node 'node'
* Should be fixed when IA64 discontigmem goes in. * Should be fixed when IA64 discontigmem goes in.
*/ */
#define __node_to_memblk(node) (node) #define node_to_memblk(node) (node)
/* Cross-node load balancing interval. */ /* Cross-node load balancing interval. */
#define NODE_BALANCE_RATE 10 #define NODE_BALANCE_RATE 10
......
...@@ -3,6 +3,6 @@ ...@@ -3,6 +3,6 @@
#include <asm/mmzone.h> #include <asm/mmzone.h>
#define __cpu_to_node(cpu) (cputocnode(cpu)) #define cpu_to_node(cpu) (cputocnode(cpu))
#endif /* _ASM_MIPS64_TOPOLOGY_H */ #endif /* _ASM_MIPS64_TOPOLOGY_H */
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
static inline int __cpu_to_node(int cpu) static inline int cpu_to_node(int cpu)
{ {
int node; int node;
...@@ -19,7 +19,7 @@ static inline int __cpu_to_node(int cpu) ...@@ -19,7 +19,7 @@ static inline int __cpu_to_node(int cpu)
return node; return node;
} }
static inline int __node_to_first_cpu(int node) static inline int node_to_first_cpu(int node)
{ {
int cpu; int cpu;
...@@ -31,7 +31,7 @@ static inline int __node_to_first_cpu(int node) ...@@ -31,7 +31,7 @@ static inline int __node_to_first_cpu(int node)
return -1; return -1;
} }
static inline unsigned long __node_to_cpu_mask(int node) static inline unsigned long node_to_cpumask(int node)
{ {
int cpu; int cpu;
unsigned long mask = 0UL; unsigned long mask = 0UL;
...@@ -51,12 +51,7 @@ static inline unsigned long __node_to_cpu_mask(int node) ...@@ -51,12 +51,7 @@ static inline unsigned long __node_to_cpu_mask(int node)
#else /* !CONFIG_NUMA */ #else /* !CONFIG_NUMA */
#define __cpu_to_node(cpu) (0) #include <asm-generic/topology.h>
#define __memblk_to_node(memblk) (0)
#define __parent_node(nid) (0)
#define __node_to_first_cpu(node) (0)
#define __node_to_cpu_mask(node) (cpu_online_map)
#define __node_to_memblk(node) (0)
#endif /* CONFIG_NUMA */ #endif /* CONFIG_NUMA */
......
...@@ -257,7 +257,7 @@ static inline struct zone *next_zone(struct zone *zone) ...@@ -257,7 +257,7 @@ static inline struct zone *next_zone(struct zone *zone)
#include <asm/topology.h> #include <asm/topology.h>
/* Returns the number of the current Node. */ /* Returns the number of the current Node. */
#define numa_node_id() (__cpu_to_node(smp_processor_id())) #define numa_node_id() (cpu_to_node(smp_processor_id()))
#ifndef CONFIG_DISCONTIGMEM #ifndef CONFIG_DISCONTIGMEM
extern struct pglist_data contig_page_data; extern struct pglist_data contig_page_data;
......
...@@ -213,7 +213,7 @@ __init void node_nr_running_init(void) ...@@ -213,7 +213,7 @@ __init void node_nr_running_init(void)
int i; int i;
for (i = 0; i < NR_CPUS; i++) for (i = 0; i < NR_CPUS; i++)
cpu_rq(i)->node_nr_running = &node_nr_running[__cpu_to_node(i)]; cpu_rq(i)->node_nr_running = &node_nr_running[cpu_to_node(i)];
} }
#else /* !CONFIG_NUMA */ #else /* !CONFIG_NUMA */
...@@ -715,7 +715,7 @@ static int sched_best_cpu(struct task_struct *p) ...@@ -715,7 +715,7 @@ static int sched_best_cpu(struct task_struct *p)
} }
minload = 10000000; minload = 10000000;
cpumask = __node_to_cpu_mask(node); cpumask = node_to_cpumask(node);
for (i = 0; i < NR_CPUS; ++i) { for (i = 0; i < NR_CPUS; ++i) {
if (!(cpumask & (1UL << i))) if (!(cpumask & (1UL << i)))
continue; continue;
...@@ -767,7 +767,7 @@ static int find_busiest_node(int this_node) ...@@ -767,7 +767,7 @@ static int find_busiest_node(int this_node)
static inline unsigned long cpus_to_balance(int this_cpu, runqueue_t *this_rq) static inline unsigned long cpus_to_balance(int this_cpu, runqueue_t *this_rq)
{ {
int this_node = __cpu_to_node(this_cpu); int this_node = cpu_to_node(this_cpu);
/* /*
* Avoid rebalancing between nodes too often. * Avoid rebalancing between nodes too often.
* We rebalance globally once every NODE_BALANCE_RATE load balances. * We rebalance globally once every NODE_BALANCE_RATE load balances.
...@@ -776,9 +776,9 @@ static inline unsigned long cpus_to_balance(int this_cpu, runqueue_t *this_rq) ...@@ -776,9 +776,9 @@ static inline unsigned long cpus_to_balance(int this_cpu, runqueue_t *this_rq)
int node = find_busiest_node(this_node); int node = find_busiest_node(this_node);
this_rq->nr_balanced = 0; this_rq->nr_balanced = 0;
if (node >= 0) if (node >= 0)
return (__node_to_cpu_mask(node) | (1UL << this_cpu)); return (node_to_cpumask(node) | (1UL << this_cpu));
} }
return __node_to_cpu_mask(this_node); return node_to_cpumask(this_node);
} }
#else /* !CONFIG_NUMA */ #else /* !CONFIG_NUMA */
......
...@@ -1269,7 +1269,7 @@ void __init free_area_init_node(int nid, struct pglist_data *pgdat, ...@@ -1269,7 +1269,7 @@ void __init free_area_init_node(int nid, struct pglist_data *pgdat,
pgdat->node_mem_map = node_mem_map; pgdat->node_mem_map = node_mem_map;
free_area_init_core(pgdat, zones_size, zholes_size); free_area_init_core(pgdat, zones_size, zholes_size);
memblk_set_online(__node_to_memblk(nid)); memblk_set_online(node_to_memblk(nid));
calculate_zone_bitmap(pgdat, zones_size); calculate_zone_bitmap(pgdat, zones_size);
} }
......
...@@ -929,7 +929,7 @@ int kswapd(void *p) ...@@ -929,7 +929,7 @@ int kswapd(void *p)
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
daemonize(); daemonize();
set_cpus_allowed(tsk, __node_to_cpu_mask(pgdat->node_id)); set_cpus_allowed(tsk, node_to_cpumask(pgdat->node_id));
sprintf(tsk->comm, "kswapd%d", pgdat->node_id); sprintf(tsk->comm, "kswapd%d", pgdat->node_id);
sigfillset(&tsk->blocked); sigfillset(&tsk->blocked);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment