Commit ca887863 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] node enumeration fixes

From: Matthew Dobson <colpatch@us.ibm.com>

Here's a small update to the numnodes fix that went into -mm3.  The biggest
changes are:

1) Move the actual NODES_SHIFT and MAX_NUMNODES definitions into
   linux/numa.h and include that from linux/mmzone.h, instead of defining
   them directly in linux/mmzone.h.  This lets other files include *just*
   the NUMNODES stuff without grabbing all of mmzone.h (see the sketch
   after this list).

2) Pull NODE_SHIFT out of linux/mm.h.  It isn't used anywhere in the
   kernel, and it would only get confused with NODES_SHIFT.

3) Fix the IA64 patch.  The original patch I had sent out hadn't been
   tested on IA64.  It was mostly right, but there were circular
   dependencies.  All better now, and acked by Jesse.

4) In linux/mmzone.h, insert code to define MAX_NODES_SHIFT based on the
   size of unsigned long.  For 64-bit arches we can use a much larger
   value, which allows IA64 to have hundreds or thousands of nodes.
   MAX_NODES_SHIFT is defined as 10 (i.e. 1024 nodes) for 64-bit for now,
   although it could likely be much larger; for 32-bit it is 6 (i.e. 64
   nodes).  See the sketch after this list.

5) Small cleanup in include/asm-arm/memory.h.  Mostly the result of the
   new linux/numa.h file.  Much cleaner and more readable now.
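
As a rough, standalone illustration of points 1 and 4 (this is not kernel
code; it only mirrors the logic of the patch, and uses ULONG_MAX instead of
the kernel's BITS_PER_LONG to stay self-contained):

/* Sketch of the NODES_SHIFT / MAX_NODES_SHIFT interplay described above.
 * In the kernel, NODES_SHIFT comes from <asm/numnodes.h> when
 * CONFIG_DISCONTIGMEM is set; here only the fallback path is shown.
 */
#include <stdio.h>
#include <limits.h>

/* linux/numa.h: default to a single node when the arch defines nothing. */
#ifndef NODES_SHIFT
#define NODES_SHIFT 0
#endif
#define MAX_NUMNODES (1 << NODES_SHIFT)

/* linux/mmzone.h (DISCONTIGMEM case): the ceiling on NODES_SHIFT depends
 * on how much room the word size leaves in page->flags.
 */
#if ULONG_MAX > 0xffffffffUL
#define MAX_NODES_SHIFT 10	/* 64-bit: up to 1024 nodes */
#else
#define MAX_NODES_SHIFT 6	/* 32-bit: 8-bit zone field, 2 bits for zones, 6 left for nodes */
#endif

int main(void)
{
	printf("NODES_SHIFT=%d -> MAX_NUMNODES=%d\n", NODES_SHIFT, MAX_NUMNODES);
	printf("MAX_NODES_SHIFT=%d -> at most %d nodes\n",
	       MAX_NODES_SHIFT, 1 << MAX_NODES_SHIFT);
	return 0;
}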
parent d5135580
@@ -84,27 +84,24 @@ static inline void *phys_to_virt(unsigned long x)
 #define PHYS_TO_NID(addr)	(0)
-#else
+#else /* CONFIG_DISCONTIGMEM */
 /*
  * This is more complex.  We have a set of mem_map arrays spread
  * around in memory.
  */
-#include <asm/numnodes.h>
-
-#define NUM_NODES	(1 << NODES_SHIFT)
+#include <linux/numa.h>
 
 #define page_to_pfn(page)					\
 	(( (page) - page_zone(page)->zone_mem_map)		\
 	+ page_zone(page)->zone_start_pfn)
 
 #define pfn_to_page(pfn)					\
 	(PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT))
 
-#define pfn_valid(pfn)		(PFN_TO_NID(pfn) < NUM_NODES)
+#define pfn_valid(pfn)		(PFN_TO_NID(pfn) < MAX_NUMNODES)
 
 #define virt_to_page(kaddr)					\
 	(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))
 
-#define virt_addr_valid(kaddr)	(KVADDR_TO_NID(kaddr) < NUM_NODES)
+#define virt_addr_valid(kaddr)	(KVADDR_TO_NID(kaddr) < MAX_NUMNODES)
 
 /*
  * Common discontigmem stuff.
@@ -112,9 +109,7 @@ static inline void *phys_to_virt(unsigned long x)
  */
 #define PHYS_TO_NID(addr)	PFN_TO_NID((addr) >> PAGE_SHIFT)
 
-#undef NUM_NODES
-
-#endif
+#endif /* !CONFIG_DISCONTIGMEM */
 
 /*
  * For BIO.  "will die".  Kill me when bio_to_phys() and bvec_to_phys() die.
@@ -8,13 +8,11 @@
  * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
  * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
  */
 #ifndef _ASM_IA64_NODEDATA_H
 #define _ASM_IA64_NODEDATA_H
 
-#include <linux/mmzone.h>
+#include <linux/numa.h>
+#include <asm/mmzone.h>
 
 /*
  * Node Data. One of these structures is located on each node of a NUMA system.
@@ -16,9 +16,9 @@
 #ifdef CONFIG_NUMA
 
-# include <asm/mmzone.h>
+#include <linux/numa.h>
 #include <linux/cache.h>
 
 extern volatile char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern volatile cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
@@ -10,7 +10,7 @@
 #include <linux/config.h>
 #include <linux/cache.h>
-#include <linux/mmzone.h>
+#include <linux/numa.h>
 #include <asm/percpu.h>
 #include <asm/system.h>
 #include <asm/processor.h>
@@ -323,7 +323,6 @@ static inline void put_page(struct page *page)
  * The zone field is never updated after free_area_init_core()
  * sets it, so none of the operations on it need to be atomic.
  */
-#define NODE_SHIFT 4
 #define ZONE_SHIFT (BITS_PER_LONG - 8)
 
 struct zone;
@@ -10,14 +10,8 @@
 #include <linux/wait.h>
 #include <linux/cache.h>
 #include <linux/threads.h>
+#include <linux/numa.h>
 #include <asm/atomic.h>
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/numnodes.h>
-#endif
-#ifndef NODES_SHIFT
-#define NODES_SHIFT 0
-#endif
-#define MAX_NUMNODES (1 << NODES_SHIFT)
 
 /* Free memory management - zoned buddy allocator.  */
 #ifndef CONFIG_FORCE_MAX_ZONEORDER
@@ -313,12 +307,19 @@ extern struct pglist_data contig_page_data;
 #else /* CONFIG_DISCONTIGMEM */
 
 #include <asm/mmzone.h>
 
+#if BITS_PER_LONG == 32
 /*
- * page->zone is currently 8 bits
- * there are 3 zones (2 bits)
- * this leaves 8-2=6 bits for nodes
+ * with 32 bit flags field, page->zone is currently 8 bits.
+ * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
  */
 #define MAX_NODES_SHIFT 6
+#elif BITS_PER_LONG == 64
+/*
+ * with 64 bit flags field, there's plenty of room.
+ */
+#define MAX_NODES_SHIFT 10
+#endif
 
 #endif /* !CONFIG_DISCONTIGMEM */
include/linux/numa.h (new file):

#ifndef _LINUX_NUMA_H
#define _LINUX_NUMA_H

#include <linux/config.h>

#ifdef CONFIG_DISCONTIGMEM
#include <asm/numnodes.h>
#endif

#ifndef NODES_SHIFT
#define NODES_SHIFT	0
#endif

#define MAX_NUMNODES	(1 << NODES_SHIFT)

#endif /* _LINUX_NUMA_H */