Commit e5191c50 authored by Rik van Riel, committed by Linus Torvalds

[PATCH] new struct page shrinkage

The patch has been changed like you wanted, with page->zone
shoved into page->flags. I've also pulled the thing up to
your latest changes from linux.bkbits.net so you should be
able to just pull it into your tree from:

Rik
parent cd566b34
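The "page->zone shoved into page->flags" mentioned above amounts to keeping an 8-bit zone-table index in the top bits of page->flags. The following is a minimal userspace sketch of that bit arithmetic, mirroring the ZONE_SHIFT, set_page_zone() and page_zone() definitions added later in this patch; the stdio harness and the example values are illustrative only, not part of the patch.

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define ZONE_SHIFT      (BITS_PER_LONG - 8)

int main(void)
{
        unsigned long flags = 0x5UL;    /* pretend some PG_* bits are already set */
        unsigned long zone_num = 2;     /* e.g. the index of a highmem zone */

        /* set_page_zone(): clear the old index, then store the new one */
        flags &= ~(~0UL << ZONE_SHIFT);
        flags |= zone_num << ZONE_SHIFT;

        /* page_zone(): recover the index used to look up zone_table[] */
        printf("zone index = %lu, flag bits = %#lx\n",
               flags >> ZONE_SHIFT, flags & ~(~0UL << ZONE_SHIFT));
        return 0;
}

Because the index is written once at boot and never changes, none of these operations need to be atomic, which is what makes the packing into the flags word cheap.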
@@ -830,7 +830,7 @@ static void agp_generic_destroy_page(unsigned long addr)
         page = virt_to_page(pt);
         atomic_dec(&page->count);
         clear_bit(PG_locked, &page->flags);
-        wake_up(&page->wait);
+        wake_up_page(page);
         free_page((unsigned long) pt);
         atomic_dec(&agp_bridge.current_memory_agp);
 }
@@ -2828,7 +2828,7 @@ static void ali_destroy_page(unsigned long addr)
         page = virt_to_page(pt);
         atomic_dec(&page->count);
         clear_bit(PG_locked, &page->flags);
-        wake_up(&page->wait);
+        wake_up_page(page);
         free_page((unsigned long) pt);
         atomic_dec(&agp_bridge.current_memory_agp);
 }
......
@@ -294,14 +294,13 @@ static unsigned long i810_alloc_page(drm_device_t *dev)
 static void i810_free_page(drm_device_t *dev, unsigned long page)
 {
-        if(page == 0UL)
-                return;
-
-        atomic_dec(&virt_to_page(page)->count);
-        clear_bit(PG_locked, &virt_to_page(page)->flags);
-        wake_up(&virt_to_page(page)->wait);
-        free_page(page);
-        return;
+        if (page) {
+                struct page *p = virt_to_page(page);
+                atomic_dec(&p->count);
+                clear_bit(PG_locked, &p->flags);
+                wake_up_page(p);
+                free_page(page);
+        }
 }
 
 static int i810_dma_cleanup(drm_device_t *dev)
......
@@ -2115,8 +2115,7 @@ int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsig
  * of kiobuf structs (much like a user-space iovec list).
  *
  * The kiobuf must already be locked for IO. IO is submitted
- * asynchronously: you need to check page->locked, page->uptodate, and
- * maybe wait on page->wait.
+ * asynchronously: you need to check page->locked and page->uptodate.
  *
  * It is up to the caller to make sure that there are enough blocks
  * passed in to completely map the iobufs to disk.
@@ -2173,8 +2172,8 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[], kdev_t dev, sector_t b[],
 /*
  * Start I/O on a page.
  * This function expects the page to be locked and may return
- * before I/O is complete. You then have to check page->locked,
- * page->uptodate, and maybe wait on page->wait.
+ * before I/O is complete. You then have to check page->locked
+ * and page->uptodate.
  *
  * brw_page() is SMP-safe, although it's being called with the
  * kernel lock held - but the code is ready.
......
@@ -268,8 +268,6 @@ extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _P
 extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
 extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
 
-#define page_address(page) ((page)->virtual)
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
......
@@ -99,7 +99,6 @@ extern struct page *empty_zero_page;
 /*
  * Permanent address of a page. We never have highmem, so this is trivial.
  */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
 
 /*
......
@@ -439,7 +439,6 @@ static inline unsigned long __pte_page(pte_t pte)
 /* permanent address of a page */
-#define page_address(page) ((page)->virtual)
 #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
 #define pte_page(pte) (mem_map+pte_pagenr(pte))
......
@@ -264,11 +264,7 @@ extern unsigned long pg0[1024];
 #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
 #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
......
@@ -165,11 +165,6 @@
  * addresses:
  */
 
-/*
- * Given a pointer to an mem_map[] entry, return the kernel virtual
- * address corresponding to that page.
- */
-#define page_address(page) ((page)->virtual)
 /* Quick test to see if ADDR is a (potentially) valid physical address. */
 static inline long
......
@@ -331,11 +331,6 @@ extern inline int pgd_bad(pgd_t pgd) { return 0; }
 extern inline int pgd_present(pgd_t pgd) { return 1; }
 extern inline void pgd_clear(pgd_t *pgdp) { }
 
-/*
- * Permanent address of a page. On MIPS we never have highmem, so this
- * is simple.
- */
-#define page_address(page) ((page)->virtual)
 #ifdef CONFIG_CPU_VR41XX
 #define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> (PAGE_SHIFT + 2))))
 #else
......
@@ -370,11 +370,6 @@ extern inline void pgd_clear(pgd_t *pgdp)
         pgd_val(*pgdp) = ((unsigned long) invalid_pmd_table);
 }
 
-/*
- * Permanent address of a page. On MIPS64 we never have highmem, so this
- * is simple.
- */
-#define page_address(page) ((page)->virtual)
 #ifndef CONFIG_DISCONTIGMEM
 #define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
 #else
......
@@ -275,7 +275,6 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * Permanent address of a page. Obviously must never be
  * called on a highmem page.
  */
-#define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
 #define __page_address(page) ({ if (PageHighMem(page)) BUG(); PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 #define pte_page(x) (mem_map+pte_pagenr(x))
......
@@ -389,10 +389,6 @@ extern unsigned long empty_zero_page[1024];
 #define pmd_present(pmd) ((pmd_val(pmd) & PAGE_MASK) != 0)
 #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
 
-/*
- * Permanent address of a page.
- */
-#define page_address(page) ((page)->virtual)
 #define pte_page(x) (mem_map+(unsigned long)((pte_val(x)-PPC_MEMSTART) >> PAGE_SHIFT))
 
 #ifndef __ASSEMBLY__
......
@@ -239,10 +239,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
         *pteptr = pteval;
 }
 
-/*
- * Permanent address of a page.
- */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
......
@@ -234,10 +234,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
         *pteptr = pteval;
 }
 
-/*
- * Permanent address of a page.
- */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
......
@@ -208,11 +208,6 @@ extern unsigned long empty_zero_page[1024];
 #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
 #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#define page_address(page) ((page)->virtual) /* P1 address of the page */
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 #define pte_page(x) phys_to_page(pte_val(x)&PTE_PHYS_MASK)
......
@@ -293,9 +293,6 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
 #define page_pte_prot(page, prot) mk_pte(page, prot)
 #define page_pte(page) page_pte_prot(page, __pgprot(0))
 
-/* Permanent address of a page. */
-#define page_address(page) ((page)->virtual)
-
 BTFIXUPDEF_CALL(struct page *, pte_page, pte_t)
 #define pte_page(pte) BTFIXUP_CALL(pte_page)(pte)
......
@@ -30,6 +30,9 @@ extern void do_BUG(const char *file, int line);
 #define PAGE_BUG(page) BUG()
 
+/* Sparc64 is slow at multiplication, we prefer to use some extra space. */
+#define WANT_PAGE_VIRTUAL 1
+
 extern void _clear_page(void *page);
 #define clear_page(X) _clear_page((void *)(X))
 extern void clear_user_page(void *page, unsigned long vaddr);
......
@@ -243,8 +243,7 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 #define pte_mkold(pte) (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
 
 /* Permanent address of a page. */
-#define __page_address(page) ((page)->virtual)
-#define page_address(page) ({ __page_address(page); })
+#define __page_address(page) page_address(page)
 
 #define pte_page(x) (mem_map+(((pte_val(x)&_PAGE_PADDR)-phys_base)>>PAGE_SHIFT))
......
@@ -289,7 +289,6 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
  * Permanent address of a page. Obviously must never be
  * called on a highmem page.
  */
-#define page_address(page) ((page)->virtual)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
                                                    right? */
 #define pte_page(x) (mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
......
@@ -157,12 +157,23 @@ typedef struct page {
                                            updated asynchronously */
         struct list_head lru;           /* Pageout list, eg. active_list;
                                            protected by pagemap_lru_lock !! */
-        wait_queue_head_t wait;         /* Page locked? Stand in line... */
         struct page **pprev_hash;       /* Complement to *next_hash. */
         struct buffer_head * buffers;   /* Buffer maps us to a disk block. */
+
+        /*
+         * On machines where all RAM is mapped into kernel address space,
+         * we can simply calculate the virtual address. On machines with
+         * highmem some memory is mapped into kernel virtual memory
+         * dynamically, so we need a place to store that address.
+         * Note that this field could be 16 bits on x86 ... ;)
+         *
+         * Architectures with slow multiplication can define
+         * WANT_PAGE_VIRTUAL in asm/page.h
+         */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
         void *virtual;                  /* Kernel virtual address (NULL if
                                            not kmapped, ie. highmem) */
-        struct zone_struct *zone;       /* Memory zone we are in. */
+#endif /* CONFIG_HIGMEM || WANT_PAGE_VIRTUAL */
 } mem_map_t;
@@ -183,6 +194,11 @@ typedef struct page {
 #define page_count(p)           atomic_read(&(p)->count)
 #define set_page_count(p,v)     atomic_set(&(p)->count, v)
 
+static inline void init_page_count(struct page *page)
+{
+        page->count.counter = 0;
+}
+
 /*
  * Various page->flags bits:
  *
@@ -237,7 +253,7 @@ typedef struct page {
  *   - private pages which have been modified may need to be swapped out
  *     to swap space and (later) to be read back into memory.
  * During disk I/O, PG_locked is used. This bit is set before I/O
- * and reset when I/O completes. page->wait is a wait queue of all
+ * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
  * tasks waiting for the I/O on this page to complete.
  * PG_uptodate tells whether the page's contents is valid.
  * When a read completes, the page becomes uptodate, unless a disk I/O
@@ -299,6 +315,61 @@ typedef struct page {
 #define SetPageChecked(page)    set_bit(PG_checked, &(page)->flags)
 #define PageLaunder(page)       test_bit(PG_launder, &(page)->flags)
 #define SetPageLaunder(page)    set_bit(PG_launder, &(page)->flags)
+#define __SetPageReserved(page) __set_bit(PG_reserved, &(page)->flags)
+
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+#define NODE_SHIFT 4
+#define ZONE_SHIFT (BITS_PER_LONG - 8)
+
+struct zone_struct;
+extern struct zone_struct *zone_table[];
+
+static inline zone_t *page_zone(struct page *page)
+{
+        return zone_table[page->flags >> ZONE_SHIFT];
+}
+
+static inline void set_page_zone(struct page *page, unsigned long zone_num)
+{
+        page->flags &= ~(~0UL << ZONE_SHIFT);
+        page->flags |= zone_num << ZONE_SHIFT;
+}
+
+/*
+ * In order to avoid #ifdefs within C code itself, we define
+ * set_page_address to a noop for non-highmem machines, where
+ * the field isn't useful.
+ * The same is true for page_address() in arch-dependent code.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+#define set_page_address(page, address)                 \
+        do {                                            \
+                (page)->virtual = (address);            \
+        } while(0)
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define set_page_address(page, address)  do { } while(0)
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+/*
+ * Permanent address of a page. Obviously must never be
+ * called on a highmem page.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) ((page)->virtual)
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define page_address(page)                                              \
+        __va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)  \
+                        + page_zone(page)->zone_start_paddr)
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
 extern void FASTCALL(set_page_dirty(struct page *));
......
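When neither CONFIG_HIGHMEM nor WANT_PAGE_VIRTUAL is defined, the page_address() fallback above is pure arithmetic: the page's offset within its zone's mem_map gives a physical address, which __va() turns into a virtual one. A rough userspace illustration follows; the 4 KB page size, the 3 GB direct-map base, and the example zone values are assumptions made only to produce concrete numbers, they are not taken from the diff.

#include <stdio.h>

#define PAGE_SHIFT   12                     /* assumed 4 KB pages */
#define PAGE_OFFSET  0xC0000000UL           /* assumed kernel direct-map base */
#define __va(paddr)  ((paddr) + PAGE_OFFSET)

int main(void)
{
        /* stand-ins for (page - zone_mem_map) and zone->zone_start_paddr */
        unsigned long index_in_zone = 300;              /* 300th page of the zone */
        unsigned long zone_start_paddr = 0x01000000UL;  /* zone starts at 16 MB */

        unsigned long vaddr =
                __va((index_in_zone << PAGE_SHIFT) + zone_start_paddr);

        printf("page_address() would return %#lx\n", vaddr);
        return 0;
}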
@@ -7,6 +7,7 @@
 #include <linux/config.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/wait.h>
 
 /*
  * Free memory management - zoned buddy allocator.
@@ -47,6 +48,35 @@ typedef struct zone_struct {
          */
         free_area_t free_area[MAX_ORDER];
 
+        /*
+         * wait_table           -- the array holding the hash table
+         * wait_table_size      -- the size of the hash table array
+         * wait_table_shift     -- wait_table_size
+         *                         == BITS_PER_LONG (1 << wait_table_bits)
+         *
+         * The purpose of all these is to keep track of the people
+         * waiting for a page to become available and make them
+         * runnable again when possible. The trouble is that this
+         * consumes a lot of space, especially when so few things
+         * wait on pages at a given time. So instead of using
+         * per-page waitqueues, we use a waitqueue hash table.
+         *
+         * The bucket discipline is to sleep on the same queue when
+         * colliding and wake all in that wait queue when removing.
+         * When something wakes, it must check to be sure its page is
+         * truly available, a la thundering herd. The cost of a
+         * collision is great, but given the expected load of the
+         * table, they should be so rare as to be outweighed by the
+         * benefits from the saved space.
+         *
+         * __wait_on_page() and unlock_page() in mm/filemap.c, are the
+         * primary users of these fields, and in mm/page_alloc.c
+         * free_area_init_core() performs the initialization of them.
+         */
+        wait_queue_head_t *wait_table;
+        unsigned long wait_table_size;
+        unsigned long wait_table_shift;
+
         /*
          * Discontig memory support fields.
          */
@@ -132,11 +162,15 @@ extern pg_data_t contig_page_data;
 #define NODE_DATA(nid)          (&contig_page_data)
 #define NODE_MEM_MAP(nid)       mem_map
+#define MAX_NR_NODES            1
 
 #else /* !CONFIG_DISCONTIGMEM */
 
 #include <asm/mmzone.h>
 
+/* page->zone is currently 8 bits ... */
+#define MAX_NR_NODES            (255 / MAX_NR_ZONES)
+
 #endif /* !CONFIG_DISCONTIGMEM */
 
 #define MAP_ALIGN(x) ((((x) % sizeof(mem_map_t)) == 0) ? (x) : ((x) + \
......
@@ -97,6 +97,8 @@ static inline void wait_on_page(struct page * page)
                 ___wait_on_page(page);
 }
 
+extern void wake_up_page(struct page *);
+
 extern struct page * grab_cache_page (struct address_space *, unsigned long);
 extern struct page * grab_cache_page_nowait (struct address_space *, unsigned long);
......
@@ -9,7 +9,7 @@
 O_TARGET := mm.o
 
-export-objs := shmem.o filemap.o mempool.o
+export-objs := shmem.o filemap.o mempool.o page_alloc.o
 
 obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
             vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
......
@@ -740,6 +740,67 @@ static int read_cluster_nonblocking(struct file * file, unsigned long offset,
         return 0;
 }
 
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+#if BITS_PER_LONG == 32
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+#elif BITS_PER_LONG == 64
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#else
+#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#endif
+
+/*
+ * In order to wait for pages to become available there must be
+ * waitqueues associated with pages. By using a hash table of
+ * waitqueues where the bucket discipline is to maintain all
+ * waiters on the same queue and wake all when any of the pages
+ * become available, and for the woken contexts to check to be
+ * sure the appropriate page became available, this saves space
+ * at a cost of "thundering herd" phenomena during rare hash
+ * collisions.
+ */
+static inline wait_queue_head_t *page_waitqueue(struct page *page)
+{
+        const zone_t *zone = page_zone(page);
+        wait_queue_head_t *wait = zone->wait_table;
+        unsigned long hash = (unsigned long)page;
+
+#if BITS_PER_LONG == 64
+        /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+        unsigned long n = hash;
+        n <<= 18;
+        hash -= n;
+        n <<= 33;
+        hash -= n;
+        n <<= 3;
+        hash += n;
+        n <<= 3;
+        hash -= n;
+        n <<= 4;
+        hash += n;
+        n <<= 2;
+        hash += n;
+#else
+        /* On some cpus multiply is faster, on others gcc will do shifts */
+        hash *= GOLDEN_RATIO_PRIME;
+#endif
+        hash >>= zone->wait_table_shift;
+
+        return &wait[hash];
+}
+
 /*
  * Wait for a page to get unlocked.
  *
@@ -749,10 +810,11 @@ static int read_cluster_nonblocking(struct file * file, unsigned long offset,
  */
 void ___wait_on_page(struct page *page)
 {
+        wait_queue_head_t *waitqueue = page_waitqueue(page);
         struct task_struct *tsk = current;
         DECLARE_WAITQUEUE(wait, tsk);
 
-        add_wait_queue(&page->wait, &wait);
+        add_wait_queue(waitqueue, &wait);
         do {
                 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                 if (!PageLocked(page))
@@ -760,19 +822,23 @@ void ___wait_on_page(struct page *page)
                 sync_page(page);
                 schedule();
         } while (PageLocked(page));
-        tsk->state = TASK_RUNNING;
-        remove_wait_queue(&page->wait, &wait);
+        __set_task_state(tsk, TASK_RUNNING);
+        remove_wait_queue(waitqueue, &wait);
 }
 
+/*
+ * Unlock the page and wake up sleepers in ___wait_on_page.
+ */
 void unlock_page(struct page *page)
 {
+        wait_queue_head_t *waitqueue = page_waitqueue(page);
         clear_bit(PG_launder, &(page)->flags);
         smp_mb__before_clear_bit();
         if (!test_and_clear_bit(PG_locked, &(page)->flags))
                 BUG();
         smp_mb__after_clear_bit();
-        if (waitqueue_active(&(page)->wait))
-                wake_up(&(page)->wait);
+        if (waitqueue_active(waitqueue))
+                wake_up_all(waitqueue);
 }
 
 /*
@@ -781,10 +847,11 @@ void unlock_page(struct page *page)
  */
 static void __lock_page(struct page *page)
 {
+        wait_queue_head_t *waitqueue = page_waitqueue(page);
         struct task_struct *tsk = current;
         DECLARE_WAITQUEUE(wait, tsk);
 
-        add_wait_queue_exclusive(&page->wait, &wait);
+        add_wait_queue_exclusive(waitqueue, &wait);
         for (;;) {
                 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                 if (PageLocked(page)) {
@@ -794,10 +861,15 @@ static void __lock_page(struct page *page)
                 if (!TryLockPage(page))
                         break;
         }
-        tsk->state = TASK_RUNNING;
-        remove_wait_queue(&page->wait, &wait);
+        __set_task_state(tsk, TASK_RUNNING);
+        remove_wait_queue(waitqueue, &wait);
 }
 
+void wake_up_page(struct page *page)
+{
+        wake_up(page_waitqueue(page));
+}
+EXPORT_SYMBOL(wake_up_page);
+
 /*
  * Get an exclusive lock on the page, optimistically
......
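To see the multiplicative hash at work, the 32-bit branch above can be exercised in userspace: consecutive struct page addresses, multiplied by GOLDEN_RATIO_PRIME and shifted down by wait_table_shift, land in well-spread buckets even though the inputs differ by only a small constant. The table size, the spacing between struct page entries, and the mem_map base address below are assumptions chosen just to make the numbers concrete.

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_PRIME_32  0x9e370001UL   /* from the 32-bit branch above */

int main(void)
{
        uint32_t table_size = 4096;       /* assumed wait_table_size */
        uint32_t shift = 32 - 12;         /* BITS_PER_LONG - wait_table_bits(4096) */
        uint32_t spacing = 80;            /* assumed distance between struct pages */
        uint32_t base = 0xC1000000;       /* assumed address of mem_map[0] */
        int i;

        for (i = 0; i < 8; i++) {
                uint32_t hash = base + i * spacing;   /* (unsigned long)page */
                hash *= GOLDEN_RATIO_PRIME_32;        /* multiplicative hash */
                hash >>= shift;                       /* keep the top 12 bits */
                printf("page %d -> bucket %u of %u\n",
                       i, (unsigned)hash, (unsigned)table_size);
        }
        return 0;
}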
@@ -378,7 +378,7 @@ void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
                 /*
                  * is destination page below bounce pfn?
                  */
-                if ((page - page->zone->zone_mem_map) + (page->zone->zone_start_paddr >> PAGE_SHIFT) < pfn)
+                if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) < pfn)
                         continue;
 
                 /*
......
 /*
  * linux/mm/page_alloc.c
  *
+ * Manages the free list, the system allocates free pages here.
+ * Note that kmalloc() lives in slab.c
+ *
  * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  * Swap reorganised 29.12.95, Stephen Tweedie
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
@@ -18,6 +21,7 @@
 #include <linux/bootmem.h>
 #include <linux/slab.h>
 #include <linux/compiler.h>
+#include <linux/module.h>
 
 int nr_swap_pages;
 int nr_active_pages;
@@ -26,6 +30,10 @@ struct list_head inactive_list;
 struct list_head active_list;
 pg_data_t *pgdat_list;
 
+/* Used to look up the address of the struct zone encoded in page->zone */
+zone_t *zone_table[MAX_NR_ZONES*MAX_NR_NODES];
+EXPORT_SYMBOL(zone_table);
+
 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
 static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
 static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
@@ -54,12 +62,31 @@ static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
 /*
  * Temporary debugging check.
  */
-#define BAD_RANGE(zone,x) (((zone) != (x)->zone) || (((x)-mem_map) < (zone)->zone_start_mapnr) || (((x)-mem_map) >= (zone)->zone_start_mapnr+(zone)->size))
+#define BAD_RANGE(zone, page)                                            \
+(                                                                        \
+        (((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size))  \
+        || (((page) - mem_map) < (zone)->zone_start_mapnr)               \
+        || ((zone) != page_zone(page))                                   \
+)
 
 /*
- * Buddy system. Hairy. You really aren't expected to understand this
+ * Freeing function for a buddy system allocator.
+ *
+ * The concept of a buddy system is to maintain direct-mapped table
+ * (containing bit values) for memory blocks of various "orders".
+ * The bottom level table contains the map for the smallest allocatable
+ * units of memory (here, pages), and each level above it describes
+ * pairs of units from the levels below, hence, "buddies".
+ * At a high level, all that happens here is marking the table entry
+ * at the bottom level available, and propagating the changes upward
+ * as necessary, plus some accounting needed to play nicely with other
+ * parts of the VM system.
+ *
+ * TODO: give references to descriptions of buddy system allocators,
+ * describe precisely the silly trick buddy allocators use to avoid
+ * storing an extra bit, utilizing entry point information.
  *
- * Hint: -mask = 1+~mask
+ * -- wli
  */
 static void FASTCALL(__free_pages_ok (struct page *page, unsigned int order));
@@ -90,7 +117,7 @@ static void __free_pages_ok (struct page *page, unsigned int order)
                 goto local_freelist;
  back_local_freelist:
 
-        zone = page->zone;
+        zone = page_zone(page);
 
         mask = (~0UL) << order;
         base = zone->zone_mem_map;
@@ -117,6 +144,8 @@ static void __free_pages_ok (struct page *page, unsigned int order)
                         break;
                 /*
                  * Move the buddy up one level.
+                 * This code is taking advantage of the identity:
+                 *      -mask = 1+~mask
                  */
                 buddy1 = base + (page_idx ^ -mask);
                 buddy2 = base + page_idx;
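The identity noted in the new comment is easy to check with a concrete case. For order 3, mask = ~0UL << 3, so -mask (equivalently 1 + ~mask) is 8, and XOR-ing a block's index with it flips exactly the bit that distinguishes the block from its buddy. A small illustrative check follows; the starting index 48 is an arbitrary, order-aligned example.

#include <stdio.h>

int main(void)
{
        unsigned int order = 3;
        unsigned long mask = (~0UL) << order;   /* ...11111000 */
        unsigned long page_idx = 48;            /* index of a free order-3 block */

        /* -mask == 1 + ~mask == 1UL << order, i.e. 8 here */
        unsigned long buddy_idx = page_idx ^ -mask;

        printf("-mask = %lu, buddy of %lu is %lu\n", -mask, page_idx, buddy_idx);
        /* merging the pair yields the order-4 block starting at index 48 */
        printf("merged block starts at %lu\n", page_idx & mask);
        return 0;
}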
@@ -255,7 +284,7 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
                         entry = local_pages->next;
                         do {
                                 tmp = list_entry(entry, struct page, list);
-                                if (tmp->index == order && memclass(tmp->zone, classzone)) {
+                                if (tmp->index == order && memclass(page_zone(tmp), classzone)) {
                                         list_del(entry);
                                         current->nr_local_pages--;
                                         set_page_count(tmp, 1);
@@ -625,6 +654,48 @@ static inline void build_zonelists(pg_data_t *pgdat)
         }
 }
 
+/*
+ * Helper functions to size the waitqueue hash table.
+ * Essentially these want to choose hash table sizes sufficiently
+ * large so that collisions trying to wait on pages are rare.
+ * But in fact, the number of active page waitqueues on typical
+ * systems is ridiculously low, less than 200. So this is even
+ * conservative, even though it seems large.
+ *
+ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
+ * waitqueues, i.e. the size of the waitq table given the number of pages.
+ */
+#define PAGES_PER_WAITQUEUE 256
+static inline unsigned long wait_table_size(unsigned long pages)
+{
+        unsigned long size = 1;
+
+        pages /= PAGES_PER_WAITQUEUE;
+
+        while (size < pages)
+                size <<= 1;
+
+        /*
+         * Once we have dozens or even hundreds of threads sleeping
+         * on IO we've got bigger problems than wait queue collision.
+         * Limit the size of the wait table to a reasonable size.
+         */
+        size = min(size, 4096UL);
+
+        return size;
+}
+
+/*
+ * This is an integer logarithm so that shifts can be used later
+ * to extract the more random high bits from the multiplicative
+ * hash function before the remainder is taken.
+ */
+static inline unsigned long wait_table_bits(unsigned long size)
+{
+        return ffz(~size);
+}
+
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
 /*
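To make the sizing concrete: a zone of 131072 pages (512 MB with 4 KB pages, an assumed example) gives 131072 / 256 = 512, so wait_table_size() returns 512 buckets; wait_table_bits(512) is 9, so wait_table_shift becomes BITS_PER_LONG - 9 and the hash keeps its top 9 bits. A zone larger than about 4 GB with 4 KB pages is clamped to the 4096-bucket cap. The following userspace re-statement of the two helpers (ffz(~size) replaced by a plain loop) is a sketch for checking those numbers, not kernel code.

#include <stdio.h>

#define PAGES_PER_WAITQUEUE 256

static unsigned long wait_table_size(unsigned long pages)
{
        unsigned long size = 1;

        pages /= PAGES_PER_WAITQUEUE;
        while (size < pages)
                size <<= 1;
        return size < 4096UL ? size : 4096UL;   /* same cap as min(size, 4096UL) */
}

static unsigned long wait_table_bits(unsigned long size)
{
        unsigned long bits = 0;                 /* log2 of a power-of-two size */

        while (size > 1) {
                size >>= 1;
                bits++;
        }
        return bits;
}

int main(void)
{
        unsigned long pages = 131072;           /* assumed 512 MB zone, 4 KB pages */
        unsigned long size = wait_table_size(pages);

        printf("%lu pages -> %lu buckets, shift = BITS_PER_LONG - %lu\n",
               pages, size, wait_table_bits(size));
        return 0;
}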
@@ -637,7 +708,6 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
         unsigned long *zones_size, unsigned long zone_start_paddr,
         unsigned long *zholes_size, struct page *lmem_map)
 {
-        struct page *p;
         unsigned long i, j;
         unsigned long map_size;
         unsigned long totalpages, offset, realtotalpages;
@@ -680,24 +750,13 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
         pgdat->node_start_mapnr = (lmem_map - mem_map);
         pgdat->nr_zones = 0;
 
-        /*
-         * Initially all pages are reserved - free ones are freed
-         * up by free_all_bootmem() once the early boot process is
-         * done.
-         */
-        for (p = lmem_map; p < lmem_map + totalpages; p++) {
-                set_page_count(p, 0);
-                SetPageReserved(p);
-                init_waitqueue_head(&p->wait);
-                memlist_init(&p->list);
-        }
-
         offset = lmem_map - mem_map;
         for (j = 0; j < MAX_NR_ZONES; j++) {
                 zone_t *zone = pgdat->node_zones + j;
                 unsigned long mask;
                 unsigned long size, realsize;
 
+                zone_table[nid * MAX_NR_ZONES + j] = zone;
                 realsize = size = zones_size[j];
                 if (zholes_size)
                         realsize -= zholes_size[j];
@@ -712,6 +771,20 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
                 if (!size)
                         continue;
 
+                /*
+                 * The per-page waitqueue mechanism uses hashed waitqueues
+                 * per zone.
+                 */
+                zone->wait_table_size = wait_table_size(size);
+                zone->wait_table_shift =
+                        BITS_PER_LONG - wait_table_bits(zone->wait_table_size);
+                zone->wait_table = (wait_queue_head_t *)
+                        alloc_bootmem_node(pgdat, zone->wait_table_size
+                                                * sizeof(wait_queue_head_t));
+
+                for(i = 0; i < zone->wait_table_size; ++i)
+                        init_waitqueue_head(zone->wait_table + i);
+
                 pgdat->nr_zones = j+1;
 
                 mask = (realsize / zone_balance_ratio[j]);
@@ -730,11 +803,19 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
                 if ((zone_start_paddr >> PAGE_SHIFT) & (zone_required_alignment-1))
                         printk("BUG: wrong zone alignment, it will crash\n");
 
+                /*
+                 * Initially all pages are reserved - free ones are freed
+                 * up by free_all_bootmem() once the early boot process is
+                 * done. Non-atomic initialization, single-pass.
+                 */
                 for (i = 0; i < size; i++) {
                         struct page *page = mem_map + offset + i;
-                        page->zone = zone;
+                        set_page_zone(page, nid * MAX_NR_ZONES + j);
+                        init_page_count(page);
+                        __SetPageReserved(page);
+                        memlist_init(&page->list);
                         if (j != ZONE_HIGHMEM)
-                                page->virtual = __va(zone_start_paddr);
+                                set_page_address(page, __va(zone_start_paddr));
                         zone_start_paddr += PAGE_SIZE;
                 }
......
@@ -59,7 +59,7 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
                 return 0;
 
         /* Don't bother replenishing zones not under pressure.. */
-        if (!memclass(page->zone, classzone))
+        if (!memclass(page_zone(page), classzone))
                 return 0;
 
         if (TryLockPage(page))
@@ -370,7 +370,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
                 if (unlikely(!page_count(page)))
                         continue;
 
-                if (!memclass(page->zone, classzone))
+                if (!memclass(page_zone(page), classzone))
                         continue;
 
                 /* Racy check to avoid trylocking when not worthwhile */
......