Commit c60ed932 authored by Linus Torvalds

Import 2.1.115pre2

parent b5d6c0fe
@@ -88,6 +88,21 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
         return (pte_t *) pmd_page(*pmd) + offset;
 }
 
+int do_check_pgt_cache(int low, int high)
+{
+        int freed = 0;
+        if(pgtable_cache_size > high) {
+                do {
+                        if(pgd_quicklist)
+                                free_pgd_slow(get_pgd_fast()), freed++;
+                        if(pmd_quicklist)
+                                free_pmd_slow(get_pmd_fast()), freed++;
+                        if(pte_quicklist)
+                                free_pte_slow(get_pte_fast()), freed++;
+                } while(pgtable_cache_size > low);
+        }
+        return freed;
+}
 
 /*
  * BAD_PAGE is the page that is used for page faults when linux
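Every architecture gains this same helper, so the pattern is worth spelling out once. The quicklists are LIFO caches of ready-to-use page-table pages, pgtable_cache_size counts the cached pages, and do_check_pgt_cache(low, high) applies hysteresis: do nothing until the cache grows past high, then drain it all the way down to low. A standalone user-space model of that behaviour (illustrative names, not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model of one quicklist: a LIFO free list whose length is
     * tracked in a counter, as the kernel tracks pgtable_cache_size. */
    struct qnode { struct qnode *next; };

    static struct qnode *quicklist;
    static int cache_size;

    static struct qnode *get_fast(void)          /* like get_pte_fast() */
    {
        struct qnode *p = quicklist;
        if (p) {
            quicklist = p->next;
            cache_size--;
        }
        return p;
    }

    static void free_slow(struct qnode *p)       /* like free_pte_slow() */
    {
        free(p);                                 /* really release the page */
    }

    /* Mirrors do_check_pgt_cache(): trim only when above 'high',
     * then keep going until at or below 'low'. */
    static int check_cache(int low, int high)
    {
        int freed = 0;

        if (cache_size > high) {
            do {
                if (quicklist)
                    free_slow(get_fast()), freed++;
            } while (cache_size > low);
        }
        return freed;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++) {          /* warm the cache up */
            struct qnode *p = malloc(sizeof(*p));
            p->next = quicklist;
            quicklist = p;
            cache_size++;
        }
        int freed = check_cache(25, 50);
        printf("freed %d, %d cached\n", freed, cache_size);
        return 0;
    }

With the default watermarks this prints "freed 75, 25 cached": nothing happens until the cache exceeds the high mark, and the drain deliberately overshoots down to the low mark so trimming is not re-triggered on the very next allocation.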
@@ -71,6 +71,7 @@ asmlinkage int sys_idle(void)
         current->priority = -100;
         for (;;)
         {
+                check_pgt_cache();
 #if 0 //def ARCH_IDLE_OK
                 if (!hlt_counter && !need_resched)
                         proc_idle ();
@@ -34,6 +34,22 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern char _etext, _stext, _edata, __bss_start, _end;
 extern char __init_begin, __init_end;
 
+int do_check_pgt_cache(int low, int high)
+{
+        int freed = 0;
+        if(pgtable_cache_size > high) {
+                do {
+                        if(pgd_quicklist)
+                                free_pgd_slow(get_pgd_fast()), freed++;
+                        if(pmd_quicklist)
+                                free_pmd_slow(get_pmd_fast()), freed++;
+                        if(pte_quicklist)
+                                free_pte_slow(get_pte_fast()), freed++;
+                } while(pgtable_cache_size > low);
+        }
+        return freed;
+}
+
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
@@ -146,10 +146,6 @@ mainmenu_option next_comment
 comment 'Kernel hacking'
 
 #bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC
-bool 'Kernel profiling support' CONFIG_PROFILE
-if [ "$CONFIG_PROFILE" = "y" ]; then
-   int ' Profile shift count' CONFIG_PROFILE_SHIFT 2
-fi
 bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
 endmenu
@@ -88,6 +88,21 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
         return (pte_t *) (pmd_page(*pmd) + offset);
 }
 
+int do_check_pgt_cache(int low, int high)
+{
+        int freed = 0;
+        if(pgtable_cache_size > high) {
+                do {
+                        if(pgd_quicklist)
+                                free_pgd_slow(get_pgd_fast()), freed++;
+                        if(pmd_quicklist)
+                                free_pmd_slow(get_pmd_fast()), freed++;
+                        if(pte_quicklist)
+                                free_pte_slow(get_pte_fast()), freed++;
+                } while(pgtable_cache_size > low);
+        }
+        return freed;
+}
 
 /*
  * BAD_PAGE is the page that is used for page faults when linux
@@ -31,6 +31,20 @@ extern void die_if_kernel(char *,struct pt_regs *,long);
 extern void init_kpointer_table(void);
 extern void show_net_buffers(void);
 
+int do_check_pgt_cache(int low, int high)
+{
+        int freed = 0;
+        if(pgtable_cache_size > high) {
+                do {
+                        if(pmd_quicklist)
+                                freed += free_pmd_slow(get_pmd_fast());
+                        if(pte_quicklist)
+                                free_pte_slow(get_pte_fast()), freed++;
+                } while(pgtable_cache_size > low);
+        }
+        return freed;
+}
+
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
@@ -161,7 +161,7 @@ pmd_t *get_pointer_table (void)
         return pmdp;
 }
 
-void free_pointer_table (pmd_t *ptable)
+int free_pointer_table (pmd_t *ptable)
 {
         struct ptable_desc *dp;
         unsigned long page = (unsigned long)ptable & PAGE_MASK;
@@ -189,7 +189,7 @@ void free_pointer_table (pmd_t *ptable)
                 cache_page (dp->page);
                 free_page (dp->page);
                 kfree (dp);
-                return;
+                return 1;
         } else {
                 /*
                  * move this descriptor to the front of the list, since
@@ -205,6 +205,7 @@ void free_pointer_table (pmd_t *ptable)
                 ptable_list.next->prev = dp;
                 ptable_list.next = dp;
                 restore_flags(flags);
+                return 0;
         }
 }
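The return-value change above exists so the m68k do_check_pgt_cache() can count real progress: pointer tables are packed several to a page, and a physical page is only released when its last table goes. A minimal user-space sketch of that contract (hypothetical slot bookkeeping, not the kernel structures):

    #include <stdio.h>

    struct ptable_desc_model {
        unsigned char alloced;    /* bitmask of table slots in use */
    };

    /* Returns 1 if the backing page itself could be freed, 0 if other
     * tables on the same page are still live: the same contract the
     * patch gives free_pointer_table()/free_pmd_slow(). */
    static int free_slot(struct ptable_desc_model *dp, int slot)
    {
        dp->alloced &= ~(1 << slot);
        if (dp->alloced == 0)
            return 1;             /* last user gone: page really freed */
        return 0;                 /* page still shared: nothing freed */
    }

    int main(void)
    {
        struct ptable_desc_model dp = { .alloced = 0x03 };  /* slots 0, 1 used */
        printf("%d\n", free_slot(&dp, 0));   /* 0: slot 1 still live */
        printf("%d\n", free_slot(&dp, 1));   /* 1: page now freeable */
        return 0;
    }

free_pmd_slow() simply forwards this 0-or-1 result, which is why the m68k loop uses freed += free_pmd_slow(get_pmd_fast()) where the other ports can count unconditionally.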
@@ -47,6 +47,22 @@ asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
         return 0;
 }
 
+int do_check_pgt_cache(int low, int high)
+{
+        int freed = 0;
+        if(pgtable_cache_size > high) {
+                do {
+                        if(pgd_quicklist)
+                                free_pgd_slow(get_pgd_fast()), freed++;
+                        if(pmd_quicklist)
+                                free_pmd_slow(get_pmd_fast()), freed++;
+                        if(pte_quicklist)
+                                free_pte_slow(get_pte_fast()), freed++;
+                } while(pgtable_cache_size > low);
+        }
+        return freed;
+}
+
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
@@ -148,6 +148,22 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
         return (pte_t *) pmd_page(*pmd) + offset;
 }
 
+int do_check_pgt_cache(int low, int high)
+{
+        int freed = 0;
+        if(pgtable_cache_size > high) {
+                do {
+                        if(pgd_quicklist)
+                                free_pgd_slow(get_pgd_fast()), freed++;
+                        if(pmd_quicklist)
+                                free_pmd_slow(get_pmd_fast()), freed++;
+                        if(pte_quicklist)
+                                free_pte_slow(get_pte_fast()), freed++;
+                } while(pgtable_cache_size > low);
+        }
+        return freed;
+}
+
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
@@ -40,7 +40,6 @@
 #include <asm/elf.h>
 
 extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
-extern void srmmu_check_pgt_cache(void);
 
 struct task_struct *current_set[NR_CPUS] = {&init_task, };
@@ -92,9 +91,8 @@ asmlinkage int sys_idle(void)
                                 }
                         }
                         restore_flags(flags);
-                        check_pgt_cache();
-                } else
-                        srmmu_check_pgt_cache();
+                }
+                check_pgt_cache();
                 schedule();
         }
         ret = 0;
@@ -2722,16 +2722,12 @@ __initfunc(static void get_srmmu_type(void))
                 srmmu_is_bad();
 }
 
-/* Low and high watermarks for page table cache.
-   The system should try to have pgt_water[0] <= cache elements <= pgt_water[1]
-*/
-extern int pgt_cache_water[2];
-
-void srmmu_check_pgt_cache(void)
+static int srmmu_check_pgt_cache(int low, int high)
 {
         struct page *page, *page2;
+        int freed = 0;
 
-        if (pgtable_cache_size > pgt_cache_water[0]) {
+        if (pgtable_cache_size > high) {
                 spin_lock(&pte_spinlock);
                 for (page2 = NULL, page = (struct page *)pte_quicklist; page;) {
                         if ((unsigned int)page->pprev_hash == 0xffff) {
@@ -2743,11 +2739,12 @@ void srmmu_check_pgt_cache(void)
                                 page->pprev_hash = NULL;
                                 pgtable_cache_size -= 16;
                                 free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+                                freed++;
                                 if (page2)
                                         page = page2->next_hash;
                                 else
                                         page = (struct page *)pte_quicklist;
-                                if (pgtable_cache_size <= pgt_cache_water[1])
+                                if (pgtable_cache_size <= low)
                                         break;
                                 continue;
                         }
@@ -2756,7 +2753,7 @@ void srmmu_check_pgt_cache(void)
                 }
                 spin_unlock(&pte_spinlock);
         }
-        if (pgd_cache_size > pgt_cache_water[0] / 4) {
+        if (pgd_cache_size > high / 4) {
                 spin_lock(&pgd_spinlock);
                 for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
                         if ((unsigned int)page->pprev_hash == 0xf) {
@@ -2768,11 +2765,12 @@ void srmmu_check_pgt_cache(void)
                                 page->pprev_hash = NULL;
                                 pgd_cache_size -= 4;
                                 free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+                                freed++;
                                 if (page2)
                                         page = page2->next_hash;
                                 else
                                         page = (struct page *)pgd_quicklist;
-                                if (pgd_cache_size <= pgt_cache_water[1] / 4)
+                                if (pgd_cache_size <= low / 4)
                                         break;
                                 continue;
                         }
@@ -2781,6 +2779,7 @@ void srmmu_check_pgt_cache(void)
                 }
                 spin_unlock(&pgd_spinlock);
         }
+        return freed;
 }
 
 extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
@@ -2853,6 +2852,7 @@ __initfunc(void ld_mmu_srmmu(void))
         BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0));
         BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP);
         BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP);
+        BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM);
@@ -2536,6 +2536,22 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
         return (pgd_t *)ret;
 }
 
+static int sun4c_check_pgt_cache(int low, int high)
+{
+        int freed = 0;
+        if(pgtable_cache_size > high) {
+                do {
+                        if(pgd_quicklist)
+                                free_pgd_slow(get_pgd_fast()), freed++;
+                        if(pmd_quicklist)
+                                free_pmd_slow(get_pmd_fast()), freed++;
+                        if(pte_quicklist)
+                                free_pte_slow(get_pte_fast()), freed++;
+                } while(pgtable_cache_size > low);
+        }
+        return freed;
+}
+
 static void sun4c_set_pgdir(unsigned long address, pgd_t entry)
 {
         /* Nothing to do */
@@ -2803,6 +2819,7 @@ __initfunc(void ld_mmu_sun4c(void))
         BTFIXUPSET_CALL(get_pgd_fast, sun4c_pgd_alloc, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(free_pte_slow, sun4c_free_pte_slow, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(free_pgd_slow, sun4c_free_pgd_slow, BTFIXUPCALL_NORM);
+        BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(set_pgdir, sun4c_set_pgdir, BTFIXUPCALL_NOP);
@@ -43,45 +43,6 @@
 
 #ifndef __SMP__
-extern int pgt_cache_water[2];
-
-static inline void ultra_check_pgt_cache(void)
-{
-        struct page *page, *page2;
-
-        if(pgtable_cache_size > pgt_cache_water[0]) {
-                do {
-                        if(pmd_quicklist)
-                                free_pmd_slow(get_pmd_fast());
-                        if(pte_quicklist)
-                                free_pte_slow(get_pte_fast());
-                } while(pgtable_cache_size > pgt_cache_water[1]);
-        }
-        if (pgd_cache_size > pgt_cache_water[0] / 4) {
-                for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
-                        if ((unsigned long)page->pprev_hash == 3) {
-                                if (page2)
-                                        page2->next_hash = page->next_hash;
-                                else
-                                        (struct page *)pgd_quicklist = page->next_hash;
-                                page->next_hash = NULL;
-                                page->pprev_hash = NULL;
-                                pgd_cache_size -= 2;
-                                free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
-                                if (page2)
-                                        page = page2->next_hash;
-                                else
-                                        page = (struct page *)pgd_quicklist;
-                                if (pgd_cache_size <= pgt_cache_water[1] / 4)
-                                        break;
-                                continue;
-                        }
-                        page2 = page;
-                        page = page->next_hash;
-                }
-        }
-}
-
 /*
  * the idle loop on a Sparc... ;)
  */
@@ -94,7 +55,7 @@ asmlinkage int sys_idle(void)
         current->priority = -100;
         current->counter = -100;
         for (;;) {
-                ultra_check_pgt_cache();
+                check_pgt_cache();
                 run_task_queue(&tq_scheduler);
                 schedule();
         }
@@ -54,6 +54,50 @@ static __inline__ void __init_pmd(pmd_t *pmdp)
         __bfill64((void *)pmdp, &two_null_pte_table);
 }
 
+int do_check_pgt_cache(int low, int high)
+{
+        struct page *page, *page2;
+        int freed = 0;
+
+        if(pgtable_cache_size > high) {
+                do {
+#ifdef __SMP__
+                        if(pgd_quicklist)
+                                free_pgd_slow(get_pgd_fast()), freed++;
+#endif
+                        if(pte_quicklist)
+                                free_pte_slow(get_pte_fast()), freed++;
+                } while(pgtable_cache_size > low);
+        }
+#ifndef __SMP__
+        if (pgd_cache_size > high / 4) {
+                for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
+                        if ((unsigned long)page->pprev_hash == 3) {
+                                if (page2)
+                                        page2->next_hash = page->next_hash;
+                                else
+                                        (struct page *)pgd_quicklist = page->next_hash;
+                                page->next_hash = NULL;
+                                page->pprev_hash = NULL;
+                                pgd_cache_size -= 2;
+                                free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+                                freed++;
+                                if (page2)
+                                        page = page2->next_hash;
+                                else
+                                        page = (struct page *)pgd_quicklist;
+                                if (pgd_cache_size <= low / 4)
+                                        break;
+                                continue;
+                        }
+                        page2 = page;
+                        page = page->next_hash;
+                }
+        }
+#endif
+        return freed;
+}
+
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
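The #ifndef __SMP__ half of this function carries over the pgd-cache trick removed from process.c above: two pgds share one page, and the code appears to reuse page->pprev_hash as a two-bit mask of which halves sit on the quicklist, hence the == 3 test and pgd_cache_size -= 2 when a page is unlinked and freed. A toy model of that accounting (assumed semantics, illustrative names):

    #include <stdio.h>

    /* Model of the sparc64 arrangement: two pgds per page, with a 2-bit
     * mask (the kernel seems to reuse page->pprev_hash for this)
     * recording which halves are free.  Mask == 3 means both halves are
     * on the quicklist, so the whole page can be handed back. */
    struct page_model {
        unsigned long mask;   /* bit 0: first half free, bit 1: second half */
    };

    static int release_half(struct page_model *pg, int half)
    {
        pg->mask |= 1UL << half;
        if (pg->mask == 3) {  /* both pgds free: drop the page */
            pg->mask = 0;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct page_model pg = { 0 };
        printf("%d\n", release_half(&pg, 0));   /* 0: other half in use */
        printf("%d\n", release_half(&pg, 1));   /* 1: page freeable */
        return 0;
    }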
@@ -559,6 +559,8 @@ extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
 #define pte_alloc_kernel        pte_alloc
 #define pmd_alloc_kernel        pmd_alloc
 
+extern int do_check_pgt_cache(int, int);
+
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
         struct task_struct * p;
@@ -7,4 +7,6 @@
 #define module_map      vmalloc
 #define module_unmap    vfree
 
+extern int do_check_pgt_cache(int, int);
+
 #endif /* _ASMARM_PGTABLE_H */
@@ -547,6 +547,8 @@ extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
 #define pmd_free_kernel         pmd_free
 #define pmd_alloc_kernel        pmd_alloc
 
+extern int do_check_pgt_cache(int, int);
+
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
         struct task_struct * p;
@@ -9,9 +9,16 @@
 /*
  * Your basic spinlocks, allowing only a single CPU anywhere
+ *
+ * Gcc-2.7.x has a nasty bug with empty initializers.
  */
-typedef struct { int gcc_is_buggy; } spinlock_t;
-#define SPIN_LOCK_UNLOCKED { 0 }
+#if (__GNUC__ > 2) || (__GNUC_MINOR__ >= 8)
+  typedef struct { } spinlock_t;
+  #define SPIN_LOCK_UNLOCKED { }
+#else
+  typedef struct { int gcc_is_buggy; } spinlock_t;
+  #define SPIN_LOCK_UNLOCKED { 0 }
+#endif
 
 #define spin_lock_init(lock)    do { } while(0)
 #define spin_lock(lock)         do { } while(0)
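The spinlock hunk is unrelated to the page-table work: on uniprocessor builds spinlock_t carries no state, but, per the comment, gcc 2.7.x mishandles empty initializers, so the gcc_is_buggy dummy member is kept for old compilers while gcc 2.8+ gets a genuinely empty struct. A standalone illustration of the same conditional (not the kernel header; on GNU C, where an empty struct has size zero, it prints 0):

    #include <stdio.h>

    /* Same conditional the patch introduces: a zero-size lock type on
     * compilers that handle empty initializers (gcc >= 2.8), a one-int
     * dummy elsewhere. */
    #if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 8))
    typedef struct { } spinlock_t;
    #define SPIN_LOCK_UNLOCKED { }
    #else
    typedef struct { int gcc_is_buggy; } spinlock_t;
    #define SPIN_LOCK_UNLOCKED { 0 }
    #endif

    static spinlock_t lock = SPIN_LOCK_UNLOCKED;

    int main(void)
    {
        /* With the empty struct, uniprocessor spinlocks embedded in
         * other structures cost no memory at all. */
        printf("sizeof(spinlock_t) = %zu\n", sizeof(lock));
        return 0;
    }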
@@ -621,7 +621,7 @@ extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
 extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset);
 
 extern pmd_t *get_pointer_table(void);
-extern void free_pointer_table(pmd_t *);
+extern int free_pointer_table(pmd_t *);
 extern pmd_t *get_kpointer_table(void);
 extern void free_kpointer_table(pmd_t *);
@@ -671,9 +671,9 @@ extern __inline__ void free_pmd_fast(pmd_t *pmd)
         quicklists.pgtable_cache_sz++;
 }
 
-extern __inline__ void free_pmd_slow(pmd_t *pmd)
+extern __inline__ int free_pmd_slow(pmd_t *pmd)
 {
-        free_pointer_table(pmd);
+        return free_pointer_table(pmd);
 }
 
 /* The pgd cache is folded into the pmd cache, so these are dummy routines. */
@@ -789,6 +789,8 @@ extern inline pgd_t * pgd_alloc(void)
         return pgd;
 }
 
+extern int do_check_pgt_cache(int, int);
+
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
 }
@@ -489,6 +489,8 @@ extern inline pgd_t *pgd_alloc(void)
         return (pgd_t *) page;
 }
 
+extern int do_check_pgt_cache(int, int);
+
 extern pgd_t swapper_pg_dir[1024];
 extern void (*update_mmu_cache)(struct vm_area_struct *vma,
@@ -513,6 +513,8 @@ extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
 #define pmd_alloc_kernel        pmd_alloc
 #define pte_alloc_kernel        pte_alloc
 
+extern int do_check_pgt_cache(int, int);
+
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
         struct task_struct * p;
@@ -354,6 +354,7 @@ BTFIXUPDEF_CALL(pte_t *, get_pte_fast, void)
 BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
 BTFIXUPDEF_CALL(void, free_pte_slow, pte_t *)
 BTFIXUPDEF_CALL(void, free_pgd_slow, pgd_t *)
+BTFIXUPDEF_CALL(int, do_check_pgt_cache, int, int)
 
 #define get_pte_fast() BTFIXUP_CALL(get_pte_fast)()
 
 extern __inline__ pmd_t *get_pmd_fast(void)
@@ -366,6 +367,7 @@ extern __inline__ void free_pmd_slow(pmd_t *pmd)
 {
 }
 #define free_pgd_slow(pgd) BTFIXUP_CALL(free_pgd_slow)(pgd)
+#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
 
 /*
  * Allocate and free page tables. The xxx_kernel() versions are
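BTFIXUPDEF_CALL/BTFIXUPSET_CALL are sparc's boot-time fixup machinery: the call site is patched once the MMU type (sun4c vs srmmu) is known, so the generic kernel can invoke one name and get the right per-MMU routine. Behaviourally this is equivalent to a function pointer filled in by ld_mmu_sun4c()/ld_mmu_srmmu(), which this user-space approximation models (the pointer and the ld_mmu() helper are illustrative, not kernel code):

    #include <stdio.h>

    /* Behavioural model of BTFIXUPDEF_CALL/BTFIXUPSET_CALL: the kernel
     * patches branch instructions at boot; a function pointer set once
     * during init gives the same effect, at the cost of an extra
     * indirection the real mechanism avoids. */
    static int (*do_check_pgt_cache_ptr)(int low, int high);

    static int sun4c_check_pgt_cache(int low, int high)
    {
        printf("sun4c trim %d..%d\n", low, high);
        return 0;
    }

    static int srmmu_check_pgt_cache(int low, int high)
    {
        printf("srmmu trim %d..%d\n", low, high);
        return 0;
    }

    /* Plays the role of the #define that wraps BTFIXUP_CALL(). */
    #define do_check_pgt_cache(low, high) do_check_pgt_cache_ptr(low, high)

    /* Plays the role of ld_mmu_sun4c()/ld_mmu_srmmu() at boot. */
    static void ld_mmu(int is_srmmu)
    {
        do_check_pgt_cache_ptr = is_srmmu ? srmmu_check_pgt_cache
                                          : sun4c_check_pgt_cache;
    }

    int main(void)
    {
        ld_mmu(1);
        do_check_pgt_cache(25, 50);   /* dispatches to the srmmu version */
        return 0;
    }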
@@ -541,6 +541,8 @@ extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
 #define pte_alloc_kernel(pmd, addr)     pte_alloc(pmd, addr)
 #define pmd_alloc_kernel(pgd, addr)     pmd_alloc(pgd, addr)
 
+extern int do_check_pgt_cache(int, int);
+
 extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
         /* Nothing to do on sparc64 :) */
@@ -277,9 +277,11 @@ extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
 
 extern void vmtruncate(struct inode * inode, unsigned long offset);
 extern void handle_mm_fault(struct task_struct *tsk,struct vm_area_struct *vma, unsigned long address, int write_access);
-extern void check_pgt_cache(void);
 extern void make_pages_present(unsigned long addr, unsigned long end);
+extern int pgt_cache_water[2];
+extern int check_pgt_cache(void);
+
 extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
 extern void mem_init(unsigned long start_mem, unsigned long end_mem);
 extern void show_mem(void);
@@ -56,6 +56,11 @@ unsigned long max_mapnr = 0;
 unsigned long num_physpages = 0;
 void * high_memory = NULL;
 
+/* Low and high watermarks for page table cache.
+   The system should try to have pgt_water[0] <= cache elements <= pgt_water[1]
+*/
+int pgt_cache_water[2] = { 25, 50 };
+
 /*
  * We special-case the C-O-W ZERO_PAGE, because it's such
  * a common occurrence (no need to read the page to know
@@ -136,7 +141,8 @@ void clear_page_tables(struct task_struct * tsk)
                 free_one_pgd(page_dir + i);
 
         /* keep the page table cache within bounds */
-        check_pgt_cache();
+        do_check_pgt_cache(pgt_cache_water[0],
+                           pgt_cache_water[1]);
         return;
 
 out_bad:
@@ -165,7 +171,8 @@ void free_page_tables(struct mm_struct * mm)
         pgd_free(page_dir);
 
         /* keep the page table cache within bounds */
-        check_pgt_cache();
+        do_check_pgt_cache(pgt_cache_water[0],
+                           pgt_cache_water[1]);
 out:
         return;
@@ -948,21 +955,9 @@ void make_pages_present(unsigned long addr, unsigned long end)
         }
 }
 
-/* Low and high watermarks for page table cache.
-   The system should try to have pgt_water[0] <= cache elements <= pgt_water[1]
-*/
-int pgt_cache_water[2] = { 25, 50 };
-
-void check_pgt_cache(void)
+/* Returns the number of pages freed */
+int check_pgt_cache(void)
 {
-        if (pgtable_cache_size > pgt_cache_water[1]) {
-                do {
-                        if (pgd_quicklist)
-                                free_pgd_slow(get_pgd_fast());
-                        if (pmd_quicklist)
-                                free_pmd_slow(get_pmd_fast());
-                        if (pte_quicklist)
-                                free_pte_slow(get_pte_fast());
-                } while (pgtable_cache_size > pgt_cache_water[0]);
-        }
+        return do_check_pgt_cache(pgt_cache_water[0],
+                                  pgt_cache_water[1]);
 }
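The end result: the watermarks live in one place, every caller funnels through the same pair of routines, and check_pgt_cache() now reports how many pages it freed. A runnable rendering of the final split, with stand-in bodies for the pieces that live elsewhere in the patch:

    #include <stdio.h>

    /* User-space rendering of the new split: the per-arch worker takes
     * explicit watermarks, the generic wrapper supplies the globals from
     * mm/memory.c.  Names follow the patch; the bodies are stand-ins. */
    static int pgtable_cache_size = 60;
    static int pgt_cache_water[2] = { 25, 50 };

    static int do_check_pgt_cache(int low, int high)   /* per-arch worker */
    {
        int freed = 0;

        if (pgtable_cache_size > high)
            while (pgtable_cache_size > low)
                pgtable_cache_size--, freed++;
        return freed;
    }

    /* Returns the number of pages freed, as in the patched mm/memory.c. */
    static int check_pgt_cache(void)
    {
        return do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
    }

    int main(void)
    {
        int freed = check_pgt_cache();
        printf("freed %d, cache now %d\n", freed, pgtable_cache_size);
        /* prints: freed 35, cache now 25 */
        return 0;
    }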