Commit 5528f050 authored by Roman Zippel, committed by Russell King

[PATCH] 2.5.13: remove VALID_PAGE

This patch removes VALID_PAGE(), as the test always came too late for
discontinuous memory configurations. It is replaced with pfn_valid()/
virt_addr_valid(), which test the original input value instead.
Other helper functions:
pte_pfn() - extract the page frame number from a pte
pfn_to_page()/page_to_pfn() - convert a page frame number to/from a struct page pointer
parent 31efb48d
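As an illustration only (not part of the commit), the pattern applied throughout the diff replaces "convert, then validate the struct page" with "validate the pfn, then convert". A minimal sketch of the idiom, assuming a pte obtained from the usual page-table walk:

	/*
	 * Illustrative sketch, not taken from the patch.  The old idiom
	 * converted first and validated afterwards -- pte_page() may already
	 * index mem_map out of range on discontiguous memory:
	 *
	 *	struct page *page = pte_page(pte);
	 *	if (!VALID_PAGE(page) || PageReserved(page))
	 *		return;
	 *
	 * The new idiom validates the page frame number before converting.
	 */
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (!pfn_valid(pfn))
		return;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return;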
@@ -150,8 +150,8 @@ static void __free_small_page(unsigned long spage, struct order *order)
 	unsigned long flags;
 	struct page *page;
 
-	page = virt_to_page(spage);
-	if (VALID_PAGE(page)) {
+	if (virt_addr_valid(spage)) {
+		page = virt_to_page(spage);
 		/*
 		 * The container-page must be marked Reserved
...
@@ -240,9 +240,13 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page)
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-	struct page *page = pte_page(pte);
+	unsigned long pfn = pte_pfn(pte);
+	struct page *page;
 
-	if (VALID_PAGE(page) && page->mapping) {
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	if (page->mapping) {
 		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
 			__flush_dcache_page(page);
...
@@ -109,6 +109,7 @@ free_initmem (void)
 void
 free_initrd_mem (unsigned long start, unsigned long end)
 {
+	struct page *page;
 	/*
 	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
 	 * Thus EFI and the kernel may have different page sizes. It is
@@ -147,11 +148,12 @@ free_initrd_mem (unsigned long start, unsigned long end)
 	printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
 	for (; start < end; start += PAGE_SIZE) {
-		if (!VALID_PAGE(virt_to_page(start)))
+		if (!virt_addr_valid(start))
 			continue;
-		clear_bit(PG_reserved, &virt_to_page(start)->flags);
-		set_page_count(virt_to_page(start), 1);
-		free_page(start);
+		page = virt_to_page(start);
+		clear_bit(PG_reserved, &page->flags);
+		set_page_count(page, 1);
+		__free_page(page);
 		++totalram_pages;
 	}
 }
...
@@ -116,8 +116,12 @@ void *vmalloc_uncached (unsigned long size)
 static inline void free_pte(pte_t page)
 {
 	if (pte_present(page)) {
-		struct page *ptpage = pte_page(page);
-		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+		unsigned long pfn = pte_pfn(page);
+		struct page *ptpage;
+		if (!pfn_valid(pfn))
+			return;
+		ptpage = pfn_to_page(pfn);
+		if (PageReserved(ptpage))
 			return;
 		__free_page(ptpage);
 		if (current->mm->rss <= 0)
...
@@ -115,8 +115,12 @@ void *vmalloc_uncached (unsigned long size)
 static inline void free_pte(pte_t page)
 {
 	if (pte_present(page)) {
-		struct page *ptpage = pte_page(page);
-		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+		unsigned long pfn = pte_pfn(page);
+		struct page *ptpage;
+		if (!pfn_valid(pfn))
+			return;
+		ptpage = pfn_to_page(pfn);
+		if (PageReserved(ptpage))
 			return;
 		__free_page(ptpage);
 		if (current->mm->rss <= 0)
...
@@ -290,6 +290,7 @@ void update_mmu_cache(struct vm_area_struct * vma,
 	unsigned long vpn;
 #if defined(__SH4__)
 	struct page *page;
+	unsigned long pfn;
 	unsigned long ptea;
 #endif
@@ -298,11 +299,14 @@ void update_mmu_cache(struct vm_area_struct * vma,
 		return;
 
 #if defined(__SH4__)
-	page = pte_page(pte);
-	if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
-		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-		__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-		__set_bit(PG_mapped, &page->flags);
+	pfn = pte_pfn(pte);
+	if (pfn_valid(pfn)) {
+		page = pfn_to_page(pfn);
+		if (!test_bit(PG_mapped, &page->flags)) {
+			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
+			__set_bit(PG_mapped, &page->flags);
+		}
 	}
 #endif
...
@@ -19,8 +19,12 @@ static inline void forget_pte(pte_t page)
 	if (pte_none(page))
 		return;
 	if (pte_present(page)) {
-		struct page *ptpage = pte_page(page);
-		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+		unsigned long pfn = pte_pfn(page);
+		struct page *ptpage;
+		if (!pfn_valid(pfn))
+			return;
+		ptpage = pfn_to_page(pfn);
+		if (PageReserved(ptpage))
 			return;
 		page_cache_release(ptpage);
 		return;
...
@@ -1327,7 +1327,7 @@ static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus
 	unsigned long page;
 
 	page = ((unsigned long)bufptr) & PAGE_MASK;
-	if (!VALID_PAGE(virt_to_page(page))) {
+	if (!virt_addr_valid(page)) {
 		sun4c_flush_page(page);
 		return (__u32)bufptr; /* already locked */
 	}
@@ -2106,7 +2106,7 @@ static void sun4c_pte_clear(pte_t *ptep) { *ptep = __pte(0); }
 static int sun4c_pmd_bad(pmd_t pmd)
 {
 	return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
-		(!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
+		(!virt_addr_valid(pmd_val(pmd))));
 }
 
 static int sun4c_pmd_present(pmd_t pmd)
...
@@ -1312,10 +1312,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
 	}
 
 	if (recoverable) {
-		struct page *page = virt_to_page(__va(afar));
-
-		if (VALID_PAGE(page))
-			get_page(page);
+		if (pfn_valid(afar >> PAGE_SHIFT))
+			get_page(pfn_to_page(afar >> PAGE_SHIFT));
 		else
 			recoverable = 0;
...
@@ -20,8 +20,12 @@ static inline void forget_pte(pte_t page)
 	if (pte_none(page))
 		return;
 	if (pte_present(page)) {
-		struct page *ptpage = pte_page(page);
-		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+		unsigned long pfn = pte_pfn(page);
+		struct page *ptpage;
+		if (!pfn_valid(pfn))
+			return;
+		ptpage = pfn_to_page(pfn);
+		if (PageReserved(ptpage))
 			return;
 		page_cache_release(ptpage);
 		return;
...
@@ -187,11 +187,13 @@ extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long addre
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-	struct page *page = pte_page(pte);
+	struct page *page;
+	unsigned long pfn;
 	unsigned long pg_flags;
 
-	if (VALID_PAGE(page) &&
-	    page->mapping &&
+	pfn = pte_pfn(pte);
+	if (pfn_valid(pfn) &&
+	    (page = pfn_to_page(pfn), page->mapping) &&
 	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
 		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
@@ -260,10 +262,14 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
 			continue;
 		if (pte_present(pte) && pte_dirty(pte)) {
-			struct page *page = pte_page(pte);
+			struct page *page;
 			unsigned long pgaddr, uaddr;
+			unsigned long pfn = pte_pfn(pte);
 
-			if (!VALID_PAGE(page) || PageReserved(page) || !page->mapping)
+			if (!pfn_valid(pfn))
+				continue;
+			page = pfn_to_page(pfn);
+			if (PageReserved(page) || !page->mapping)
 				continue;
 			pgaddr = (unsigned long) page_address(page);
 			uaddr = address + offset;
...
@@ -416,6 +416,7 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
 	do {
 		pte_t page = *pte;
 		struct page *ptpage;
+		unsigned long pfn;
 
 		address += PAGE_SIZE;
 		pte++;
@@ -424,8 +425,11 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
 		++*total;
 		if (!pte_present(page))
 			continue;
-		ptpage = pte_page(page);
-		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+		pfn = pte_pfn(page);
+		if (!pfn_valid(pfn))
+			continue;
+		ptpage = pfn_to_page(pfn);
+		if (PageReserved(ptpage))
 			continue;
 		++*pages;
 		if (pte_dirty(page))
...
@@ -102,7 +102,7 @@ unsigned long get_wchan(struct task_struct *p);
 	unsigned long eip = 0; \
 	unsigned long regs = (unsigned long)user_regs(tsk); \
 	if (regs > PAGE_SIZE && \
-	    VALID_PAGE(virt_to_page(regs))) \
+	    virt_addr_valid(regs)) \
 		eip = ((struct pt_regs *)regs)->irp; \
 	eip; })
...
@@ -131,8 +131,12 @@ static __inline__ int get_order(unsigned long size)
 #define MAXMEM			((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
 #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define virt_to_page(kaddr)	(mem_map + (__pa(kaddr) >> PAGE_SHIFT))
-#define VALID_PAGE(page)	((page - mem_map) < max_mapnr)
+#define pfn_to_page(pfn)	(mem_map + (pfn))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_valid(pfn)		((pfn) < max_mapnr)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
...
@@ -56,8 +56,9 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 }
 
 #define ptep_get_and_clear(xp)	__pte(xchg(&(xp)->pte_low, 0))
 #define pte_same(a, b)		((a).pte_low == (b).pte_low)
-#define pte_page(x)		(mem_map+((unsigned long)(((x).pte_low >> PAGE_SHIFT))))
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define pte_none(x)		(!(x).pte_low)
+#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 #define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
 
 #endif /* _I386_PGTABLE_2LEVEL_H */
@@ -86,8 +86,9 @@ static inline int pte_same(pte_t a, pte_t b)
 	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 }
 
-#define pte_page(x)	(mem_map+(((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT))))
+#define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_none(x)	(!(x).pte_low && !(x).pte_high)
+#define pte_pfn(x)	(((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT)))
 
 static inline pte_t __mk_pte(unsigned long page_nr, pgprot_t pgprot)
 {
...
@@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *p);
 ({ \
 	unsigned long eip = 0; \
 	if ((tsk)->thread.esp0 > PAGE_SIZE && \
-	    (VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \
+	    (virt_addr_valid((tsk)->thread.esp0))) \
 		eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
 	eip; })
 #define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
...
@@ -105,10 +105,13 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
 	pte_clear(ptep);
 	if (!pte_not_present(pte)) {
-		struct page *page = pte_page(pte);
-		if (VALID_PAGE(page) &&
-		    (!page->mapping || !(page->mapping->i_mmap_shared)))
-			__clear_bit(PG_mapped, &page->flags);
+		struct page *page;
+		unsigned long pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			page = pfn_to_page(pfn);
+			if (!page->mapping || !page->mapping->i_mmap_shared)
+				__clear_bit(PG_mapped, &page->flags);
+		}
 	}
 	return pte;
 }
...
@@ -76,8 +76,12 @@ mem_map_t * mem_map;
  */
 void __free_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	if ((!VALID_PAGE(page)) || PageReserved(page))
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+	if (!pfn_valid(pfn))
+		return;
+	page = pfn_to_page(pfn);
+	if (PageReserved(page))
 		return;
 	if (pte_dirty(pte))
 		set_page_dirty(page);
@@ -269,6 +273,7 @@ skip_copy_pte_range:	address = (address + PMD_SIZE) & PMD_MASK;
 		do {
 			pte_t pte = *src_pte;
 			struct page *ptepage;
+			unsigned long pfn;
 
 			/* copy_one_pte */
@@ -278,9 +283,11 @@ skip_copy_pte_range:	address = (address + PMD_SIZE) & PMD_MASK;
 				swap_duplicate(pte_to_swp_entry(pte));
 				goto cont_copy_pte_range;
 			}
-			ptepage = pte_page(pte);
-			if ((!VALID_PAGE(ptepage)) ||
-			    PageReserved(ptepage))
+			pfn = pte_pfn(pte);
+			if (!pfn_valid(pfn))
+				goto cont_copy_pte_range;
+			ptepage = pfn_to_page(pfn);
+			if (PageReserved(ptepage))
 				goto cont_copy_pte_range;
 
 			/* If it's a COW mapping, write protect it both in the parent and the child */
@@ -356,9 +363,13 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
 		if (pte_none(pte))
 			continue;
 		if (pte_present(pte)) {
-			struct page *page = pte_page(pte);
-			if (VALID_PAGE(page) && !PageReserved(page))
-				freed ++;
+			struct page *page;
+			unsigned long pfn = pte_pfn(pte);
+			if (pfn_valid(pfn)) {
+				page = pfn_to_page(pfn);
+				if (!PageReserved(page))
+					freed++;
+			}
 			/* This will eventually call __free_pte on the pte. */
 			tlb_remove_page(tlb, ptep, address + offset);
 		} else {
@@ -451,6 +462,7 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *ptep, pte;
+	unsigned long pfn;
 
 	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
@@ -472,8 +484,11 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
 	preempt_enable();
 	if (pte_present(pte)) {
 		if (!write ||
-		    (pte_write(pte) && pte_dirty(pte)))
-			return pte_page(pte);
+		    (pte_write(pte) && pte_dirty(pte))) {
+			pfn = pte_pfn(pte);
+			if (pfn_valid(pfn))
+				return pfn_to_page(pfn);
+		}
 	}
 
 out:
@@ -488,8 +503,6 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
 static inline struct page * get_page_map(struct page *page)
 {
-	if (!VALID_PAGE(page))
-		return 0;
 	return page;
 }
@@ -860,11 +873,10 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned
 		end = PMD_SIZE;
 	do {
 		struct page *page;
-		pte_t oldpage;
-		oldpage = ptep_get_and_clear(pte);
+		pte_t oldpage = ptep_get_and_clear(pte);
+		unsigned long pfn = phys_addr >> PAGE_SHIFT;
 
-		page = virt_to_page(__va(phys_addr));
-		if ((!VALID_PAGE(page)) || PageReserved(page))
+		if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
 			set_pte(pte, mk_pte_phys(phys_addr, prot));
 		forget_pte(oldpage);
 		address += PAGE_SIZE;
@@ -977,10 +989,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 	unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
 {
 	struct page *old_page, *new_page;
+	unsigned long pfn = pte_pfn(pte);
 
-	old_page = pte_page(pte);
-	if (!VALID_PAGE(old_page))
+	if (!pfn_valid(pfn))
 		goto bad_wp_page;
+	old_page = pfn_to_page(pfn);
 
 	if (!TestSetPageLocked(old_page)) {
 		int reuse = can_share_swap_page(old_page);
...
@@ -26,10 +26,14 @@ static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
 	pte_t pte = *ptep;
 
 	if (pte_present(pte) && pte_dirty(pte)) {
-		struct page *page = pte_page(pte);
-		if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
-			flush_tlb_page(vma, address);
-			set_page_dirty(page);
+		struct page *page;
+		unsigned long pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			page = pfn_to_page(pfn);
+			if (!PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
+				flush_tlb_page(vma, address);
+				set_page_dirty(page);
+			}
 		}
 	}
 	return 0;
...
@@ -101,8 +101,6 @@ static void __free_pages_ok (struct page *page, unsigned int order)
 		BUG();
 	if (page->mapping)
 		BUG();
-	if (!VALID_PAGE(page))
-		BUG();
 	if (PageLocked(page))
 		BUG();
 	if (PageLRU(page))
@@ -295,8 +293,6 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
 		BUG();
 	if (page->mapping)
 		BUG();
-	if (!VALID_PAGE(page))
-		BUG();
 	if (PageLocked(page))
 		BUG();
 	if (PageLRU(page))
@@ -477,8 +473,10 @@ void __free_pages(struct page *page, unsigned int order)
 void free_pages(unsigned long addr, unsigned int order)
 {
-	if (addr != 0)
+	if (addr != 0) {
+		BUG_ON(!virt_addr_valid(addr));
 		__free_pages(virt_to_page(addr), order);
+	}
 }
 
 /*
...
@@ -1415,15 +1415,16 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 #if DEBUG
 # define CHECK_NR(pg) \
 	do { \
-		if (!VALID_PAGE(pg)) { \
+		if (!virt_addr_valid(pg)) { \
 			printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
 				(unsigned long)objp); \
 			BUG(); \
 		} \
 	} while (0)
-# define CHECK_PAGE(page) \
+# define CHECK_PAGE(addr) \
 	do { \
-		CHECK_NR(page); \
+		struct page *page = virt_to_page(addr); \
+		CHECK_NR(addr); \
 		if (!PageSlab(page)) { \
 			printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
 				(unsigned long)objp); \
@@ -1439,7 +1440,7 @@ static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
 {
 	slab_t* slabp;
 
-	CHECK_PAGE(virt_to_page(objp));
+	CHECK_PAGE(objp);
 	/* reduces memory footprint
 	 *
 	if (OPTIMIZE(cachep))
@@ -1519,7 +1520,7 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
 #ifdef CONFIG_SMP
 	cpucache_t *cc = cc_data(cachep);
 
-	CHECK_PAGE(virt_to_page(objp));
+	CHECK_PAGE(objp);
 	if (cc) {
 		int batchcount;
 		if (cc->avail < cc->limit) {
@@ -1601,7 +1602,7 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
 {
 	unsigned long flags;
 #if DEBUG
-	CHECK_PAGE(virt_to_page(objp));
+	CHECK_PAGE(objp);
 	if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
 		BUG();
 #endif
@@ -1626,7 +1627,7 @@ void kfree (const void *objp)
 	if (!objp)
 		return;
 	local_irq_save(flags);
-	CHECK_PAGE(virt_to_page(objp));
+	CHECK_PAGE(objp);
 	c = GET_PAGE_CACHE(virt_to_page(objp));
 	__kmem_cache_free(c, (void*)objp);
 	local_irq_restore(flags);
...
@@ -45,8 +45,12 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
 		if (pte_none(page))
 			continue;
 		if (pte_present(page)) {
-			struct page *ptpage = pte_page(page);
-			if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
+			struct page *ptpage;
+			unsigned long pfn = pte_pfn(page);
+			if (!pfn_valid(pfn))
+				continue;
+			ptpage = pfn_to_page(pfn);
+			if (!PageReserved(ptpage))
 				__free_page(ptpage);
 			continue;
 		}
...
@@ -216,9 +216,10 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
 	do {
 		if (pte_present(*pte)) {
-			struct page *page = pte_page(*pte);
+			unsigned long pfn = pte_pfn(*pte);
+			struct page *page = pfn_to_page(pfn);
 
-			if (VALID_PAGE(page) && !PageReserved(page)) {
+			if (pfn_valid(pfn) && !PageReserved(page)) {
 				count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
 				if (!count) {
 					address += PAGE_SIZE;
...