Commit 5528f050 authored by Roman Zippel, committed by Russell King

[PATCH] 2.5.13: remove VALID_PAGE

This patch removes VALID_PAGE(), as the test was always too late for
discontiguous memory configurations. It is replaced with pfn_valid()/
virt_addr_valid(), which are used to test the original input value.
Other helper functions:
pte_pfn() - extract the page frame number from a pte
pfn_to_page()/page_to_pfn() - convert a page frame number to/from its struct page
parent 31efb48d
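
Every call site below follows the same conversion pattern. The sketch here is not part of the patch; it only contrasts the old and new checks, assumes the generic mem_map layout, and the helper name pte_to_valid_page() is hypothetical:

/*
 * Old style: derive the struct page first and validate it afterwards.
 * With discontiguous memory the pte_page() lookup may already index
 * outside mem_map, so VALID_PAGE() comes too late:
 *
 *	struct page *page = pte_page(pte);
 *	if (!VALID_PAGE(page))
 *		return;
 *
 * New style: validate the raw page frame number first, then convert.
 */
static inline struct page *pte_to_valid_page(pte_t pte)	/* hypothetical helper */
{
	unsigned long pfn = pte_pfn(pte);	/* page number straight from the pte */

	if (!pfn_valid(pfn))			/* test the original input value */
		return NULL;
	return pfn_to_page(pfn);		/* only now index into mem_map */
}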
......@@ -150,8 +150,8 @@ static void __free_small_page(unsigned long spage, struct order *order)
unsigned long flags;
struct page *page;
page = virt_to_page(spage);
if (VALID_PAGE(page)) {
if (virt_addr_valid(spage)) {
page = virt_to_page(spage);
/*
* The container-page must be marked Reserved
......
......@@ -240,9 +240,13 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page)
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
struct page *page = pte_page(pte);
unsigned long pfn = pte_pfn(pte);
struct page *page;
if (VALID_PAGE(page) && page->mapping) {
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
if (page->mapping) {
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(page);
......
......@@ -109,6 +109,7 @@ free_initmem (void)
void
free_initrd_mem (unsigned long start, unsigned long end)
{
struct page *page;
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
......@@ -147,11 +148,12 @@ free_initrd_mem (unsigned long start, unsigned long end)
printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
if (!VALID_PAGE(virt_to_page(start)))
if (!virt_addr_valid(start))
continue;
clear_bit(PG_reserved, &virt_to_page(start)->flags);
set_page_count(virt_to_page(start), 1);
free_page(start);
page = virt_to_page(start);
clear_bit(PG_reserved, &page->flags);
set_page_count(page, 1);
__free_page(page);
++totalram_pages;
}
}
......
......@@ -116,8 +116,12 @@ void *vmalloc_uncached (unsigned long size)
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
unsigned long pfn = pte_pfn(page);
struct page *ptpage;
if (!pfn_valid(pfn))
return;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
return;
__free_page(ptpage);
if (current->mm->rss <= 0)
......
......@@ -115,8 +115,12 @@ void *vmalloc_uncached (unsigned long size)
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
unsigned long pfn = pte_pfn(page);
struct page *ptpage;
if (!pfn_valid(pfn))
return;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
return;
__free_page(ptpage);
if (current->mm->rss <= 0)
......
......@@ -290,6 +290,7 @@ void update_mmu_cache(struct vm_area_struct * vma,
unsigned long vpn;
#if defined(__SH4__)
struct page *page;
unsigned long pfn;
unsigned long ptea;
#endif
......@@ -298,11 +299,14 @@ void update_mmu_cache(struct vm_area_struct * vma,
return;
#if defined(__SH4__)
page = pte_page(pte);
if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
__set_bit(PG_mapped, &page->flags);
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!test_bit(PG_mapped, &page->flags)) {
unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
__set_bit(PG_mapped, &page->flags);
}
}
#endif
......
......@@ -19,8 +19,12 @@ static inline void forget_pte(pte_t page)
if (pte_none(page))
return;
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
unsigned long pfn = pte_pfn(page);
struct page *ptpage;
if (!pfn_valid(pfn))
return;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
return;
page_cache_release(ptpage);
return;
......
......@@ -1327,7 +1327,7 @@ static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus
unsigned long page;
page = ((unsigned long)bufptr) & PAGE_MASK;
if (!VALID_PAGE(virt_to_page(page))) {
if (!virt_addr_valid(page)) {
sun4c_flush_page(page);
return (__u32)bufptr; /* already locked */
}
......@@ -2106,7 +2106,7 @@ static void sun4c_pte_clear(pte_t *ptep) { *ptep = __pte(0); }
static int sun4c_pmd_bad(pmd_t pmd)
{
return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
(!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
(!virt_addr_valid(pmd_val(pmd))));
}
static int sun4c_pmd_present(pmd_t pmd)
......
......@@ -1312,10 +1312,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
}
if (recoverable) {
struct page *page = virt_to_page(__va(afar));
if (VALID_PAGE(page))
get_page(page);
if (pfn_valid(afar >> PAGE_SHIFT))
get_page(pfn_to_page(afar >> PAGE_SHIFT));
else
recoverable = 0;
......
......@@ -20,8 +20,12 @@ static inline void forget_pte(pte_t page)
if (pte_none(page))
return;
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
unsigned long pfn = pte_pfn(page);
struct page *ptpage;
if (!pfn_valid(pfn))
return;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
return;
page_cache_release(ptpage);
return;
......
......@@ -187,11 +187,13 @@ extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long addre
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
struct page *page = pte_page(pte);
struct page *page;
unsigned long pfn;
unsigned long pg_flags;
if (VALID_PAGE(page) &&
page->mapping &&
pfn = pte_pfn(pte);
if (pfn_valid(pfn) &&
(page = pfn_to_page(pfn), page->mapping) &&
((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
......@@ -260,10 +262,14 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
continue;
if (pte_present(pte) && pte_dirty(pte)) {
struct page *page = pte_page(pte);
struct page *page;
unsigned long pgaddr, uaddr;
unsigned long pfn = pte_pfn(pte);
if (!VALID_PAGE(page) || PageReserved(page) || !page->mapping)
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
if (PageReserved(page) || !page->mapping)
continue;
pgaddr = (unsigned long) page_address(page);
uaddr = address + offset;
......
......@@ -416,6 +416,7 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
do {
pte_t page = *pte;
struct page *ptpage;
unsigned long pfn;
address += PAGE_SIZE;
pte++;
......@@ -424,8 +425,11 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
++*total;
if (!pte_present(page))
continue;
ptpage = pte_page(page);
if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
pfn = pte_pfn(page);
if (!pfn_valid(pfn))
continue;
ptpage = pfn_to_page(pfn);
if (PageReserved(ptpage))
continue;
++*pages;
if (pte_dirty(page))
......
......@@ -102,7 +102,7 @@ unsigned long get_wchan(struct task_struct *p);
unsigned long eip = 0; \
unsigned long regs = (unsigned long)user_regs(tsk); \
if (regs > PAGE_SIZE && \
VALID_PAGE(virt_to_page(regs))) \
virt_addr_valid(regs)) \
eip = ((struct pt_regs *)regs)->irp; \
eip; })
......
......@@ -131,8 +131,12 @@ static __inline__ int get_order(unsigned long size)
#define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
......@@ -56,8 +56,9 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
}
#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0))
#define pte_same(a, b) ((a).pte_low == (b).pte_low)
#define pte_page(x) (mem_map+((unsigned long)(((x).pte_low >> PAGE_SHIFT))))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low)
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#endif /* _I386_PGTABLE_2LEVEL_H */
......@@ -86,8 +86,9 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
#define pte_page(x) (mem_map+(((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT))))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low && !(x).pte_high)
#define pte_pfn(x) (((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT)))
static inline pte_t __mk_pte(unsigned long page_nr, pgprot_t pgprot)
{
......
......@@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *p);
({ \
unsigned long eip = 0; \
if ((tsk)->thread.esp0 > PAGE_SIZE && \
(VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \
(virt_addr_valid((tsk)->thread.esp0))) \
eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
eip; })
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
......
......@@ -105,10 +105,13 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
pte_clear(ptep);
if (!pte_not_present(pte)) {
struct page *page = pte_page(pte);
if (VALID_PAGE(page)&&
(!page->mapping || !(page->mapping->i_mmap_shared)))
__clear_bit(PG_mapped, &page->flags);
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!page->mapping || !page->mapping->i_mmap_shared)
__clear_bit(PG_mapped, &page->flags);
}
}
return pte;
}
......
......@@ -76,8 +76,12 @@ mem_map_t * mem_map;
*/
void __free_pte(pte_t pte)
{
struct page *page = pte_page(pte);
if ((!VALID_PAGE(page)) || PageReserved(page))
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
if (PageReserved(page))
return;
if (pte_dirty(pte))
set_page_dirty(page);
......@@ -269,6 +273,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
do {
pte_t pte = *src_pte;
struct page *ptepage;
unsigned long pfn;
/* copy_one_pte */
......@@ -278,9 +283,11 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
swap_duplicate(pte_to_swp_entry(pte));
goto cont_copy_pte_range;
}
ptepage = pte_page(pte);
if ((!VALID_PAGE(ptepage)) ||
PageReserved(ptepage))
pfn = pte_pfn(pte);
if (!pfn_valid(pfn))
goto cont_copy_pte_range;
ptepage = pfn_to_page(pfn);
if (PageReserved(ptepage))
goto cont_copy_pte_range;
/* If it's a COW mapping, write protect it both in the parent and the child */
......@@ -356,9 +363,13 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
if (pte_none(pte))
continue;
if (pte_present(pte)) {
struct page *page = pte_page(pte);
if (VALID_PAGE(page) && !PageReserved(page))
freed ++;
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!PageReserved(page))
freed++;
}
/* This will eventually call __free_pte on the pte. */
tlb_remove_page(tlb, ptep, address + offset);
} else {
......@@ -451,6 +462,7 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
unsigned long pfn;
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || pgd_bad(*pgd))
......@@ -472,8 +484,11 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
preempt_enable();
if (pte_present(pte)) {
if (!write ||
(pte_write(pte) && pte_dirty(pte)))
return pte_page(pte);
(pte_write(pte) && pte_dirty(pte))) {
pfn = pte_pfn(pte);
if (pfn_valid(pfn))
return pfn_to_page(pfn);
}
}
out:
......@@ -488,8 +503,6 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
static inline struct page * get_page_map(struct page *page)
{
if (!VALID_PAGE(page))
return 0;
return page;
}
......@@ -860,11 +873,10 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned
end = PMD_SIZE;
do {
struct page *page;
pte_t oldpage;
oldpage = ptep_get_and_clear(pte);
pte_t oldpage = ptep_get_and_clear(pte);
unsigned long pfn = phys_addr >> PAGE_SHIFT;
page = virt_to_page(__va(phys_addr));
if ((!VALID_PAGE(page)) || PageReserved(page))
if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
set_pte(pte, mk_pte_phys(phys_addr, prot));
forget_pte(oldpage);
address += PAGE_SIZE;
......@@ -977,10 +989,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
{
struct page *old_page, *new_page;
unsigned long pfn = pte_pfn(pte);
old_page = pte_page(pte);
if (!VALID_PAGE(old_page))
if (!pfn_valid(pfn))
goto bad_wp_page;
old_page = pfn_to_page(pfn);
if (!TestSetPageLocked(old_page)) {
int reuse = can_share_swap_page(old_page);
......
......@@ -26,10 +26,14 @@ static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
pte_t pte = *ptep;
if (pte_present(pte) && pte_dirty(pte)) {
struct page *page = pte_page(pte);
if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
flush_tlb_page(vma, address);
set_page_dirty(page);
struct page *page;
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
if (!PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
flush_tlb_page(vma, address);
set_page_dirty(page);
}
}
}
return 0;
......
......@@ -101,8 +101,6 @@ static void __free_pages_ok (struct page *page, unsigned int order)
BUG();
if (page->mapping)
BUG();
if (!VALID_PAGE(page))
BUG();
if (PageLocked(page))
BUG();
if (PageLRU(page))
......@@ -295,8 +293,6 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
BUG();
if (page->mapping)
BUG();
if (!VALID_PAGE(page))
BUG();
if (PageLocked(page))
BUG();
if (PageLRU(page))
......@@ -477,8 +473,10 @@ void __free_pages(struct page *page, unsigned int order)
void free_pages(unsigned long addr, unsigned int order)
{
if (addr != 0)
if (addr != 0) {
BUG_ON(!virt_addr_valid(addr));
__free_pages(virt_to_page(addr), order);
}
}
/*
......
......@@ -1415,15 +1415,16 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
#if DEBUG
# define CHECK_NR(pg) \
do { \
if (!VALID_PAGE(pg)) { \
if (!virt_addr_valid(pg)) { \
printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
} \
} while (0)
# define CHECK_PAGE(page) \
# define CHECK_PAGE(addr) \
do { \
CHECK_NR(page); \
struct page *page = virt_to_page(addr); \
CHECK_NR(addr); \
if (!PageSlab(page)) { \
printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
......@@ -1439,7 +1440,7 @@ static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
{
slab_t* slabp;
CHECK_PAGE(virt_to_page(objp));
CHECK_PAGE(objp);
/* reduces memory footprint
*
if (OPTIMIZE(cachep))
......@@ -1519,7 +1520,7 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
#ifdef CONFIG_SMP
cpucache_t *cc = cc_data(cachep);
CHECK_PAGE(virt_to_page(objp));
CHECK_PAGE(objp);
if (cc) {
int batchcount;
if (cc->avail < cc->limit) {
......@@ -1601,7 +1602,7 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
{
unsigned long flags;
#if DEBUG
CHECK_PAGE(virt_to_page(objp));
CHECK_PAGE(objp);
if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
BUG();
#endif
......@@ -1626,7 +1627,7 @@ void kfree (const void *objp)
if (!objp)
return;
local_irq_save(flags);
CHECK_PAGE(virt_to_page(objp));
CHECK_PAGE(objp);
c = GET_PAGE_CACHE(virt_to_page(objp));
__kmem_cache_free(c, (void*)objp);
local_irq_restore(flags);
......
......@@ -45,8 +45,12 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
if (pte_none(page))
continue;
if (pte_present(page)) {
struct page *ptpage = pte_page(page);
if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
struct page *ptpage;
unsigned long pfn = pte_pfn(page);
if (!pfn_valid(pfn))
continue;
ptpage = pfn_to_page(pfn);
if (!PageReserved(ptpage))
__free_page(ptpage);
continue;
}
......
......@@ -216,9 +216,10 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
do {
if (pte_present(*pte)) {
struct page *page = pte_page(*pte);
unsigned long pfn = pte_pfn(*pte);
struct page *page = pfn_to_page(pfn);
if (VALID_PAGE(page) && !PageReserved(page)) {
if (pfn_valid(pfn) && !PageReserved(page)) {
count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
if (!count) {
address += PAGE_SIZE;
......