Commit c1523e5c authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] ptwalk: map and unmap_vm_area

Convert unmap_vm_area and map_vm_area pagetable walkers to loops using
p?d_addr_end; rename internal levels vunmap_p??_range, vmap_p??_range.
map_vm_area shows the style when allocating: allocs moved down a level.
Replace KERN_CRIT Whee message by boring WARN_ON.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f801372a
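
The loops below rely on the p?d_addr_end helpers introduced alongside this patch in include/asm-generic/pgtable.h. Each one returns the next table boundary after addr, clamped to the end of the range being walked. The pmd-level helper is, in its generic form, roughly the following sketch (the pgd and pud variants are analogous):

	/*
	 * Boundary of the pmd covering addr, clamped to end.  Comparing
	 * (x - 1) keeps the result correct even when the boundary wraps
	 * to 0 at the very top of the address space, which is why the
	 * new loops can test addr != end instead of the old
	 * "address && address < end" overflow guard.
	 */
	#define pmd_addr_end(addr, end)						\
	({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
		(__boundary - 1 < (end) - 1)? __boundary: (end);		\
	})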
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -23,199 +23,137 @@
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
-static void unmap_area_pte(pmd_t *pmd, unsigned long address,
-				unsigned long size)
+static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
-	unsigned long base, end;
 	pte_t *pte;
 
 	if (pmd_none_or_clear_bad(pmd))
 		return;
-
-	pte = pte_offset_kernel(pmd, address);
-	base = address & PMD_MASK;
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-
+	pte = pte_offset_kernel(pmd, addr);
 	do {
-		pte_t page;
-		page = ptep_get_and_clear(&init_mm, base + address, pte);
-		address += PAGE_SIZE;
-		pte++;
-		if (pte_none(page))
-			continue;
-		if (pte_present(page))
-			continue;
-		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
-	} while (address < end);
+		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void unmap_area_pmd(pud_t *pud, unsigned long address,
-				unsigned long size)
+static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
 {
-	unsigned long base, end;
 	pmd_t *pmd;
+	unsigned long next;
 
 	if (pud_none_or_clear_bad(pud))
 		return;
-
-	pmd = pmd_offset(pud, address);
-	base = address & PUD_MASK;
-	address &= ~PUD_MASK;
-	end = address + size;
-	if (end > PUD_SIZE)
-		end = PUD_SIZE;
-
+	pmd = pmd_offset(pud, addr);
 	do {
-		unmap_area_pte(pmd, base + address, end - address);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
+		next = pmd_addr_end(addr, end);
+		vunmap_pte_range(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
 }
 
-static void unmap_area_pud(pgd_t *pgd, unsigned long address,
-			   unsigned long size)
+static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
-	unsigned long base, end;
+	unsigned long next;
 
 	if (pgd_none_or_clear_bad(pgd))
 		return;
-
-	pud = pud_offset(pgd, address);
-	base = address & PGDIR_MASK;
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-
+	pud = pud_offset(pgd, addr);
 	do {
-		unmap_area_pmd(pud, base + address, end - address);
-		address = (address + PUD_SIZE) & PUD_MASK;
-		pud++;
-	} while (address && (address < end));
+		next = pud_addr_end(addr, end);
+		vunmap_pmd_range(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
 }
 
-static int map_area_pte(pte_t *pte, unsigned long address,
-			unsigned long size, pgprot_t prot,
-			struct page ***pages)
+void unmap_vm_area(struct vm_struct *area)
 {
-	unsigned long base, end;
+	pgd_t *pgd;
+	unsigned long next;
+	unsigned long addr = (unsigned long) area->addr;
+	unsigned long end = addr + area->size;
 
-	base = address & PMD_MASK;
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-
+	BUG_ON(addr >= end);
+	pgd = pgd_offset_k(addr);
+	flush_cache_vunmap(addr, end);
 	do {
-		struct page *page = **pages;
-		WARN_ON(!pte_none(*pte));
-		if (!page)
-			return -ENOMEM;
-		set_pte_at(&init_mm, base + address, pte, mk_pte(page, prot));
-		address += PAGE_SIZE;
-		pte++;
-		(*pages)++;
-	} while (address < end);
-	return 0;
+		next = pgd_addr_end(addr, end);
+		vunmap_pud_range(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+	flush_tlb_kernel_range((unsigned long) area->addr, end);
 }
 
-static int map_area_pmd(pmd_t *pmd, unsigned long address,
-			unsigned long size, pgprot_t prot,
-			struct page ***pages)
+static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			pgprot_t prot, struct page ***pages)
 {
-	unsigned long base, end;
+	pte_t *pte;
 
-	base = address & PUD_MASK;
-	address &= ~PUD_MASK;
-	end = address + size;
-	if (end > PUD_SIZE)
-		end = PUD_SIZE;
-
-	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, base + address);
-		if (!pte)
-			return -ENOMEM;
-		if (map_area_pte(pte, base + address, end - address, prot, pages))
+	pte = pte_alloc_kernel(&init_mm, pmd, addr);
+	if (!pte)
+		return -ENOMEM;
+	do {
+		struct page *page = **pages;
+		WARN_ON(!pte_none(*pte));
+		if (!page)
 			return -ENOMEM;
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
-
+		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
+		(*pages)++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 	return 0;
 }
 
-static int map_area_pud(pud_t *pud, unsigned long address,
-			unsigned long end, pgprot_t prot,
-			struct page ***pages)
+static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
+			pgprot_t prot, struct page ***pages)
 {
-	do {
-		pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			return -ENOMEM;
-		if (map_area_pmd(pmd, address, end - address, prot, pages))
-			return -ENOMEM;
-		address = (address + PUD_SIZE) & PUD_MASK;
-		pud++;
-	} while (address && address < end);
-
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_alloc(&init_mm, pud, addr);
+	if (!pmd)
+		return -ENOMEM;
+	do {
+		next = pmd_addr_end(addr, end);
+		if (vmap_pte_range(pmd, addr, next, prot, pages))
+			return -ENOMEM;
+	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
 
-void unmap_vm_area(struct vm_struct *area)
+static int vmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+			pgprot_t prot, struct page ***pages)
 {
-	unsigned long address = (unsigned long) area->addr;
-	unsigned long end = (address + area->size);
-	unsigned long next;
-	pgd_t *pgd;
-	int i;
+	pud_t *pud;
+	unsigned long next;
 
-	pgd = pgd_offset_k(address);
-	flush_cache_vunmap(address, end);
-	for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
-		next = (address + PGDIR_SIZE) & PGDIR_MASK;
-		if (next <= address || next > end)
-			next = end;
-		unmap_area_pud(pgd, address, next - address);
-		address = next;
-		pgd++;
-	}
-	flush_tlb_kernel_range((unsigned long) area->addr, end);
+	pud = pud_alloc(&init_mm, pgd, addr);
+	if (!pud)
+		return -ENOMEM;
+	do {
+		next = pud_addr_end(addr, end);
+		if (vmap_pmd_range(pud, addr, next, prot, pages))
+			return -ENOMEM;
+	} while (pud++, addr = next, addr != end);
+	return 0;
 }
 
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
-	unsigned long address = (unsigned long) area->addr;
-	unsigned long end = address + (area->size-PAGE_SIZE);
-	unsigned long next;
 	pgd_t *pgd;
-	int err = 0;
-	int i;
+	unsigned long next;
+	unsigned long addr = (unsigned long) area->addr;
+	unsigned long end = addr + area->size - PAGE_SIZE;
+	int err;
 
-	pgd = pgd_offset_k(address);
+	BUG_ON(addr >= end);
+	pgd = pgd_offset_k(addr);
 	spin_lock(&init_mm.page_table_lock);
-	for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
-		pud_t *pud = pud_alloc(&init_mm, pgd, address);
-		if (!pud) {
-			err = -ENOMEM;
-			break;
-		}
-		next = (address + PGDIR_SIZE) & PGDIR_MASK;
-		if (next < address || next > end)
-			next = end;
-		if (map_area_pud(pud, address, next, prot, pages)) {
-			err = -ENOMEM;
+	do {
+		next = pgd_addr_end(addr, end);
+		err = vmap_pud_range(pgd, addr, next, prot, pages);
+		if (err)
 			break;
-		}
-		address = next;
-		pgd++;
-	}
+	} while (pgd++, addr = next, addr != end);
 	spin_unlock(&init_mm.page_table_lock);
 	flush_cache_vmap((unsigned long) area->addr, end);
 	return err;
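
For context on how map_vm_area is called, and why its end stops one page short: area->size includes the guard hole that get_vm_area reserves, which unmap_vm_area may clear (harmlessly, since it was never mapped) but map_vm_area must not map. A sketch of vmap(), roughly as it stood in mm/vmalloc.c of this era:

	void *vmap(struct page **pages, unsigned int count,
		   unsigned long flags, pgprot_t prot)
	{
		struct vm_struct *area;

		if (count > num_physpages)
			return NULL;

		/* get_vm_area adds PAGE_SIZE for the guard hole */
		area = get_vm_area((count << PAGE_SHIFT), flags);
		if (!area)
			return NULL;

		/* map_vm_area advances the caller's pages cursor as it fills ptes */
		if (map_vm_area(area, prot, &pages)) {
			vunmap(area->addr);
			return NULL;
		}

		return area->addr;
	}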