Commit 68c45e43 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rmap 31 unlikely bad memory

From: Hugh Dickins <hugh@veritas.com>

From: Andrea Arcangeli <andrea@suse.de>

Sprinkle unlikelys throughout mm/memory.c, wherever we see a pgd_bad or a
pmd_bad; likely or unlikely on pte_same or !pte_same.  Put the jump in the
error return from do_no_page, not in the fast path.
parent d321a42d
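
For context, likely() and unlikely() tell GCC which way a branch is expected to go, so it can lay out the generated code with the expected case falling straight through and the cold case (here, the pgd_ERROR()/pmd_ERROR() recovery blocks) branching out of line. A minimal sketch of the hints, essentially as the kernel defines them in include/linux/compiler.h:

	/* Branch-prediction hints used throughout this patch; the kernel's
	 * own definitions live in include/linux/compiler.h.  The !!(x)
	 * normalizes any truthy expression to 0 or 1 before it reaches
	 * __builtin_expect(). */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

Since pgd_bad() and pmd_bad() only trigger on corrupted page tables, annotating them unlikely() keeps the common lookup path free of taken branches.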
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -97,7 +97,7 @@ static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
 	if (pmd_none(*dir))
 		return;
-	if (pmd_bad(*dir)) {
+	if (unlikely(pmd_bad(*dir))) {
 		pmd_ERROR(*dir);
 		pmd_clear(dir);
 		return;
@@ -115,7 +115,7 @@ static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
 	if (pgd_none(*dir))
 		return;
-	if (pgd_bad(*dir)) {
+	if (unlikely(pgd_bad(*dir))) {
 		pgd_ERROR(*dir);
 		pgd_clear(dir);
 		return;
@@ -232,7 +232,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 		if (pgd_none(*src_pgd))
 			goto skip_copy_pmd_range;
-		if (pgd_bad(*src_pgd)) {
+		if (unlikely(pgd_bad(*src_pgd))) {
 			pgd_ERROR(*src_pgd);
 			pgd_clear(src_pgd);
 skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -253,7 +253,7 @@ skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
 			if (pmd_none(*src_pmd))
 				goto skip_copy_pte_range;
-			if (pmd_bad(*src_pmd)) {
+			if (unlikely(pmd_bad(*src_pmd))) {
 				pmd_ERROR(*src_pmd);
 				pmd_clear(src_pmd);
 skip_copy_pte_range:
@@ -355,7 +355,7 @@ static void zap_pte_range(struct mmu_gather *tlb,
 	if (pmd_none(*pmd))
 		return;
-	if (pmd_bad(*pmd)) {
+	if (unlikely(pmd_bad(*pmd))) {
 		pmd_ERROR(*pmd);
 		pmd_clear(pmd);
 		return;
@@ -436,7 +436,7 @@ static void zap_pmd_range(struct mmu_gather *tlb,
 	if (pgd_none(*dir))
 		return;
-	if (pgd_bad(*dir)) {
+	if (unlikely(pgd_bad(*dir))) {
 		pgd_ERROR(*dir);
 		pgd_clear(dir);
 		return;
@@ -617,7 +617,7 @@ follow_page(struct mm_struct *mm, unsigned long address, int write)
 		return page;
 	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || pgd_bad(*pgd))
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 		goto out;
 	pmd = pmd_offset(pgd, address);
@@ -625,7 +625,7 @@ follow_page(struct mm_struct *mm, unsigned long address, int write)
 		goto out;
 	if (pmd_huge(*pmd))
 		return follow_huge_pmd(mm, address, pmd, write);
-	if (pmd_bad(*pmd))
+	if (unlikely(pmd_bad(*pmd)))
 		goto out;
 	ptep = pte_offset_map(pmd, address);
@@ -682,12 +682,12 @@ untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 	/* Check if page directory entry exists. */
 	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || pgd_bad(*pgd))
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 		return 1;
 	/* Check if page middle directory entry exists. */
 	pmd = pmd_offset(pgd, address);
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
 		return 1;
 	/* There is a pte slot for 'address' in 'mm'. */
@@ -1081,7 +1081,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
 	 */
 	spin_lock(&mm->page_table_lock);
 	page_table = pte_offset_map(pmd, address);
-	if (pte_same(*page_table, pte)) {
+	if (likely(pte_same(*page_table, pte))) {
 		if (PageReserved(old_page))
 			++mm->rss;
 		else
@@ -1318,7 +1318,7 @@ static int do_swap_page(struct mm_struct * mm,
 		 */
 		spin_lock(&mm->page_table_lock);
 		page_table = pte_offset_map(pmd, address);
-		if (pte_same(*page_table, orig_pte))
+		if (likely(pte_same(*page_table, orig_pte)))
 			ret = VM_FAULT_OOM;
 		else
 			ret = VM_FAULT_MINOR;
@@ -1341,7 +1341,7 @@ static int do_swap_page(struct mm_struct * mm,
 	 */
 	spin_lock(&mm->page_table_lock);
 	page_table = pte_offset_map(pmd, address);
-	if (!pte_same(*page_table, orig_pte)) {
+	if (unlikely(!pte_same(*page_table, orig_pte))) {
 		pte_unmap(page_table);
 		spin_unlock(&mm->page_table_lock);
 		unlock_page(page);
@@ -1547,12 +1547,12 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* no need to invalidate: a not-present page shouldn't be cached */
 	update_mmu_cache(vma, address, entry);
 	spin_unlock(&mm->page_table_lock);
-	goto out;
+out:
+	return ret;
 oom:
 	page_cache_release(new_page);
 	ret = VM_FAULT_OOM;
-out:
-	return ret;
+	goto out;
 }
 /*
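
The do_no_page() hunk at the end applies the same idea to control flow rather than to a condition: the exit labels are reordered so the success path falls through to the return, and the goto moves onto the rare OOM path. Schematically, as a hypothetical stand-alone sketch (not the kernel function itself):

	#include <stdio.h>

	/* The common case falls through "out:" without a taken jump;
	 * only the rare failure path pays for a goto. */
	static int fault_handler(int fail)
	{
		int ret = 0;			/* assume success */

		if (fail)			/* rare error condition */
			goto oom;
		/* ... fast-path work would happen here ... */
	out:
		return ret;
	oom:
		ret = -1;			/* error result */
		goto out;			/* the jump lives in the cold path */
	}

	int main(void)
	{
		printf("%d %d\n", fault_handler(0), fault_handler(1));
		return 0;
	}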