Commit 33709b5c authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] start anon pages on the active list (properly this time)

Use lru_cache_add_active() to ensure that pages which are, or will be,
mapped into pagetables are started out on the active list.
parent 228c3d15
...@@ -306,7 +306,7 @@ void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long a ...@@ -306,7 +306,7 @@ void put_dirty_page(struct task_struct * tsk, struct page *page, unsigned long a
pte_unmap(pte); pte_unmap(pte);
goto out; goto out;
} }
lru_cache_add(page); lru_cache_add_active(page);
flush_dcache_page(page); flush_dcache_page(page);
flush_page_to_ram(page); flush_page_to_ram(page);
set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, PAGE_COPY)))); set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, PAGE_COPY))));
......
...@@ -842,7 +842,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, ...@@ -842,7 +842,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
page_remove_rmap(old_page, page_table); page_remove_rmap(old_page, page_table);
break_cow(vma, new_page, address, page_table); break_cow(vma, new_page, address, page_table);
page_add_rmap(new_page, page_table); page_add_rmap(new_page, page_table);
lru_cache_add(new_page); lru_cache_add_active(new_page);
/* Free the old page.. */ /* Free the old page.. */
new_page = old_page; new_page = old_page;
...@@ -1092,7 +1092,7 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, ...@@ -1092,7 +1092,7 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma,
mm->rss++; mm->rss++;
flush_page_to_ram(page); flush_page_to_ram(page);
entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
lru_cache_add(page); lru_cache_add_active(page);
mark_page_accessed(page); mark_page_accessed(page);
} }
...@@ -1151,7 +1151,7 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma, ...@@ -1151,7 +1151,7 @@ static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
} }
copy_user_highpage(page, new_page, address); copy_user_highpage(page, new_page, address);
page_cache_release(new_page); page_cache_release(new_page);
lru_cache_add(page); lru_cache_add_active(page);
new_page = page; new_page = page;
} }
......
...@@ -376,7 +376,7 @@ struct page * read_swap_cache_async(swp_entry_t entry) ...@@ -376,7 +376,7 @@ struct page * read_swap_cache_async(swp_entry_t entry)
/* /*
* Initiate read into locked page and return. * Initiate read into locked page and return.
*/ */
lru_cache_add(new_page); lru_cache_add_active(new_page);
swap_readpage(NULL, new_page); swap_readpage(NULL, new_page);
return new_page; return new_page;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment