Commit e550cf78 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] empty the deferred lru-addition buffers in swapin_readahead

If we're about to return to userspace after performing some swap
readahead, the pages in the deferred-addition LRU queues could stay
there for some time.  So drain them after performing readahead.
parent 33709b5c
......@@ -21,7 +21,6 @@ void __pagevec_release_nonlru(struct pagevec *pvec);
void __pagevec_free(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
void __pagevec_lru_add_active(struct pagevec *pvec);
void lru_add_drain(void);
void pagevec_deactivate_inactive(struct pagevec *pvec);
void pagevec_strip(struct pagevec *pvec);
unsigned int pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
......
......@@ -158,6 +158,7 @@ extern int FASTCALL(page_over_rsslimit(struct page *));
extern void FASTCALL(lru_cache_add(struct page *));
extern void FASTCALL(lru_cache_add_active(struct page *));
extern void FASTCALL(activate_page(struct page *));
void lru_add_drain(void);
extern void swap_setup(void);
......
......@@ -967,12 +967,13 @@ void swapin_readahead(swp_entry_t entry)
num = valid_swaphandles(entry, &offset);
for (i = 0; i < num; offset++, i++) {
/* Ok, do the async read-ahead now */
new_page = read_swap_cache_async(swp_entry(swp_type(entry), offset));
new_page = read_swap_cache_async(swp_entry(swp_type(entry),
offset));
if (!new_page)
break;
page_cache_release(new_page);
}
return;
lru_add_drain(); /* Push any new pages onto the LRU now */
}
/*
......@@ -1007,7 +1008,7 @@ static int do_swap_page(struct mm_struct * mm,
ret = VM_FAULT_MINOR;
pte_unmap(page_table);
spin_unlock(&mm->page_table_lock);
return ret;
goto out;
}
/* Had to read the page from swap area: Major fault */
......@@ -1029,7 +1030,8 @@ static int do_swap_page(struct mm_struct * mm,
spin_unlock(&mm->page_table_lock);
unlock_page(page);
page_cache_release(page);
return VM_FAULT_MINOR;
ret = VM_FAULT_MINOR;
goto out;
}
/* The page isn't present yet, go ahead with the fault. */
......@@ -1053,6 +1055,7 @@ static int do_swap_page(struct mm_struct * mm,
update_mmu_cache(vma, address, pte);
pte_unmap(page_table);
spin_unlock(&mm->page_table_lock);
out:
return ret;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment