Commit c78a6f26 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rename rmap_lock to page_map_lock

Sync this up with Andrea's patches.
parent 70d1f017
......@@ -8,9 +8,9 @@
#include <linux/config.h>
#include <linux/linkage.h>
#define rmap_lock(page) \
#define page_map_lock(page) \
bit_spin_lock(PG_maplock, (unsigned long *)&(page)->flags)
#define rmap_unlock(page) \
#define page_map_unlock(page) \
bit_spin_unlock(PG_maplock, (unsigned long *)&(page)->flags)
#ifdef CONFIG_MMU
......
......@@ -186,7 +186,7 @@ page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
if (PageReserved(page))
return pte_chain;
rmap_lock(page);
page_map_lock(page);
if (page->pte.direct == 0) {
page->pte.direct = pte_paddr;
......@@ -223,7 +223,7 @@ page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
cur_pte_chain->ptes[pte_chain_idx(cur_pte_chain) - 1] = pte_paddr;
cur_pte_chain->next_and_idx--;
out:
rmap_unlock(page);
page_map_unlock(page);
return pte_chain;
}
......@@ -245,7 +245,7 @@ void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
return;
rmap_lock(page);
page_map_lock(page);
if (!page_mapped(page))
goto out_unlock; /* remap_page_range() from a driver? */
......@@ -294,7 +294,7 @@ void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
dec_page_state(nr_mapped);
}
out_unlock:
rmap_unlock(page);
page_map_unlock(page);
}
/**
......
......@@ -276,11 +276,11 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
if (PageWriteback(page))
goto keep_locked;
rmap_lock(page);
page_map_lock(page);
referenced = page_referenced(page);
if (referenced && page_mapping_inuse(page)) {
/* In active use or really unfreeable. Activate it. */
rmap_unlock(page);
page_map_unlock(page);
goto activate_locked;
}
......@@ -295,10 +295,10 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
* XXX: implement swap clustering ?
*/
if (PageAnon(page) && !PageSwapCache(page)) {
rmap_unlock(page);
page_map_unlock(page);
if (!add_to_swap(page))
goto activate_locked;
rmap_lock(page);
page_map_lock(page);
}
if (PageSwapCache(page)) {
mapping = &swapper_space;
......@@ -313,16 +313,16 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
if (page_mapped(page) && mapping) {
switch (try_to_unmap(page)) {
case SWAP_FAIL:
rmap_unlock(page);
page_map_unlock(page);
goto activate_locked;
case SWAP_AGAIN:
rmap_unlock(page);
page_map_unlock(page);
goto keep_locked;
case SWAP_SUCCESS:
; /* try to free the page below */
}
}
rmap_unlock(page);
page_map_unlock(page);
/*
* If the page is dirty, only perform writeback if that write
......@@ -663,13 +663,13 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
list_add(&page->lru, &l_active);
continue;
}
rmap_lock(page);
page_map_lock(page);
if (page_referenced(page)) {
rmap_unlock(page);
page_map_unlock(page);
list_add(&page->lru, &l_active);
continue;
}
rmap_unlock(page);
page_map_unlock(page);
}
/*
* FIXME: need to consider page_count(page) here if/when we
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.