Commit b8072f09 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: update comments to pte lock

Updated several references to page_table_lock in common code comments.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f412ac08
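For context: the "pte lock" these comments now refer to is taken and released through the pte_offset_map_lock()/pte_unmap_unlock() helpers from the same patch series. A minimal sketch of the idiom (illustrative only, not part of this diff; mm, pmd and address are assumed to be whatever the caller already has in hand):

	pte_t *pte;
	spinlock_t *ptl;

	/* map the pte and take the lock that guards it */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (pte_present(*pte)) {
		/* inspect or update the pte while the lock is held */
	}
	/* drop the lock and unmap the pte */
	pte_unmap_unlock(pte, ptl);

On configurations without a split page table lock this lock degenerates to mm->page_table_lock, which is why several comments below say "page_table_lock or pte_lock".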
@@ -8,7 +8,7 @@
  * - update the page tables
  * - inform the TLB about the new one
  *
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ * We hold the mm semaphore for reading, and the pte lock.
  *
  * Note: the old pte is known to not be writable, so we don't need to
  * worry about dirty bits etc getting lost.
...
@@ -47,8 +47,7 @@ struct vm_area_struct;
  * Locking policy for interlave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem. For allocating in the interleave policy the page_table_lock
- * must be also aquired to protect il_next.
+ * mmap_sem.
  *
  * Freeing policy:
  * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
...
@@ -66,7 +66,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *
  *  ->mmap_sem
  *    ->i_mmap_lock
- *      ->page_table_lock	(various places, mainly in mmap.c)
+ *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_sem
@@ -86,9 +86,9 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->anon_vma.lock		(vma_adjust)
  *
  *  ->anon_vma.lock
- *    ->page_table_lock	(anon_vma_prepare and various)
+ *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
  *
- *  ->page_table_lock
+ *  ->page_table_lock or pte_lock
  *    ->swap_lock		(try_to_unmap_one)
  *    ->private_lock		(try_to_unmap_one)
  *    ->tree_lock		(try_to_unmap_one)
...
@@ -32,7 +32,7 @@
  * page->flags PG_locked (lock_page)
  *   mapping->i_mmap_lock
  *     anon_vma->lock
- *       mm->page_table_lock
+ *       mm->page_table_lock or pte_lock
  *         zone->lru_lock (in mark_page_accessed)
  *         swap_lock (in swap_duplicate, swap_info_get)
  *           mmlist_lock (in mmput, drain_mmlist and others)
...@@ -244,7 +244,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) ...@@ -244,7 +244,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
/* /*
* Check that @page is mapped at @address into @mm. * Check that @page is mapped at @address into @mm.
* *
* On success returns with mapped pte and locked mm->page_table_lock. * On success returns with pte mapped and locked.
*/ */
pte_t *page_check_address(struct page *page, struct mm_struct *mm, pte_t *page_check_address(struct page *page, struct mm_struct *mm,
unsigned long address, spinlock_t **ptlp) unsigned long address, spinlock_t **ptlp)
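A caller of page_check_address() then follows the pattern below (a hypothetical sketch in the style of the rmap.c callers; vma, mm and address are assumed from context, and the young-bit test merely stands in for whatever work is done under the lock):

	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	pte = page_check_address(page, mm, address, &ptl);
	if (pte) {
		/* pte is mapped and its lock is held from here on */
		if (ptep_clear_flush_young(vma, address, pte))
			referenced++;
		/* release the pte lock and unmap the pte */
		pte_unmap_unlock(pte, ptl);
	}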
@@ -445,7 +445,7 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
@@ -468,7 +468,7 @@ void page_add_anon_rmap(struct page *page,
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
  *
- * The caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_add_file_rmap(struct page *page)
 {
@@ -483,7 +483,7 @@ void page_add_file_rmap(struct page *page)
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
  *
- * Caller needs to hold the mm->page_table_lock.
+ * The caller needs to hold the pte lock.
  */
 void page_remove_rmap(struct page *page)
 {
...
@@ -259,8 +259,7 @@ static inline void free_swap_cache(struct page *page)
 /*
  * Perform a free_page(), also freeing any swap cache associated with
- * this page if it is the last user of the page. Can not do a lock_page,
- * as we are holding the page_table_lock spinlock.
+ * this page if it is the last user of the page.
  */
 void free_page_and_swap_cache(struct page *page)
 {
...