Commit a858f7b2 authored by Johannes Weiner, committed by Linus Torvalds

vma_page_offset() has no callers: drop it

Hugh adds: vma_pagecache_offset() has a dangerously misleading name, since
it's using hugepage units: rename it to vma_hugecache_offset().

[apw@shadowen.org: restack onto fixed MAP_PRIVATE reservations]
[akpm@linux-foundation.org: vma_split conversion]
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 84afd99b
...@@ -199,22 +199,11 @@ static long region_count(struct list_head *head, long f, long t) ...@@ -199,22 +199,11 @@ static long region_count(struct list_head *head, long f, long t)
return chg; return chg;
} }
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in base page units.
 *
 * NOTE(review): vma->vm_pgoff is already kept in PAGE_SIZE units
 * (see struct vm_area_struct in <linux/mm_types.h>), so it must be
 * added as-is; the previous ">> PAGE_SHIFT" on vm_pgoff produced a
 * wrong offset, consistent with this function never being called.
 */
static pgoff_t vma_page_offset(struct vm_area_struct *vma,
			       unsigned long address)
{
	/* Pages into the vma, plus the vma's start offset in the file. */
	return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}
/* /*
* Convert the address within this vma to the page offset within * Convert the address within this vma to the page offset within
* the mapping, in pagecache page units; huge pages here. * the mapping, in pagecache page units; huge pages here.
*/ */
static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma, static pgoff_t vma_hugecache_offset(struct vm_area_struct *vma,
unsigned long address) unsigned long address)
{ {
return ((address - vma->vm_start) >> HPAGE_SHIFT) + return ((address - vma->vm_start) >> HPAGE_SHIFT) +
...@@ -806,7 +795,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr) ...@@ -806,7 +795,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
if (vma->vm_flags & VM_SHARED) { if (vma->vm_flags & VM_SHARED) {
pgoff_t idx = vma_pagecache_offset(vma, addr); pgoff_t idx = vma_hugecache_offset(vma, addr);
return region_chg(&inode->i_mapping->private_list, return region_chg(&inode->i_mapping->private_list,
idx, idx + 1); idx, idx + 1);
...@@ -815,7 +804,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr) ...@@ -815,7 +804,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
} else { } else {
int err; int err;
pgoff_t idx = vma_pagecache_offset(vma, addr); pgoff_t idx = vma_hugecache_offset(vma, addr);
struct resv_map *reservations = vma_resv_map(vma); struct resv_map *reservations = vma_resv_map(vma);
err = region_chg(&reservations->regions, idx, idx + 1); err = region_chg(&reservations->regions, idx, idx + 1);
...@@ -831,11 +820,11 @@ static void vma_commit_reservation(struct vm_area_struct *vma, ...@@ -831,11 +820,11 @@ static void vma_commit_reservation(struct vm_area_struct *vma,
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
if (vma->vm_flags & VM_SHARED) { if (vma->vm_flags & VM_SHARED) {
pgoff_t idx = vma_pagecache_offset(vma, addr); pgoff_t idx = vma_hugecache_offset(vma, addr);
region_add(&inode->i_mapping->private_list, idx, idx + 1); region_add(&inode->i_mapping->private_list, idx, idx + 1);
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
pgoff_t idx = vma_pagecache_offset(vma, addr); pgoff_t idx = vma_hugecache_offset(vma, addr);
struct resv_map *reservations = vma_resv_map(vma); struct resv_map *reservations = vma_resv_map(vma);
/* Mark this page used in the map. */ /* Mark this page used in the map. */
...@@ -1153,8 +1142,8 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) ...@@ -1153,8 +1142,8 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
unsigned long end; unsigned long end;
if (reservations) { if (reservations) {
start = vma_pagecache_offset(vma, vma->vm_start); start = vma_hugecache_offset(vma, vma->vm_start);
end = vma_pagecache_offset(vma, vma->vm_end); end = vma_hugecache_offset(vma, vma->vm_end);
reserve = (end - start) - reserve = (end - start) -
region_count(&reservations->regions, start, end); region_count(&reservations->regions, start, end);
...@@ -1471,7 +1460,7 @@ static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma, ...@@ -1471,7 +1460,7 @@ static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
pgoff_t idx; pgoff_t idx;
mapping = vma->vm_file->f_mapping; mapping = vma->vm_file->f_mapping;
idx = vma_pagecache_offset(vma, address); idx = vma_hugecache_offset(vma, address);
return find_lock_page(mapping, idx); return find_lock_page(mapping, idx);
} }
...@@ -1499,7 +1488,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1499,7 +1488,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
} }
mapping = vma->vm_file->f_mapping; mapping = vma->vm_file->f_mapping;
idx = vma_pagecache_offset(vma, address); idx = vma_hugecache_offset(vma, address);
/* /*
* Use page lock to guard against racing truncation * Use page lock to guard against racing truncation
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment