Commit 5b7a1d40 authored by Huang Ying, committed by Linus Torvalds

mm, hugetlbfs: rename address to haddr in hugetlb_cow()

To take better advantage of the general huge page copying optimization,
the target subpage address will be passed to hugetlb_cow(), and from
there to copy_user_huge_page().  hugetlb_cow() will then work with both
the target subpage address and the huge page size aligned address.  To
distinguish between them, "haddr" is used for the huge page size aligned
address, consistent with the Transparent Huge Page naming convention.

Currently only the huge page size aligned address is used in
hugetlb_cow(), so this patch simply renames "address" to "haddr" there.
The next patch will pass the target subpage address to hugetlb_cow() as
well.

This is a pure code cleanup with no functional change.
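For reference, "haddr" denotes the faulting address rounded down to the
huge page boundary.  A minimal sketch of the relationship (illustrative
only; the helper name is hypothetical and not part of this patch):

	/*
	 * Illustrative only: how a huge page size aligned address
	 * ("haddr") relates to a target subpage address.
	 */
	static inline unsigned long hugetlb_align_haddr(struct hstate *h,
							unsigned long address)
	{
		return address & huge_page_mask(h);	/* round down */
	}

For a 2 MB huge page, for example, an address of 0x2001234 yields
haddr == 0x2000000; every subpage of that huge page maps to the same
haddr.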

Link: http://lkml.kernel.org/r/20180524005851.4079-4-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Shaohua Li <shli@fb.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Punit Agrawal <punit.agrawal@arm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c9f4cd71
@@ -3509,7 +3509,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  * Keep the pte_same checks anyway to make transition from the mutex easier.
  */
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
-		       unsigned long address, pte_t *ptep,
+		       unsigned long haddr, pte_t *ptep,
 		       struct page *pagecache_page, spinlock_t *ptl)
 {
 	pte_t pte;
@@ -3527,7 +3527,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * and just make the page writable */
 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
 		page_move_anon_rmap(old_page, vma);
-		set_huge_ptep_writable(vma, address, ptep);
+		set_huge_ptep_writable(vma, haddr, ptep);
 		return 0;
 	}

@@ -3551,7 +3551,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * be acquired again before returning to the caller, as expected.
 	 */
 	spin_unlock(ptl);
-	new_page = alloc_huge_page(vma, address, outside_reserve);
+	new_page = alloc_huge_page(vma, haddr, outside_reserve);

 	if (IS_ERR(new_page)) {
 		/*
@@ -3564,11 +3564,10 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (outside_reserve) {
 			put_page(old_page);
 			BUG_ON(huge_pte_none(pte));
-			unmap_ref_private(mm, vma, old_page, address);
+			unmap_ref_private(mm, vma, old_page, haddr);
 			BUG_ON(huge_pte_none(pte));
 			spin_lock(ptl);
-			ptep = huge_pte_offset(mm, address & huge_page_mask(h),
-					       huge_page_size(h));
+			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 			if (likely(ptep &&
 				   pte_same(huge_ptep_get(ptep), pte)))
 				goto retry_avoidcopy;
@@ -3593,12 +3592,12 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_release_all;
 	}

-	copy_user_huge_page(new_page, old_page, address, vma,
+	copy_user_huge_page(new_page, old_page, haddr, vma,
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 	set_page_huge_active(new_page);

-	mmun_start = address & huge_page_mask(h);
+	mmun_start = haddr;
 	mmun_end = mmun_start + huge_page_size(h);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

@@ -3607,25 +3606,24 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * before the page tables are altered
 	 */
 	spin_lock(ptl);
-	ptep = huge_pte_offset(mm, address & huge_page_mask(h),
-			       huge_page_size(h));
+	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
 		ClearPagePrivate(new_page);

 		/* Break COW */
-		huge_ptep_clear_flush(vma, address, ptep);
+		huge_ptep_clear_flush(vma, haddr, ptep);
 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
-		set_huge_pte_at(mm, address, ptep,
+		set_huge_pte_at(mm, haddr, ptep,
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page, true);
-		hugepage_add_new_anon_rmap(new_page, vma, address);
+		hugepage_add_new_anon_rmap(new_page, vma, haddr);
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out_release_all:
-	restore_reserve_on_error(h, vma, address, new_page);
+	restore_reserve_on_error(h, vma, haddr, new_page);
 	put_page(new_page);
 out_release_old:
 	put_page(old_page);
...
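For context, hugetlb_cow()'s callers already pass in a huge page size
aligned address; hugetlb_fault(), for instance, masks the faulting
address near the top of the function.  A simplified caller-side sketch
(condensed from hugetlb.c of this era, with unrelated code elided):

	/* Simplified caller-side sketch, not part of this diff. */
	address &= huge_page_mask(h);	/* huge page size aligned */
	...
	ret = hugetlb_cow(mm, vma, address, ptep, pagecache_page, ptl);

This is why the "address & huge_page_mask(h)" computations removed above
are safe to drop: "haddr" is aligned by construction.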