Commit b6ec57f4 authored by Kirill A. Shutemov, committed by Linus Torvalds

thp: change pmd_trans_huge_lock() interface to return ptl

After the THP refcounting rework we have only two possible return values
from pmd_trans_huge_lock(): success and failure.  Returning ptl by
pointer doesn't make much sense in this case.

Let's convert pmd_trans_huge_lock() to return ptl on success and NULL on
failure.
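
The conversion at each call site follows the same mechanical pattern,
sketched below.  This is only an illustrative before/after sketch; pmd,
vma and the body of the if-block stand for whatever the caller already
has in scope, and the concrete call sites are in the diff.

Before:

	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
		/* the pmd is huge (or devmap) and ptl is held */
		...
		spin_unlock(ptl);
	}

After:

	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/* the pmd is huge (or devmap) and ptl is held */
		...
		spin_unlock(ptl);
	}
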
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Minchan Kim <minchan@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 404a4741
@@ -602,7 +602,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		smaps_pmd_entry(pmd, addr, walk);
 		spin_unlock(ptl);
 		return 0;
@@ -913,7 +914,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty_pmd(vma, addr, pmd);
 			goto out;
@@ -1187,7 +1189,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	int err = 0;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge_lock(pmdp, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmdp, vma);
+	if (ptl) {
 		u64 flags = 0, frame = 0;
 		pmd_t pmd = *pmdp;
 
@@ -1519,7 +1522,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 	pte_t *orig_pte;
 	pte_t *pte;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
@@ -120,15 +120,15 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
-extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl);
+extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma);
 /* mmap_sem must be held on entry */
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
 	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
-		return __pmd_trans_huge_lock(pmd, vma, ptl);
+		return __pmd_trans_huge_lock(pmd, vma);
 	else
 		return false;
 }
@@ -190,10 +190,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 long adjust_next)
 {
 }
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
-	return false;
+	return NULL;
 }
 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -1560,7 +1560,8 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct mm_struct *mm = tlb->mm;
 	int ret = 0;
 
-	if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		goto out_unlocked;
 
 	orig_pmd = *pmd;
@@ -1627,7 +1628,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		return 0;
 	/*
 	 * For architectures like ppc64 we look at deposited pgtable
@@ -1690,7 +1692,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_sem prevents deadlock.
 	 */
-	if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
+	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
+	if (old_ptl) {
 		new_ptl = pmd_lockptr(mm, new_pmd);
 		if (new_ptl != old_ptl)
 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -1724,7 +1727,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pmd_t entry;
 		bool preserve_write = prot_numa && pmd_write(*pmd);
 		ret = 1;
@@ -1760,14 +1764,14 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  * Note that if it returns true, this routine returns without unlocking page
  * table lock. So callers must unlock it.
  */
-bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
-	*ptl = pmd_lock(vma->vm_mm, pmd);
+	spinlock_t *ptl;
+	ptl = pmd_lock(vma->vm_mm, pmd);
 	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
-		return true;
-	spin_unlock(*ptl);
-	return false;
+		return ptl;
+	spin_unlock(ptl);
+	return NULL;
 }
 
 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
@@ -4638,7 +4638,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
 			mc.precharge += HPAGE_PMD_NR;
 		spin_unlock(ptl);
@@ -4826,7 +4827,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	union mc_target target;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(ptl);
 			return 0;
@@ -117,7 +117,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	unsigned char *vec = walk->private;
 	int nr = (end - addr) >> PAGE_SHIFT;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		memset(vec, 1, nr);
 		spin_unlock(ptl);
 		goto out;