Commit 8a0516ed authored by Mel Gorman, committed by Linus Torvalds

mm: convert p[te|md]_numa users to p[te|md]_protnone_numa

Convert existing users of pte_numa and friends to the new helper.  Note
that the kernel is broken after this patch is applied until the other page
table modifiers are also altered.  The patch is split this way to make
review easier.
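
For reference, the p[te|md]_protnone helpers that callers are switched to come from the parent commit e7bb4b6d. The sketch below is a simplified approximation of their shape rather than a verbatim copy of that commit; the x86 flag test shown assumes the _PAGE_PROTNONE/_PAGE_PRESENT encoding used by NUMA balancing, where a hinting entry is simply a PROT_NONE entry:

/*
 * Sketch only, not a verbatim copy of the parent commit: a NUMA
 * hinting entry has _PAGE_PROTNONE set while _PAGE_PRESENT is clear.
 */
#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#else
/* Without NUMA balancing there are no hinting faults to detect. */
static inline int pte_protnone(pte_t pte) { return 0; }
static inline int pmd_protnone(pmd_t pmd) { return 0; }
#endif

With helpers of this shape, the conversion below is mechanical: pte_numa(pte) becomes pte_protnone(pte) and pmd_numa(pmd) becomes pmd_protnone(pmd), and the checks keep the same meaning on kernels with NUMA balancing enabled.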
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e7bb4b6d
arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -212,7 +212,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	/* Look up the Linux PTE for the backing page */
 	pte_size = psize;
 	pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
-	if (pte_present(pte) && !pte_numa(pte)) {
+	if (pte_present(pte) && !pte_protnone(pte)) {
 		if (writing && !pte_write(pte))
 			/* make the actual HPTE be read-only */
 			ptel = hpte_make_readonly(ptel);
arch/powerpc/mm/fault.c
@@ -398,8 +398,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * processors use the same I/D cache coherency mechanism
 	 * as embedded.
	 */
-	if (error_code & DSISR_PROTFAULT)
-		goto bad_area;
 #endif /* CONFIG_PPC_STD_MMU */

 	/*
@@ -423,9 +421,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		flags |= FAULT_FLAG_WRITE;
 	/* a read */
 	} else {
-		/* protection fault */
-		if (error_code & 0x08000000)
-			goto bad_area;
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 			goto bad_area;
 	}
arch/powerpc/mm/pgtable.c
@@ -172,9 +172,14 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		pte_t pte)
 {
-#ifdef CONFIG_DEBUG_VM
-	WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
-#endif
+	/*
+	 * When handling numa faults, we already have the pte marked
+	 * _PAGE_PRESENT, but we can be sure that it is not in hpte.
+	 * Hence we can use set_pte_at for them.
+	 */
+	VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
+		(_PAGE_PRESENT | _PAGE_USER));
+
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
 	 * is called.
arch/powerpc/mm/pgtable_64.c
@@ -718,7 +718,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
+	WARN_ON((pmd_val(*pmdp) & (_PAGE_PRESENT | _PAGE_USER)) ==
+		(_PAGE_PRESENT | _PAGE_USER));
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif
arch/x86/mm/gup.c
@@ -84,7 +84,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		struct page *page;

 		/* Similar to the PMD case, NUMA hinting must take slow path */
-		if (pte_numa(pte)) {
+		if (pte_protnone(pte)) {
 			pte_unmap(ptep);
 			return 0;
 		}
@@ -178,7 +178,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 			 * slowpath for accounting purposes and so that they
 			 * can be serialised against THP migration.
 			 */
-			if (pmd_numa(pmd))
+			if (pmd_protnone(pmd))
 				return 0;
 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
 				return 0;
include/uapi/linux/mempolicy.h
@@ -67,7 +67,7 @@ enum mpol_rebind_step {
 #define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */
 #define MPOL_F_REBINDING (1 << 2)	/* identify policies in rebinding */
 #define MPOL_F_MOF	(1 << 3) /* this policy wants migrate on fault */
-#define MPOL_F_MORON	(1 << 4) /* Migrate On pte_numa Reference On Node */
+#define MPOL_F_MORON	(1 << 4) /* Migrate On protnone Reference On Node */

 #endif /* _UAPI_LINUX_MEMPOLICY_H */
mm/gup.c
@@ -64,7 +64,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		migration_entry_wait(mm, pmd, address);
 		goto retry;
 	}
-	if ((flags & FOLL_NUMA) && pte_numa(pte))
+	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
 	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
 		pte_unmap_unlock(ptep, ptl);
@@ -184,7 +184,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
-	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 		return no_page_table(vma, flags);
 	if (pmd_trans_huge(*pmd)) {
 		if (flags & FOLL_SPLIT) {
@@ -906,10 +906,10 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,

 		/*
 		 * Similar to the PMD case below, NUMA hinting must take slow
-		 * path
+		 * path using the pte_protnone check.
 		 */
 		if (!pte_present(pte) || pte_special(pte) ||
-			pte_numa(pte) || (write && !pte_write(pte)))
+			pte_protnone(pte) || (write && !pte_write(pte)))
 			goto pte_unmap;

 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -1104,7 +1104,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 			 * slowpath for accounting purposes and so that they
 			 * can be serialised against THP migration.
 			 */
-			if (pmd_numa(pmd))
+			if (pmd_protnone(pmd))
 				return 0;

 			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
mm/huge_memory.c
@@ -1211,7 +1211,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		return ERR_PTR(-EFAULT);

 	/* Full NUMA hinting faults to serialise migration in fault paths */
-	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 		goto out;

 	page = pmd_page(*pmd);
@@ -1342,7 +1342,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,

 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
-	 * and pmd_numa cleared.
+	 * and access rights restored.
 	 */
 	spin_unlock(ptl);
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
@@ -1357,7 +1357,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	BUG_ON(!PageLocked(page));
 	pmd = pmd_mknonnuma(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
-	VM_BUG_ON(pmd_numa(*pmdp));
+	VM_BUG_ON(pmd_protnone(*pmdp));
 	update_mmu_cache_pmd(vma, addr, pmdp);
 	unlock_page(page);
 out_unlock:
@@ -1483,7 +1483,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		ret = 1;
 		if (!prot_numa) {
 			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
-			if (pmd_numa(entry))
+			if (pmd_protnone(entry))
 				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
@@ -1499,7 +1499,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 * local vs remote hits on the zero page.
 			 */
 			if (!is_huge_zero_page(page) &&
-			    !pmd_numa(*pmd)) {
+			    !pmd_protnone(*pmd)) {
 				pmdp_set_numa(mm, addr, pmd);
 				ret = HPAGE_PMD_NR;
 			}
@@ -1767,9 +1767,9 @@ static int __split_huge_page_map(struct page *page,
 			pte_t *pte, entry;
 			BUG_ON(PageCompound(page+i));
 			/*
-			 * Note that pmd_numa is not transferred deliberately
-			 * to avoid any possibility that pte_numa leaks to
-			 * a PROT_NONE VMA by accident.
+			 * Note that NUMA hinting access restrictions are not
+			 * transferred to avoid any possibility of altering
+			 * permissions across VMAs.
 			 */
 			entry = mk_pte(page + i, vma->vm_page_prot);
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
mm/memory.c
@@ -3124,7 +3124,7 @@ static int handle_pte_fault(struct mm_struct *mm,
 					pte, pmd, flags, entry);
 	}

-	if (pte_numa(entry))
+	if (pte_protnone(entry))
 		return do_numa_page(mm, vma, address, entry, pte, pmd);

 	ptl = pte_lockptr(mm, pmd);
@@ -3202,7 +3202,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			if (pmd_trans_splitting(orig_pmd))
 				return 0;

-			if (pmd_numa(orig_pmd))
+			if (pmd_protnone(orig_pmd))
 				return do_huge_pmd_numa_page(mm, vma, address,
 							     orig_pmd, pmd);
mm/mprotect.c
@@ -75,35 +75,17 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		oldpte = *pte;
 		if (pte_present(oldpte)) {
 			pte_t ptent;
-			bool updated = false;

-			if (!prot_numa) {
-				ptent = ptep_modify_prot_start(mm, addr, pte);
-				if (pte_numa(ptent))
-					ptent = pte_mknonnuma(ptent);
-				ptent = pte_modify(ptent, newprot);
-				/*
-				 * Avoid taking write faults for pages we
-				 * know to be dirty.
-				 */
-				if (dirty_accountable && pte_dirty(ptent) &&
-				    (pte_soft_dirty(ptent) ||
-				     !(vma->vm_flags & VM_SOFTDIRTY)))
-					ptent = pte_mkwrite(ptent);
-				ptep_modify_prot_commit(mm, addr, pte, ptent);
-				updated = true;
-			} else {
-				struct page *page;
-
-				page = vm_normal_page(vma, addr, oldpte);
-				if (page && !PageKsm(page)) {
-					if (!pte_numa(oldpte)) {
-						ptep_set_numa(mm, addr, pte);
-						updated = true;
-					}
-				}
-			}
-			if (updated)
-				pages++;
+			ptent = ptep_modify_prot_start(mm, addr, pte);
+			ptent = pte_modify(ptent, newprot);
+
+			/* Avoid taking write faults for known dirty pages */
+			if (dirty_accountable && pte_dirty(ptent) &&
+					(pte_soft_dirty(ptent) ||
+					 !(vma->vm_flags & VM_SOFTDIRTY))) {
+				ptent = pte_mkwrite(ptent);
+			}
+			ptep_modify_prot_commit(mm, addr, pte, ptent);
+			pages++;
 		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
mm/pgtable-generic.c
@@ -193,7 +193,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
 	pmd_t entry = *pmdp;
-	if (pmd_numa(entry))
+	if (pmd_protnone(entry))
 		entry = pmd_mknonnuma(entry);
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);