Commit 7df67697 authored by Bibo Mao, committed by Thomas Bogendoerfer

mm/memory.c: Update local TLB if PTE entry exists

If two threads concurrently fault at the same page, the thread that
won the race updates the PTE and its local TLB. Currently, the other
thread just gives up: it does nothing and continues.

That second thread may then trigger another fault at the same address,
during which it does nothing but update its local TLB. Instead of
taking that second fault, update the local TLB of the second thread
directly. The new function update_mmu_tlb is used for this; on all
other architectures it is defined as an empty stub.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
parent 4dd7683e
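
The pattern the mm/memory.c hunks below apply, condensed into one
illustrative sketch (not a literal excerpt from the commit): the fault
handler rechecks the PTE under the page-table lock, and when a racing
thread has already installed the entry, the loser now refills its local
TLB before bailing out instead of relying on the hardware to fault
again at the same address.

	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
				       &vmf->ptl);
	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
		/* A racing thread already handled the fault; refill the
		 * local TLB (a no-op on arches without the override) and
		 * bail out instead of waiting for a second fault.
		 */
		update_mmu_tlb(vma, vmf->address, vmf->pte);
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}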
arch/mips/include/asm/pgtable.h
@@ -483,6 +483,26 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
 {
 }
 
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+					unsigned long address, pte_t *ptep,
+					pte_t entry, int dirty)
+{
+	if (!pte_same(*ptep, entry))
+		set_pte_at(vma->vm_mm, address, ptep, entry);
+	/*
+	 * update_mmu_cache will unconditionally execute, handling both
+	 * the case that the PTE changed and the spurious fault case.
+	 */
+	return true;
+}
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -526,6 +546,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	__update_tlb(vma, address, pte);
 }
 
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb	update_mmu_cache
+
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp)
 {
...
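
The unconditional "return true" in ptep_set_access_flags above matters
because of how the generic fault path consumes the return value;
roughly (paraphrasing handle_pte_fault in this kernel version, not part
of this commit):

	/* A nonzero return forces update_mmu_cache(), which on MIPS
	 * writes the local TLB even when the fault was spurious.
	 */
	entry = pte_mkyoung(vmf->orig_pte);
	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
				  vmf->flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);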
include/asm-generic/pgtable.h
@@ -188,6 +188,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 }
 #endif
 
+/*
+ * If two threads concurrently fault at the same page, the thread that
+ * won the race updates the PTE and its local TLB/cache. The other
+ * thread gives up and simply continues; on architectures where software
+ * can update the TLB, the local TLB can be updated here to avoid the
+ * next page fault. This function updates the TLB only; it touches
+ * nothing else, which is what distinguishes it from update_mmu_cache.
+ */
+#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+static inline void update_mmu_tlb(struct vm_area_struct *vma,
+				unsigned long address, pte_t *ptep)
+{
+}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#endif
+
 /*
  * Some architectures may be able to avoid expensive synchronization
  * primitives when modifications are made to PTE's which are already
...
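
An architecture with a software-refilled TLB overrides this stub the
way the MIPS hunks above do; a minimal sketch for a hypothetical arch
header (the defines must be visible before the generic header is
included, as in the MIPS case):

	#define __HAVE_ARCH_UPDATE_MMU_TLB
	#define update_mmu_tlb	update_mmu_cache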
mm/memory.c
@@ -2436,10 +2436,9 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 			/*
 			 * Other thread has already handled the fault
-			 * and we don't need to do anything. If it's
-			 * not the case, the fault will be triggered
-			 * again on the same address.
+			 * and we only need to update the local TLB.
 			 */
+			update_mmu_tlb(vma, addr, vmf->pte);
 			ret = false;
 			goto pte_unlock;
 		}
@@ -2463,7 +2462,8 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
 		locked = true;
 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
-			/* The PTE changed under us. Retry page fault. */
+			/* The PTE changed under us; update the local TLB. */
+			update_mmu_tlb(vma, addr, vmf->pte);
 			ret = false;
 			goto pte_unlock;
 		}
@@ -2752,6 +2752,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		new_page = old_page;
 		page_copied = 1;
 	} else {
+		update_mmu_tlb(vma, vmf->address, vmf->pte);
 		mem_cgroup_cancel_charge(new_page, memcg, false);
 	}
@@ -2812,6 +2813,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
 	 * pte_offset_map_lock.
 	 */
 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return VM_FAULT_NOPAGE;
 	}
@@ -2936,6 +2938,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 				vmf->address, &vmf->ptl);
 		if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+			update_mmu_tlb(vma, vmf->address, vmf->pte);
 			unlock_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			put_page(vmf->page);
@@ -3341,8 +3344,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 						vma->vm_page_prot));
 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 				vmf->address, &vmf->ptl);
-		if (!pte_none(*vmf->pte))
+		if (!pte_none(*vmf->pte)) {
+			update_mmu_tlb(vma, vmf->address, vmf->pte);
 			goto unlock;
+		}
 		ret = check_stable_address_space(vma->vm_mm);
 		if (ret)
 			goto unlock;
@@ -3378,8 +3383,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
-	if (!pte_none(*vmf->pte))
+	if (!pte_none(*vmf->pte)) {
+		update_mmu_cache(vma, vmf->address, vmf->pte);
 		goto release;
+	}
 	ret = check_stable_address_space(vma->vm_mm);
 	if (ret)
@@ -3646,8 +3653,10 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 	}
 
 	/* Re-check under ptl */
-	if (unlikely(!pte_none(*vmf->pte)))
+	if (unlikely(!pte_none(*vmf->pte))) {
+		update_mmu_tlb(vma, vmf->address, vmf->pte);
 		return VM_FAULT_NOPAGE;
+	}
 
 	flush_icache_page(vma, page);
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -4224,8 +4233,10 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
 	spin_lock(vmf->ptl);
 	entry = vmf->orig_pte;
-	if (unlikely(!pte_same(*vmf->pte, entry)))
+	if (unlikely(!pte_same(*vmf->pte, entry))) {
+		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
 		goto unlock;
+	}
 	if (vmf->flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry))
 			return do_wp_page(vmf);
...