Commit d6d861e3 authored by Zachary Amsden, committed by Linus Torvalds

[PATCH] paravirt: optimize ptep establish for pae

The ptep_establish macro is only used on user-level PTEs, for present->present (P->P) mapping
changes.  Since these always happen under protection of the page table lock,
the strong synchronization of a 64-bit cmpxchg is not needed; in fact, not
even a lock prefix needs to be used.  We can instead simply clear the present bit,
followed by a normal set.  The write ordering is still important to avoid the
possibility of the TLB snooping a partially written PTE and installing a bad
mapping (see the sketch after the PAE hunk below).
Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 23002d88
@@ -16,6 +16,7 @@
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
 #define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
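A note on the 2-level (non-PAE) hunk above: there a pte is a single 32-bit word, so one aligned store replaces the whole entry at once and set_pte_present can simply alias set_pte_at. Below is a minimal sketch of that case; the names demo2_pte and demo2_set_pte_present are illustrative assumptions, not kernel identifiers.

#include <stdint.h>

/*
 * Non-PAE model: the whole pte, present bit included, fits in one
 * 32-bit word, so a single aligned store suffices and a snooping
 * walker sees either the complete old or the complete new entry.
 */
typedef uint32_t demo2_pte;

static void demo2_set_pte_present(volatile demo2_pte *ptep, demo2_pte pteval)
{
	*ptep = pteval;
}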
@@ -58,6 +58,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
+/*
+ * Since this is only called on user PTEs, and the page fault handler
+ * must handle the already racy situation of simultaneous page faults,
+ * we are justified in merely clearing the PTE present bit, followed
+ * by a set.  The ordering here is important.
+ */
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+	ptep->pte_low = 0;
+	smp_wmb();
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+
 #define __HAVE_ARCH_SET_PTE_ATOMIC
 #define set_pte_atomic(pteptr,pteval) \
 		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
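The ordering argument from the commit message, as implemented by the PAE set_pte_present above, can be illustrated with a small standalone sketch. This is plain C, not kernel code: demo_pte, demo_set_pte_present and wmb_stand_in are illustrative assumptions (the kernel's PAE pte_t carries pte_low/pte_high and the real barrier is smp_wmb()); the compiler barrier here only stands in for a write barrier.

#include <stdint.h>

/* Stand-in for smp_wmb(); only a compiler barrier in this sketch. */
#define wmb_stand_in()	__asm__ __volatile__("" ::: "memory")

struct demo_pte {
	uint32_t pte_low;	/* present bit plus low half of the frame address */
	uint32_t pte_high;	/* high half of the frame address (PAE) */
};

/*
 * Present->present update without a lock prefix or cmpxchg8b.  If the
 * two new halves were simply stored in place, a hardware walker could
 * combine the new pte_low (present, new low bits) with the stale
 * pte_high: a bad mapping.  Clearing the present bit first keeps every
 * intermediate state either not-present or the complete new entry.
 */
static void demo_set_pte_present(volatile struct demo_pte *ptep,
				 struct demo_pte new_pte)
{
	ptep->pte_low = 0;			/* state 1: not present       */
	wmb_stand_in();
	ptep->pte_high = new_pte.pte_high;	/* state 2: still not present */
	wmb_stand_in();
	ptep->pte_low = new_pte.pte_low;	/* state 3: complete new pte  */
}

The mixed old/new combination described in the comment is the "partially written PTE" hazard the commit message refers to; the barriers keep the three stores from being reordered past each other.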
@@ -269,6 +269,17 @@ do { \
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 
+/*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+ * must be a present->present transition.
+ */
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(vma, address, ptep, pteval)		\
+do {								\
+	set_pte_present((vma)->vm_mm, address, ptep, pteval);	\
+	flush_tlb_page(vma, address);				\
+} while (0)
+
 #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
 #define ptep_clear_flush_dirty(vma, address, ptep)		\
 ({								\
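To make the usage rules above concrete, here is a hypothetical caller sketched in kernel style. example_wrprotect_user_pte is not a function from this patch or from the kernel tree; write-protecting is chosen only as a representative present->present change, and the sketch assumes kernel context for the headers and helpers it uses.

#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Hypothetical illustration: change a present user pte to another
 * present value (here: write-protect it) while holding the pte lock,
 * which is the only situation ptep_establish is allowed in.  The TLB
 * flush is folded into ptep_establish itself, so the caller does not
 * flush separately.
 */
static void example_wrprotect_user_pte(struct vm_area_struct *vma,
				       pmd_t *pmd, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep, entry;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	entry = *ptep;
	if (pte_present(entry)) {
		/* present -> present transition, as required above */
		ptep_establish(vma, address, ptep, pte_wrprotect(entry));
	}
	pte_unmap_unlock(ptep, ptl);
}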