Commit 2e917175 authored by Juergen Gross, committed by David Vrabel

xen: Speed up set_phys_to_machine() by using read-only mappings

Instead of checking at each call of set_phys_to_machine() whether a
new p2m page has to be allocated due to writing an entry in a large
invalid or identity area, just map those areas read-only and react
to a page fault on write by allocating the new page.

This change will make the common path with no allocation much
faster as it only requires a single write of the new mfn instead
of walking the address translation tables and checking for the
special cases.
Suggested-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent 054954eb
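For orientation before the diff: the scheme works only because a failed
fast-path store is reported to the caller, which then allocates a private
writable p2m page and retries. A minimal sketch of that caller-side
pattern, reconstructed from the kernel's p2m code rather than quoted from
this commit (alloc_p2m() is the pre-existing page allocator assumed here):

/*
 * Sketch (not part of this diff): a false return from
 * __set_phys_to_machine() means the entry is backed by one of the
 * shared read-only pages, so allocate a writable page and retry.
 */
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		if (!alloc_p2m(pfn))
			return false;	/* out of memory */
		return __set_phys_to_machine(pfn, mfn);
	}
	return true;
}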
arch/x86/xen/p2m.c:

@@ -70,6 +70,7 @@
 #include <asm/cache.h>
 #include <asm/setup.h>
+#include <asm/uaccess.h>
 
 #include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
@@ -316,9 +317,9 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 	paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		set_pte(p2m_missing_pte + i,
-			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL));
+			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
 		set_pte(p2m_identity_pte + i,
-			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL));
+			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
 	}
 
 	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
@@ -365,7 +366,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 				p2m_missing : p2m_identity;
 			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
 			set_pte(ptep,
-				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
+				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
 			continue;
 		}
@@ -624,6 +625,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 		return true;
 	}
 
+	if (likely(!__put_user(mfn, xen_p2m_addr + pfn)))
+		return true;
+
 	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
 	BUG_ON(!ptep || level != PG_LEVEL_4K);
@@ -633,9 +637,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
 		return mfn == IDENTITY_FRAME(pfn);
 
-	xen_p2m_addr[pfn] = mfn;
-	return true;
+	return false;
 }
 
 bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
...
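Putting the hunks together, __set_phys_to_machine() after this patch reads
roughly as follows. This is a reconstruction, not a quotation: the
p2m_missing check and the INVALID_P2M_ENTRY comparison sit in context
lines collapsed above, so they are assumed from the surrounding kernel
code rather than shown in this diff.

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	pte_t *ptep;
	unsigned int level;
	...
	/* Fast path: a plain store. If the entry is backed by a normal
	 * writable p2m page this succeeds; if it is backed by one of the
	 * shared read-only pages, the store faults and __put_user()'s
	 * exception-table fixup turns the fault into a non-zero return. */
	if (likely(!__put_user(mfn, xen_p2m_addr + pfn)))
		return true;

	/* Slow path: find out which shared page backs the entry and
	 * succeed only if the requested value already matches it. */
	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
		return mfn == INVALID_P2M_ENTRY;
	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return mfn == IDENTITY_FRAME(pfn);

	return false;	/* caller must allocate a writable page and retry */
}

The payoff is that the common case is now a single store plus a branch,
while the page-table walk and special-case checks are paid only when the
store actually faults.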