Commit 32df75cd authored by Konrad Rzeszutek Wilk

xen/mmu/p2m: Refactor the xen_pagetable_init code (v2).

The revectoring and copying of the P2M only happens when
!auto-xlat and on 64-bit builds. It is not obvious from
the code, so let's have separate 32 and 64-bit functions.

We also invert the check for auto-xlat to make the code
flow simpler.
Suggested-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 696fd7c5
...@@ -1198,19 +1198,15 @@ static void __init xen_cleanhighmap(unsigned long vaddr, ...@@ -1198,19 +1198,15 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
* instead of somewhere later and be confusing. */ * instead of somewhere later and be confusing. */
xen_mc_flush(); xen_mc_flush();
} }
#endif static void __init xen_pagetable_p2m_copy(void)
static void __init xen_pagetable_init(void)
{ {
#ifdef CONFIG_X86_64
unsigned long size; unsigned long size;
unsigned long addr; unsigned long addr;
#endif
paging_init();
xen_setup_shared_info();
#ifdef CONFIG_X86_64
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long new_mfn_list; unsigned long new_mfn_list;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
/* On 32-bit, we get zero so this never gets executed. */ /* On 32-bit, we get zero so this never gets executed. */
...@@ -1234,8 +1230,8 @@ static void __init xen_pagetable_init(void) ...@@ -1234,8 +1230,8 @@ static void __init xen_pagetable_init(void)
/* And revector! Bye bye old array */ /* And revector! Bye bye old array */
xen_start_info->mfn_list = new_mfn_list; xen_start_info->mfn_list = new_mfn_list;
} else } else
goto skip; return;
}
/* At this stage, cleanup_highmap has already cleaned __ka space /* At this stage, cleanup_highmap has already cleaned __ka space
* from _brk_limit way up to the max_pfn_mapped (which is the end of * from _brk_limit way up to the max_pfn_mapped (which is the end of
* the ramdisk). We continue on, erasing PMD entries that point to page * the ramdisk). We continue on, erasing PMD entries that point to page
...@@ -1255,7 +1251,15 @@ static void __init xen_pagetable_init(void) ...@@ -1255,7 +1251,15 @@ static void __init xen_pagetable_init(void)
* anything at this stage. */ * anything at this stage. */
xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1); xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
#endif #endif
skip: }
#endif
static void __init xen_pagetable_init(void)
{
paging_init();
xen_setup_shared_info();
#ifdef CONFIG_X86_64
xen_pagetable_p2m_copy();
#endif #endif
xen_post_allocator_init(); xen_post_allocator_init();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment