Commit 66d67fec authored by Steve Wahl <steve.wahl@hpe.com>, committed by Thomas Gleixner

x86/efi: Remove references to no-longer-used efi_have_uv1_memmap()

With UV1 support removed, efi_have_uv1_memmap() is no longer used; remove the remaining references to it.

Signed-off-by: Steve Wahl <steve.wahl@hpe.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lkml.kernel.org/r/20200713212955.786177105@hpe.com
parent cadde237
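
For readers without the rest of the series at hand: efi_have_uv1_memmap() was a small predicate in arch/x86/include/asm/efi.h reporting whether the kernel was running on the firmware's original 1:1 ("UV1") EFI memory map instead of the dedicated efi_mm page tables. The hunks below delete the UV1-only early exits and alternate paths guarded by that predicate, plus the comments describing the old scheme. As a rough sketch from memory (the flag name and exact definition are assumptions, not part of this diff; the helper itself goes away with the companion UV1-removal patches), the helper looked approximately like:

    /* Sketch only -- approximate shape of the removed helper, not this commit's diff. */
    #define EFI_UV1_MEMMAP          EFI_ARCH_1      /* assumed flag name */

    static inline bool efi_have_uv1_memmap(void)
    {
            return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP);
    }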
@@ -170,15 +170,6 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
         if (!current_ei->efi_memmap_size)
                 return 0;
 
-        /*
-         * If 1:1 mapping is not enabled, second kernel can not setup EFI
-         * and use EFI run time services. User space will have to pass
-         * acpi_rsdp=<addr> on kernel command line to make second kernel boot
-         * without efi.
-         */
-        if (efi_have_uv1_memmap())
-                return 0;
-
         params->secure_boot = boot_params.secure_boot;
         ei->efi_loader_signature = current_ei->efi_loader_signature;
         ei->efi_systab = current_ei->efi_systab;
...
@@ -648,7 +648,7 @@ static inline void *efi_map_next_entry_reverse(void *entry)
  */
 static void *efi_map_next_entry(void *entry)
 {
-        if (!efi_have_uv1_memmap() && efi_enabled(EFI_64BIT)) {
+        if (efi_enabled(EFI_64BIT)) {
                 /*
                  * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
                  * config table feature requires us to map all entries
@@ -777,11 +777,9 @@ static void __init kexec_enter_virtual_mode(void)
 
         /*
          * We don't do virtual mode, since we don't do runtime services, on
-         * non-native EFI. With the UV1 memmap, we don't do runtime services in
-         * kexec kernel because in the initial boot something else might
-         * have been mapped at these virtual addresses.
+         * non-native EFI.
          */
-        if (efi_is_mixed() || efi_have_uv1_memmap()) {
+        if (efi_is_mixed()) {
                 efi_memmap_unmap();
                 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                 return;
@@ -832,12 +830,6 @@ static void __init kexec_enter_virtual_mode(void)
  * has the runtime attribute bit set in its memory descriptor into the
  * efi_pgd page table.
  *
- * The old method which used to update that memory descriptor with the
- * virtual address obtained from ioremap() is still supported when the
- * kernel is booted on SG1 UV1 hardware. Same old method enabled the
- * runtime services to be called without having to thunk back into
- * physical mode for every invocation.
- *
  * The new method does a pagetable switch in a preemption-safe manner
  * so that we're in a different address space when calling a runtime
  * function. For function arguments passing we do copy the PUDs of the
...
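
Reassembled for readability, the kexec_enter_virtual_mode() bail-out above reduces to the following after the patch (an excerpt pieced together from the hunk; only the closing brace is assumed from the surrounding function):

    /*
     * We don't do virtual mode, since we don't do runtime services, on
     * non-native EFI.
     */
    if (efi_is_mixed()) {
            efi_memmap_unmap();
            clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
            return;
    }

The same simplification applies to efi_map_next_entry(): the top-down mapping order needed for EFI_PROPERTIES_TABLE is now selected purely by efi_enabled(EFI_64BIT).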
@@ -74,9 +74,6 @@ int __init efi_alloc_page_tables(void)
         pud_t *pud;
         gfp_t gfp_mask;
 
-        if (efi_have_uv1_memmap())
-                return 0;
-
         gfp_mask = GFP_KERNEL | __GFP_ZERO;
         efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
         if (!efi_pgd)
@@ -115,9 +112,6 @@ void efi_sync_low_kernel_mappings(void)
         pud_t *pud_k, *pud_efi;
         pgd_t *efi_pgd = efi_mm.pgd;
 
-        if (efi_have_uv1_memmap())
-                return;
-
         /*
          * We can share all PGD entries apart from the one entry that
          * covers the EFI runtime mapping space.
@@ -206,9 +200,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         unsigned npages;
         pgd_t *pgd = efi_mm.pgd;
 
-        if (efi_have_uv1_memmap())
-                return 0;
-
         /*
          * It can happen that the physical address of new_memmap lands in memory
          * which is not mapped in the EFI page table. Therefore we need to go
@@ -315,9 +306,6 @@ void __init efi_map_region(efi_memory_desc_t *md)
         unsigned long size = md->num_pages << PAGE_SHIFT;
         u64 pa = md->phys_addr;
 
-        if (efi_have_uv1_memmap())
-                return old_map_region(md);
-
         /*
          * Make sure the 1:1 mappings are present as a catch-all for b0rked
          * firmware which doesn't update all internal pointers after switching
@@ -420,12 +408,6 @@ void __init efi_runtime_update_mappings(void)
 {
         efi_memory_desc_t *md;
 
-        if (efi_have_uv1_memmap()) {
-                if (__supported_pte_mask & _PAGE_NX)
-                        runtime_code_page_mkexec();
-                return;
-        }
-
         /*
          * Use the EFI Memory Attribute Table for mapping permissions if it
          * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
@@ -474,10 +456,7 @@ void __init efi_runtime_update_mappings(void)
 
 void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
-        if (efi_have_uv1_memmap())
-                ptdump_walk_pgd_level(NULL, &init_mm);
-        else
-                ptdump_walk_pgd_level(NULL, &efi_mm);
+        ptdump_walk_pgd_level(NULL, &efi_mm);
 #endif
 }
@@ -849,21 +828,13 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
         const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
         efi_status_t status;
         unsigned long flags;
-        pgd_t *save_pgd = NULL;
 
         if (efi_is_mixed())
                 return efi_thunk_set_virtual_address_map(memory_map_size,
                                                          descriptor_size,
                                                          descriptor_version,
                                                          virtual_map);
-
-        if (efi_have_uv1_memmap()) {
-                save_pgd = efi_uv1_memmap_phys_prolog();
-                if (!save_pgd)
-                        return EFI_ABORTED;
-        } else {
-                efi_switch_mm(&efi_mm);
-        }
+        efi_switch_mm(&efi_mm);
 
         kernel_fpu_begin();
@@ -879,10 +850,7 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
         /* grab the virtually remapped EFI runtime services table pointer */
         efi.runtime = READ_ONCE(systab->runtime);
 
-        if (save_pgd)
-                efi_uv1_memmap_phys_epilog(save_pgd);
-        else
-                efi_switch_mm(efi_scratch.prev_mm);
+        efi_switch_mm(efi_scratch.prev_mm);
 
         return status;
 }
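
Taken together, the two efi_set_virtual_address_map() hunks leave a single, symmetric page-table switch around the firmware call. A reassembled excerpt of the post-patch flow; the elided middle section and kernel_fpu_end() are assumed from the surrounding function rather than shown in this diff:

    if (efi_is_mixed())
            return efi_thunk_set_virtual_address_map(memory_map_size,
                                                     descriptor_size,
                                                     descriptor_version,
                                                     virtual_map);

    efi_switch_mm(&efi_mm);                 /* enter the dedicated EFI page tables */

    kernel_fpu_begin();
    /* ... the SetVirtualAddressMap() runtime call itself is outside these hunks ... */
    kernel_fpu_end();

    /* grab the virtually remapped EFI runtime services table pointer */
    efi.runtime = READ_ONCE(systab->runtime);

    efi_switch_mm(efi_scratch.prev_mm);     /* back to the previous mm */

    return status;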
@@ -380,14 +380,6 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
         u64 pa = md->phys_addr;
         u64 va = md->virt_addr;
 
-        /*
-         * To Do: Remove this check after adding functionality to unmap EFI boot
-         * services code/data regions from direct mapping area because the UV1
-         * memory map maps EFI regions in swapper_pg_dir.
-         */
-        if (efi_have_uv1_memmap())
-                return;
-
         /*
          * EFI mixed mode has all RAM mapped to access arguments while making
          * EFI runtime calls, hence don't unmap EFI boot services code/data
...
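
With the UV1 escape hatch gone, efi_unmap_pages() in the quirks code is left with one special case: mixed mode. A reassembled sketch of the post-patch function; the efi_is_mixed() early return implied by the retained comment, as well as the actual unmapping calls, sit outside this hunk and are assumptions or placeholders here, not reproduced code:

    static void __init efi_unmap_pages(efi_memory_desc_t *md)
    {
            u64 pa = md->phys_addr;
            u64 va = md->virt_addr;

            /*
             * EFI mixed mode has all RAM mapped to access arguments while making
             * EFI runtime calls, hence don't unmap EFI boot services code/data
             * regions.
             */
            if (efi_is_mixed())             /* assumed from the comment above */
                    return;

            /* ... unmap the 1:1 (pa) and virtual (va) ranges from the EFI page
             *     tables; those calls are outside this hunk ... */
    }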