Commit 03781e40 authored by Sai Praneeth, committed by Ingo Molnar

x86/efi: Use efi_switch_mm() rather than manually twiddling with %cr3

Use the helper function efi_switch_mm() to switch to/from efi_mm when
invoking any UEFI runtime services.

Likewise, switch back to the previous mm (the mm context stolen by
efi_mm) after those calls return. The efi_switch_mm() helper can only
be used on x86_64 kernels with "efi=old_map" disabled, because x86_32
and efi=old_map do not use efi_pgd; they use swapper_pg_dir instead.
Tested-by: Bhupesh Sharma <bhsharma@redhat.com>
[ardb: add #include of sched/task.h for task_lock/_unlock]
Signed-off-by: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Lee, Chun-Yi <jlee@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Cc: Ricardo Neri <ricardo.neri@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-efi@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3ede3417
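
For quick orientation before reading the hunks: a condensed view of the new
per-call pattern, distilled from the diff below (this is a summary, not code
added by the commit beyond what the hunks themselves show):

    /* arch_efi_call_virt_setup(), new behaviour (x86_64, efi=old_map disabled) */
    if (!efi_enabled(EFI_OLD_MEMMAP))
        efi_switch_mm(&efi_mm);              /* stashes current->active_mm in efi_scratch.prev_mm */

    /* ... the UEFI runtime service runs on efi_mm's page tables ... */

    /* arch_efi_call_virt_teardown(), new behaviour */
    if (!efi_enabled(EFI_OLD_MEMMAP))
        efi_switch_mm(efi_scratch.prev_mm);  /* restores the previously active mm */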
@@ -70,14 +70,13 @@ extern asmlinkage u64 efi_call(void *fp, ...);
 #define efi_call_phys(f, args...)       efi_call((f), args)
 
 /*
- * Scratch space used for switching the pagetable in the EFI stub
+ * struct efi_scratch - Scratch space used while switching to/from efi_mm
+ * @phys_stack: stack used during EFI Mixed Mode
+ * @prev_mm:    store/restore stolen mm_struct while switching to/from efi_mm
  */
 struct efi_scratch {
-       u64     r15;
-       u64     prev_cr3;
-       pgd_t   *efi_pgt;
-       bool    use_pgd;
-       u64     phys_stack;
+       u64                     phys_stack;
+       struct mm_struct        *prev_mm;
 } __packed;
 
 #define arch_efi_call_virt_setup()                                      \
@@ -87,11 +86,8 @@ struct efi_scratch {
        __kernel_fpu_begin();                                            \
        firmware_restrict_branch_speculation_start();                    \
                                                                         \
-       if (efi_scratch.use_pgd) {                                       \
-               efi_scratch.prev_cr3 = __read_cr3();                     \
-               write_cr3((unsigned long)efi_scratch.efi_pgt);           \
-               __flush_tlb_all();                                       \
-       }                                                                \
+       if (!efi_enabled(EFI_OLD_MEMMAP))                                \
+               efi_switch_mm(&efi_mm);                                  \
 })
 
 #define arch_efi_call_virt(p, f, args...)                               \
@@ -99,10 +95,8 @@ struct efi_scratch {
 
 #define arch_efi_call_virt_teardown()                                   \
 ({                                                                      \
-       if (efi_scratch.use_pgd) {                                       \
-               write_cr3(efi_scratch.prev_cr3);                         \
-               __flush_tlb_all();                                       \
-       }                                                                \
+       if (!efi_enabled(EFI_OLD_MEMMAP))                                \
+               efi_switch_mm(efi_scratch.prev_mm);                      \
                                                                         \
        firmware_restrict_branch_speculation_end();                      \
        __kernel_fpu_end();                                              \
@@ -145,6 +139,7 @@ extern void __init efi_dump_pagetable(void);
 extern void __init efi_apply_memmap_quirks(void);
 extern int __init efi_reuse_config(u64 tables, int nr_tables);
 extern void efi_delete_dummy_variable(void);
+extern void efi_switch_mm(struct mm_struct *mm);
 
 struct efi_setup_data {
        u64 fw_vendor;
...
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/ucs2_string.h>
 #include <linux/mem_encrypt.h>
+#include <linux/sched/task.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -82,9 +83,8 @@ pgd_t * __init efi_call_phys_prolog(void)
        int n_pgds, i, j;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
-               save_pgd = (pgd_t *)__read_cr3();
-               write_cr3((unsigned long)efi_scratch.efi_pgt);
-               goto out;
+               efi_switch_mm(&efi_mm);
+               return NULL;
        }
 
        early_code_mapping_set_exec(1);
@@ -156,8 +156,7 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        pud_t *pud;
 
        if (!efi_enabled(EFI_OLD_MEMMAP)) {
-               write_cr3((unsigned long)save_pgd);
-               __flush_tlb_all();
+               efi_switch_mm(efi_scratch.prev_mm);
                return;
        }
 
@@ -347,13 +346,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;
 
-       /*
-        * Since the PGD is encrypted, set the encryption mask so that when
-        * this value is loaded into cr3 the PGD will be decrypted during
-        * the pagetable walk.
-        */
-       efi_scratch.efi_pgt = (pgd_t *)__sme_pa(pgd);
-
        /*
         * It can happen that the physical address of new_memmap lands in memory
         * which is not mapped in the EFI page table. Therefore we need to go
@@ -367,8 +359,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
                return 1;
        }
 
-       efi_scratch.use_pgd = true;
-
        /*
         * Certain firmware versions are way too sentimential and still believe
         * they are exclusive and unquestionable owners of the first physical page,
@@ -627,6 +617,22 @@ void __init efi_dump_pagetable(void)
 #endif
 }
 
+/*
+ * Makes the calling thread switch to/from efi_mm context. Can be used
+ * for SetVirtualAddressMap() i.e. current->active_mm == init_mm as well
+ * as during efi runtime calls i.e current->active_mm == current_mm.
+ * We are not mm_dropping()/mm_grabbing() any mm, because we are not
+ * losing/creating any references.
+ */
+void efi_switch_mm(struct mm_struct *mm)
+{
+       task_lock(current);
+       efi_scratch.prev_mm = current->active_mm;
+       current->active_mm = mm;
+       switch_mm(efi_scratch.prev_mm, mm, NULL);
+       task_unlock(current);
+}
+
 #ifdef CONFIG_EFI_MIXED
 extern efi_status_t efi64_thunk(u32, ...);
 
@@ -680,16 +686,13 @@ efi_status_t efi_thunk_set_virtual_address_map(
        efi_sync_low_kernel_mappings();
        local_irq_save(flags);
 
-       efi_scratch.prev_cr3 = __read_cr3();
-       write_cr3((unsigned long)efi_scratch.efi_pgt);
-       __flush_tlb_all();
+       efi_switch_mm(&efi_mm);
 
        func = (u32)(unsigned long)phys_set_virtual_address_map;
        status = efi64_thunk(func, memory_map_size, descriptor_size,
                             descriptor_version, virtual_map);
 
-       write_cr3(efi_scratch.prev_cr3);
-       __flush_tlb_all();
+       efi_switch_mm(efi_scratch.prev_mm);
 
        local_irq_restore(flags);
 
        return status;
...
@@ -33,7 +33,7 @@ ENTRY(efi64_thunk)
         * Switch to 1:1 mapped 32-bit stack pointer.
         */
        movq    %rsp, efi_saved_sp(%rip)
-       movq    efi_scratch+25(%rip), %rsp
+       movq    efi_scratch(%rip), %rsp
 
        /*
         * Calculate the physical address of the kernel text.
...
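
A note on the last hunk: the assembly previously loaded the Mixed Mode stack
from efi_scratch+25 because, in the old __packed layout, phys_stack sat behind
r15 (8) + prev_cr3 (8) + efi_pgt (8) + use_pgd (1) = 25 bytes; in the new
layout phys_stack is the first member, so the displacement drops to 0. A small
stand-alone sketch (hypothetical mirror structs, assuming an LP64 host where
u64 and pointers are both 8 bytes) that checks the arithmetic:

    /*
     * Illustrative only -- not part of the commit. Mirrors of the old and new
     * efi_scratch layouts (member names copied from the hunks above) showing
     * why the hard-coded "+25" in the thunk becomes plain "efi_scratch".
     */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct mm_struct;                       /* opaque stand-in for the kernel type */

    struct old_efi_scratch {                /* layout before this commit */
            uint64_t r15;
            uint64_t prev_cr3;
            void *efi_pgt;                  /* pgd_t * in the kernel */
            _Bool use_pgd;
            uint64_t phys_stack;
    } __attribute__((packed));

    struct new_efi_scratch {                /* layout after this commit */
            uint64_t phys_stack;
            struct mm_struct *prev_mm;
    } __attribute__((packed));

    int main(void)
    {
            /* 8 (r15) + 8 (prev_cr3) + 8 (efi_pgt) + 1 (use_pgd) = 25 */
            assert(offsetof(struct old_efi_scratch, phys_stack) == 25);
            /* phys_stack now leads the struct, so no displacement is needed */
            assert(offsetof(struct new_efi_scratch, phys_stack) == 0);
            return 0;
    }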