Commit ca0e22d4 authored by Joerg Roedel, committed by Borislav Petkov

x86/boot/compressed/64: Always switch to own page table

When booted through startup_64(), the kernel keeps running on the EFI
page table until the KASLR code sets up its own page table. Without
KASLR, the pre-decompression boot code never switches off the EFI page
table. Change that by unconditionally switching to a kernel-controlled
page table after relocation.

This makes sure the kernel can make changes to the mapping when
necessary, for example to map pages unencrypted in SEV and SEV-ES guests.
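(Illustration, not part of this patch: with the boot code owning the page
table, a later change can clear the SEV encryption bit, the "C-bit", in a
page-table entry so the page becomes shared with the hypervisor. A minimal
stand-alone sketch of that bit manipulation follows; pte_make_shared() is a
hypothetical helper, and the C-bit position really comes from CPUID
0x8000001f EBX[5:0], with 47 used here only as an example value.)

	#include <stdio.h>
	#include <stdint.h>

	/* Example C-bit position; the real one is reported by CPUID 0x8000001f. */
	#define C_BIT_SHIFT	47
	#define SME_ME_MASK	(1ULL << C_BIT_SHIFT)

	/* Clear the encryption bit so the mapped page is shared/unencrypted. */
	static uint64_t pte_make_shared(uint64_t pte)
	{
		return pte & ~SME_ME_MASK;
	}

	int main(void)
	{
		uint64_t pte = 0x56000063ULL | SME_ME_MASK; /* example PTE, C-bit set */

		printf("encrypted:   %#llx\n", (unsigned long long)pte);
		printf("unencrypted: %#llx\n", (unsigned long long)pte_make_shared(pte));
		return 0;
	}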

Also, remove the debug_putstr() calls in initialize_identity_maps()
because the function now runs before console_init() is called.

 [ bp: Massage commit message. ]
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lkml.kernel.org/r/20200907131613.12703-17-joro@8bytes.org
parent 8b0d3b3b
arch/x86/boot/compressed/head_64.S
@@ -543,10 +543,11 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	rep	stosq
 
 /*
- * Load stage2 IDT
+ * Load stage2 IDT and switch to our own page-table
  */
 	pushq	%rsi
 	call	load_stage2_idt
+	call	initialize_identity_maps
 	popq	%rsi
 
 /*
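(For orientation, a rough C rendering of the .Lrelocated sequence after this
change. A sketch only: relocated_entry() is a hypothetical name, the real
code is the assembly above, and the pushq/popq %rsi pair is what preserves
the boot_params pointer across the two calls.)

	struct boot_params;			/* pointed to by %rsi in head_64.S */

	void load_stage2_idt(void);		/* assumed provided by the boot code */
	void initialize_identity_maps(void);	/* now called on every 64-bit boot */

	static void relocated_entry(struct boot_params *bp)
	{
		(void)bp;	/* kept alive by pushq/popq %rsi around the calls */

		/* Exception handling is set up before the page-table switch. */
		load_stage2_idt();
		initialize_identity_maps();
		/* ... decompression of the kernel image follows ... */
	}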
arch/x86/boot/compressed/ident_map_64.c
@@ -86,9 +86,31 @@ phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
  */
 static struct x86_mapping_info mapping_info;
 
+/*
+ * Adds the specified range to what will become the new identity mappings.
+ * Once all ranges have been added, the new mapping is activated by calling
+ * finalize_identity_maps() below.
+ */
+void add_identity_map(unsigned long start, unsigned long size)
+{
+	unsigned long end = start + size;
+
+	/* Align boundary to 2M. */
+	start = round_down(start, PMD_SIZE);
+	end = round_up(end, PMD_SIZE);
+	if (start >= end)
+		return;
+
+	/* Build the mapping. */
+	kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
+				  start, end);
+}
+
 /* Locates and clears a region for a new top level page table. */
 void initialize_identity_maps(void)
 {
+	unsigned long start, size;
+
 	/* If running as an SEV guest, the encryption mask is required. */
 	set_sev_encryption_mask();
@@ -121,37 +143,24 @@ void initialize_identity_maps(void)
 	 */
 	top_level_pgt = read_cr3_pa();
 	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
-		debug_putstr("booted via startup_32()\n");
 		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
 		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
 		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
 	} else {
-		debug_putstr("booted via startup_64()\n");
 		pgt_data.pgt_buf = _pgtable;
 		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
 		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
 		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
 	}
-}
 
-/*
- * Adds the specified range to what will become the new identity mappings.
- * Once all ranges have been added, the new mapping is activated by calling
- * finalize_identity_maps() below.
- */
-void add_identity_map(unsigned long start, unsigned long size)
-{
-	unsigned long end = start + size;
-
-	/* Align boundary to 2M. */
-	start = round_down(start, PMD_SIZE);
-	end = round_up(end, PMD_SIZE);
-	if (start >= end)
-		return;
-
-	/* Build the mapping. */
-	kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
-				  start, end);
+	/*
+	 * New page-table is set up - map the kernel image and load it
+	 * into cr3.
+	 */
+	start = (unsigned long)_head;
+	size  = _end - _head;
+	add_identity_map(start, size);
+	write_cr3(top_level_pgt);
 }
 
 /*
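(The 2M alignment in add_identity_map() above means any request is widened
to whole PMD-sized pages before the mapping is built. A small stand-alone
sketch of that rounding: round_down_2m()/round_up_2m() are hypothetical
helpers mirroring the kernel's round_down()/round_up() macros, and PMD_SIZE
is hard-coded to its x86-64 4K-paging value of 2 MiB for illustration.)

	#include <stdio.h>
	#include <stdint.h>

	/* On x86-64 with 4K pages, one PMD entry maps 2 MiB. */
	#define PMD_SIZE	(1ULL << 21)

	/* Power-of-two rounding, as round_down()/round_up() do in the kernel. */
	static uint64_t round_down_2m(uint64_t x) { return x & ~(PMD_SIZE - 1); }
	static uint64_t round_up_2m(uint64_t x)   { return (x + PMD_SIZE - 1) & ~(PMD_SIZE - 1); }

	int main(void)
	{
		uint64_t start = 0x100123, size = 0x1000;	/* arbitrary example range */
		uint64_t end = start + size;

		/* A 4K request inside the first 2M page maps the whole [0, 2M) slot. */
		printf("mapped: [%#llx, %#llx)\n",
		       (unsigned long long)round_down_2m(start),
		       (unsigned long long)round_up_2m(end));
		return 0;
	}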
arch/x86/boot/compressed/kaslr.c
@@ -861,9 +861,6 @@ void choose_random_location(unsigned long input,
 
 	boot_params->hdr.loadflags |= KASLR_FLAG;
 
-	/* Prepare to add new identity pagetables on demand. */
-	initialize_identity_maps();
-
 	if (IS_ENABLED(CONFIG_X86_32))
 		mem_limit = KERNEL_IMAGE_SIZE;
 	else