Commit 684019dd authored by Linus Torvalds

Merge branch 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull EFI updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Allocate the E820 buffer before doing the
     GetMemoryMap/ExitBootServices dance so we don't run out of space

   - Clear EFI boot services mappings when freeing the memory

   - Harden efivars against callers that invoke it on non-EFI boots

   - Reduce the number of memblock reservations resulting from extensive
     use of the new efi_mem_reserve_persistent() API

   - Other assorted fixes and cleanups"

* 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/efi: Don't unmap EFI boot services code/data regions for EFI_OLD_MEMMAP and EFI_MIXED_MODE
  efi: Reduce the amount of memblock reservations for persistent allocations
  efi: Permit multiple entries in persistent memreserve data structure
  efi/libstub: Disable some warnings for x86{,_64}
  x86/efi: Move efi_<reserve/free>_boot_services() to arch/x86
  x86/efi: Unmap EFI boot services code/data regions from efi_pgd
  x86/mm/pageattr: Introduce helper function to unmap EFI boot services
  efi/fdt: Simplify the get_fdt() flow
  efi/fdt: Indentation fix
  firmware/efi: Add NULL pointer checks in efivars API functions
parents 792bf4d8 1debf095
@@ -141,6 +141,8 @@ extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
extern void efi_reserve_boot_services(void);
struct efi_setup_data {
u64 fw_vendor;
......
@@ -564,8 +564,12 @@ extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
unsigned long address,
unsigned numpages,
unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
unsigned long numpages);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_DEFS_H */
@@ -2346,8 +2346,8 @@ bool kernel_page_present(struct page *page)
#endif /* CONFIG_DEBUG_PAGEALLOC */
int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags)
int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags)
{
int retval = -EINVAL;
@@ -2361,6 +2361,8 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
.flags = 0,
};
WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
if (!(__supported_pte_mask & _PAGE_NX))
goto out;
@@ -2382,6 +2384,40 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
return retval;
}
/*
* __flush_tlb_all() flushes mappings only on current CPU and hence this
* function shouldn't be used in an SMP environment. Presently, it's used only
* during boot (way before smp_init()) by EFI subsystem and hence is ok.
*/
int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
unsigned long numpages)
{
int retval;
/*
* The typical sequence for unmapping is to find a pte through
* lookup_address_in_pgd() (ideally, it should never return NULL because
* the address is already mapped) and change its protections. As pfn is
* the *target* of a mapping, it's not useful while unmapping.
*/
struct cpa_data cpa = {
.vaddr = &address,
.pfn = 0,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.flags = 0,
};
WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
retval = __change_page_attr_set_clr(&cpa, 0);
__flush_tlb_all();
return retval;
}
/*
* The testcases use internal knowledge of the implementation that shouldn't
* be exposed to the rest of the kernel. Include these directly here.
......
@@ -993,6 +993,8 @@ static void __init __efi_enter_virtual_mode(void)
panic("EFI call to SetVirtualAddressMap() failed!");
}
efi_free_boot_services();
/*
* Now that EFI is in virtual mode, update the function
* pointers in the runtime service table to the new virtual addresses.
......
@@ -369,6 +369,40 @@ void __init efi_reserve_boot_services(void)
}
}
/*
* Apart from having VA mappings for EFI boot services code/data regions,
* (duplicate) 1:1 mappings were also created as a quirk for buggy firmware. So,
* unmap both 1:1 and VA mappings.
*/
static void __init efi_unmap_pages(efi_memory_desc_t *md)
{
pgd_t *pgd = efi_mm.pgd;
u64 pa = md->phys_addr;
u64 va = md->virt_addr;
/*
* To Do: Remove this check after adding functionality to unmap EFI boot
* services code/data regions from direct mapping area because
* "efi=old_map" maps EFI regions in swapper_pg_dir.
*/
if (efi_enabled(EFI_OLD_MEMMAP))
return;
/*
* EFI mixed mode has all RAM mapped to access arguments while making
* EFI runtime calls, hence don't unmap EFI boot services code/data
* regions.
*/
if (!efi_is_native())
return;
if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
pr_err("Failed to unmap 1:1 mapping for 0x%llx\n", pa);
if (kernel_unmap_pages_in_pgd(pgd, va, md->num_pages))
pr_err("Failed to unmap VA mapping for 0x%llx\n", va);
}
void __init efi_free_boot_services(void)
{
phys_addr_t new_phys, new_size;
@@ -393,6 +427,13 @@ void __init efi_free_boot_services(void)
continue;
}
/*
* Before calling set_virtual_address_map(), EFI boot services
* code/data regions were mapped as a quirk for buggy firmware.
* Unmap them from efi_pgd before freeing them up.
*/
efi_unmap_pages(md);
/*
* Nasty quirk: if all sub-1MB memory is used for boot
* services, we can get here without having allocated the
......
@@ -602,21 +602,33 @@ int __init efi_apply_persistent_mem_reservations(void)
while (prsv) {
struct linux_efi_memreserve *rsv;
/* reserve the entry itself */
memblock_reserve(prsv, sizeof(*rsv));
rsv = early_memremap(prsv, sizeof(*rsv));
if (rsv == NULL) {
u8 *p;
int i;
/*
* Just map a full page: that is what we will get
* anyway, and it permits us to map the entire entry
* before knowing its size.
*/
p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
PAGE_SIZE);
if (p == NULL) {
pr_err("Could not map UEFI memreserve entry!\n");
return -ENOMEM;
}
if (rsv->size)
memblock_reserve(rsv->base, rsv->size);
rsv = (void *)(p + prsv % PAGE_SIZE);
/* reserve the entry itself */
memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
for (i = 0; i < atomic_read(&rsv->count); i++) {
memblock_reserve(rsv->entry[i].base,
rsv->entry[i].size);
}
prsv = rsv->next;
early_memunmap(rsv, sizeof(*rsv));
early_memunmap(p, PAGE_SIZE);
}
}
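
The remap above deliberately covers one full page and then offsets back to the entry, so the whole record can be read before its size is known. A minimal standalone sketch of that address arithmetic (hypothetical physical address, assuming 4 KiB pages; ALIGN_DOWN here is a simplified power-of-two stand-in for the kernel macro):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	((uint64_t)4096)
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* power-of-two alignment only */

int main(void)
{
	uint64_t prsv = 0x7ffde010;			/* hypothetical entry address */
	uint64_t page = ALIGN_DOWN(prsv, PAGE_SIZE);	/* 0x7ffde000 gets mapped */
	uint64_t off  = prsv % PAGE_SIZE;		/* entry sits 0x10 into that page */

	printf("map %#llx, entry at mapping + %#llx\n",
	       (unsigned long long)page, (unsigned long long)off);
	return 0;
}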
@@ -985,7 +997,8 @@ static int __init efi_memreserve_map_root(void)
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
struct linux_efi_memreserve *rsv;
int rc;
unsigned long prsv;
int rc, index;
if (efi_memreserve_root == (void *)ULONG_MAX)
return -ENODEV;
@@ -996,12 +1009,27 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
return rc;
}
rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
rsv = __va(prsv);
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
rsv->entry[index].size = size;
return 0;
}
}
/* no slot found - allocate a new linked list entry */
rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
if (!rsv)
return -ENOMEM;
rsv->base = addr;
rsv->size = size;
rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);
atomic_set(&rsv->count, 1);
rsv->entry[0].base = addr;
rsv->entry[0].size = size;
spin_lock(&efi_mem_reserve_persistent_lock);
rsv->next = efi_memreserve_root->next;
......
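
The lock-free slot claim above hinges on atomic_fetch_add_unless(): the counter is only incremented while it is still below rsv->size, and the pre-increment value becomes the caller's private index, so concurrent callers never share a slot. A minimal userspace model of that idea (C11 atomics standing in for the kernel primitive, sizes invented for illustration):

#include <stdatomic.h>
#include <stdio.h>

/* model of atomic_fetch_add_unless(v, a, u): add a unless *v == u, return old value */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c;
}

int main(void)
{
	atomic_int count = 0;	/* rsv->count */
	int size = 2;		/* pretend the entry[] array holds two slots */

	for (int i = 0; i < 3; i++) {
		int index = fetch_add_unless(&count, 1, size);

		if (index < size)
			printf("claimed slot %d\n", index);
		else
			printf("array full, a new page-sized entry is needed\n");
	}
	return 0;
}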
@@ -9,7 +9,10 @@ cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
$(call cc-disable-warning, address-of-packed-member) \
$(call cc-disable-warning, gnu)
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
......
@@ -86,8 +86,8 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
}
rsv->next = 0;
rsv->base = 0;
rsv->size = 0;
atomic_set(&rsv->count, 0);
status = efi_call_early(install_configuration_table,
&memreserve_table_guid,
......
@@ -370,22 +370,24 @@ void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
{
efi_guid_t fdt_guid = DEVICE_TREE_GUID;
efi_config_table_t *tables;
void *fdt;
int i;
tables = (efi_config_table_t *) sys_table->tables;
fdt = NULL;
tables = (efi_config_table_t *)sys_table->tables;
for (i = 0; i < sys_table->nr_tables; i++)
if (efi_guidcmp(tables[i].guid, fdt_guid) == 0) {
fdt = (void *) tables[i].table;
if (fdt_check_header(fdt) != 0) {
pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
return NULL;
}
*fdt_size = fdt_totalsize(fdt);
break;
}
for (i = 0; i < sys_table->nr_tables; i++) {
void *fdt;
if (efi_guidcmp(tables[i].guid, fdt_guid) != 0)
continue;
fdt = (void *)tables[i].table;
if (fdt_check_header(fdt) != 0) {
pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
return NULL;
}
*fdt_size = fdt_totalsize(fdt);
return fdt;
}
return fdt;
return NULL;
}
@@ -318,7 +318,12 @@ EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
static efi_status_t
check_var_size(u32 attributes, unsigned long size)
{
const struct efivar_operations *fops = __efivars->ops;
const struct efivar_operations *fops;
if (!__efivars)
return EFI_UNSUPPORTED;
fops = __efivars->ops;
if (!fops->query_variable_store)
return EFI_UNSUPPORTED;
@@ -329,7 +334,12 @@ check_var_size(u32 attributes, unsigned long size)
static efi_status_t
check_var_size_nonblocking(u32 attributes, unsigned long size)
{
const struct efivar_operations *fops = __efivars->ops;
const struct efivar_operations *fops;
if (!__efivars)
return EFI_UNSUPPORTED;
fops = __efivars->ops;
if (!fops->query_variable_store)
return EFI_UNSUPPORTED;
@@ -429,13 +439,18 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
void *data, bool duplicates, struct list_head *head)
{
const struct efivar_operations *ops = __efivars->ops;
const struct efivar_operations *ops;
unsigned long variable_name_size = 1024;
efi_char16_t *variable_name;
efi_status_t status;
efi_guid_t vendor_guid;
int err = 0;
if (!__efivars)
return -EFAULT;
ops = __efivars->ops;
variable_name = kzalloc(variable_name_size, GFP_KERNEL);
if (!variable_name) {
printk(KERN_ERR "efivars: Memory allocation failed.\n");
@@ -583,12 +598,14 @@ static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
*/
int __efivar_entry_delete(struct efivar_entry *entry)
{
const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
status = ops->set_variable(entry->var.VariableName,
&entry->var.VendorGuid,
0, 0, NULL);
if (!__efivars)
return -EINVAL;
status = __efivars->ops->set_variable(entry->var.VariableName,
&entry->var.VendorGuid,
0, 0, NULL);
return efi_status_to_err(status);
}
@@ -607,12 +624,17 @@ EXPORT_SYMBOL_GPL(__efivar_entry_delete);
*/
int efivar_entry_delete(struct efivar_entry *entry)
{
const struct efivar_operations *ops = __efivars->ops;
const struct efivar_operations *ops;
efi_status_t status;
if (down_interruptible(&efivars_lock))
return -EINTR;
if (!__efivars) {
up(&efivars_lock);
return -EINVAL;
}
ops = __efivars->ops;
status = ops->set_variable(entry->var.VariableName,
&entry->var.VendorGuid,
0, 0, NULL);
@@ -650,13 +672,19 @@ EXPORT_SYMBOL_GPL(efivar_entry_delete);
int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
unsigned long size, void *data, struct list_head *head)
{
const struct efivar_operations *ops = __efivars->ops;
const struct efivar_operations *ops;
efi_status_t status;
efi_char16_t *name = entry->var.VariableName;
efi_guid_t vendor = entry->var.VendorGuid;
if (down_interruptible(&efivars_lock))
return -EINTR;
if (!__efivars) {
up(&efivars_lock);
return -EINVAL;
}
ops = __efivars->ops;
if (head && efivar_entry_find(name, vendor, head, false)) {
up(&efivars_lock);
return -EEXIST;
@@ -687,12 +715,17 @@ static int
efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
u32 attributes, unsigned long size, void *data)
{
const struct efivar_operations *ops = __efivars->ops;
const struct efivar_operations *ops;
efi_status_t status;
if (down_trylock(&efivars_lock))
return -EBUSY;
if (!__efivars) {
up(&efivars_lock);
return -EINVAL;
}
status = check_var_size_nonblocking(attributes,
size + ucs2_strsize(name, 1024));
if (status != EFI_SUCCESS) {
@@ -700,6 +733,7 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
return -ENOSPC;
}
ops = __efivars->ops;
status = ops->set_variable_nonblocking(name, &vendor, attributes,
size, data);
@@ -727,9 +761,13 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
bool block, unsigned long size, void *data)
{
const struct efivar_operations *ops = __efivars->ops;
const struct efivar_operations *ops;
efi_status_t status;
if (!__efivars)
return -EINVAL;
ops = __efivars->ops;
if (!ops->query_variable_store)
return -ENOSYS;
@@ -829,13 +867,18 @@ EXPORT_SYMBOL_GPL(efivar_entry_find);
*/
int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
{
const struct efivar_operations *ops = __efivars->ops;
const struct efivar_operations *ops;
efi_status_t status;
*size = 0;
if (down_interruptible(&efivars_lock))
return -EINTR;
if (!__efivars) {
up(&efivars_lock);
return -EINVAL;
}
ops = __efivars->ops;
status = ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid, NULL, size, NULL);
up(&efivars_lock);
@@ -861,12 +904,14 @@ EXPORT_SYMBOL_GPL(efivar_entry_size);
int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
unsigned long *size, void *data)
{
const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
status = ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid,
attributes, size, data);
if (!__efivars)
return -EINVAL;
status = __efivars->ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid,
attributes, size, data);
return efi_status_to_err(status);
}
@@ -882,14 +927,19 @@ EXPORT_SYMBOL_GPL(__efivar_entry_get);
int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
unsigned long *size, void *data)
{
const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
if (down_interruptible(&efivars_lock))
return -EINTR;
status = ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid,
attributes, size, data);
if (!__efivars) {
up(&efivars_lock);
return -EINVAL;
}
status = __efivars->ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid,
attributes, size, data);
up(&efivars_lock);
return efi_status_to_err(status);
@@ -921,7 +971,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_get);
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
unsigned long *size, void *data, bool *set)
{
const struct efivar_operations *ops = __efivars->ops;
const struct efivar_operations *ops;
efi_char16_t *name = entry->var.VariableName;
efi_guid_t *vendor = &entry->var.VendorGuid;
efi_status_t status;
@@ -940,6 +990,11 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
if (down_interruptible(&efivars_lock))
return -EINTR;
if (!__efivars) {
err = -EINVAL;
goto out;
}
/*
* Ensure that the available space hasn't shrunk below the safe level
*/
@@ -956,6 +1011,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
}
}
ops = __efivars->ops;
status = ops->set_variable(name, vendor, attributes, *size, data);
if (status != EFI_SUCCESS) {
err = efi_status_to_err(status);
......
@@ -1000,13 +1000,11 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
extern void efi_find_mirror(void);
#else
static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
@@ -1046,7 +1044,6 @@ extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params);
extern struct kobject *efi_kobj;
@@ -1715,9 +1712,19 @@ extern struct efi_runtime_work efi_rts_work;
extern struct workqueue_struct *efi_rts_wq;
struct linux_efi_memreserve {
phys_addr_t next;
phys_addr_t base;
phys_addr_t size;
int size; // allocated size of the array
atomic_t count; // number of entries used
phys_addr_t next; // pa of next struct instance
struct {
phys_addr_t base;
phys_addr_t size;
} entry[0];
};
#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
(count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
/ sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
#endif /* _LINUX_EFI_H */
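
With the layout above, one page-sized allocation holds the 16-byte header plus as many 16-byte (base, size) pairs as fit. A self-contained sketch of the EFI_MEMRESERVE_SIZE/COUNT arithmetic (userspace stand-ins for the kernel types, assuming 4 KiB pages and 64-bit phys_addr_t, which yields 255 entries per page):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;
typedef struct { int counter; } atomic_t;	/* stand-in for the kernel type */

struct linux_efi_memreserve {
	int		size;			/* allocated size of the array */
	atomic_t	count;			/* number of entries used */
	phys_addr_t	next;			/* pa of next struct instance */
	struct {
		phys_addr_t	base;
		phys_addr_t	size;
	} entry[];				/* flexible array, entry[0] in the kernel */
};

#define PAGE_SIZE 4096
#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
	(count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
	/ sizeof(((struct linux_efi_memreserve *)0)->entry[0]))

int main(void)
{
	/* (4096 - 16) / 16 = 255 slots per page */
	printf("entries per page: %zu\n", EFI_MEMRESERVE_COUNT(PAGE_SIZE));
	/* and 255 slots round-trip to exactly one page */
	printf("bytes for 255 entries: %zu\n", EFI_MEMRESERVE_SIZE(255));
	return 0;
}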
@@ -737,10 +737,6 @@ asmlinkage __visible void __init start_kernel(void)
arch_post_acpi_subsys_init();
sfi_init_late();
if (efi_enabled(EFI_RUNTIME_SERVICES)) {
efi_free_boot_services();
}
/* Do the rest non-__init'ed, we're now alive */
arch_call_rest_init();
}
......