Commit ec181b7f authored by Linus Torvalds

Merge tag 'x86-urgent-2020-03-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "Two fixes for x86:

   - Map EFI runtime service data as encrypted when SEV is enabled.

     Otherwise e.g. SMBIOS data cannot be properly decoded by dmidecode.

   - Remove the warning in the vector management code which triggered
     when a managed interrupt affinity changed outside of a CPU hotplug
     operation.

     The warning was correct until the recent core code change that
     introduced a CPU isolation feature which needs to migrate managed
     interrupts away from online CPUs under certain conditions to
     achieve the isolation"

* tag 'x86-urgent-2020-03-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vector: Remove warning on managed interrupt migration
  x86/ioremap: Map EFI runtime services data as encrypted for SEV
parents e99bc917 469ff207
...@@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd) ...@@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd)
bool managed = apicd->is_managed; bool managed = apicd->is_managed;
/* /*
* This should never happen. Managed interrupts are not * Managed interrupts are usually not migrated away
* migrated except on CPU down, which does not involve the * from an online CPU, but CPU isolation 'managed_irq'
* cleanup vector. But try to keep the accounting correct * can make that happen.
* nevertheless. * 1) Activation does not take the isolation into account
* to keep the code simple
* 2) Migration away from an isolated CPU can happen when
* a non-isolated CPU which is in the calculated
* affinity mask comes online.
*/ */
WARN_ON_ONCE(managed);
trace_vector_free_moved(apicd->irq, cpu, vector, managed); trace_vector_free_moved(apicd->irq, cpu, vector, managed);
irq_matrix_free(vector_matrix, cpu, vector, managed); irq_matrix_free(vector_matrix, cpu, vector, managed);
per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
......
...@@ -106,6 +106,19 @@ static unsigned int __ioremap_check_encrypted(struct resource *res) ...@@ -106,6 +106,19 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
return 0; return 0;
} }
/*
 * walk_mem_res() does not cover the EFI runtime services data region,
 * yet that region must be ioremapped encrypted whenever SEV is in use.
 * Fold that requirement into the descriptor here.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (sev_active() && efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
		desc->flags |= IORES_MAP_ENCRYPTED;
}
static int __ioremap_collect_map_flags(struct resource *res, void *arg) static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{ {
struct ioremap_desc *desc = arg; struct ioremap_desc *desc = arg;
...@@ -124,6 +137,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg) ...@@ -124,6 +137,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg)
* To avoid multiple resource walks, this function walks resources marked as * To avoid multiple resource walks, this function walks resources marked as
* IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
* resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES). * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
*
* After that, deal with misc other ranges in __ioremap_check_other() which do
* not fall into the above category.
*/ */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size, static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
struct ioremap_desc *desc) struct ioremap_desc *desc)
...@@ -135,6 +151,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size, ...@@ -135,6 +151,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
memset(desc, 0, sizeof(struct ioremap_desc)); memset(desc, 0, sizeof(struct ioremap_desc));
walk_mem_res(start, end, desc, __ioremap_collect_map_flags); walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
__ioremap_check_other(addr, desc);
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment