Commit f23d1f4a authored by Zhang Yanfei, committed by Gleb Natapov

x86/kexec: VMCLEAR VMCSs loaded on all cpus if necessary

This patch provides a way to VMCLEAR the VMCSs related to guests
on all cpus before executing VMXOFF when doing kdump. This
ensures that the VMCSs in the vmcore are up to date and not
corrupted.
Signed-off-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent c2193463
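
The hunks below add only the consumer side of the hook: a NULL-initialized, RCU-protected function pointer that the crash path invokes if it has been set. The provider is kvm_intel, which is expected to publish its VMCLEAR routine at module load and withdraw it before unload (the KVM-side change is a separate patch). A minimal sketch of that provider side follows; the names vmclear_all_loaded_vmcss(), vmx_sketch_init() and vmx_sketch_exit() are assumptions for illustration, not code from this commit:

/*
 * Sketch of the provider side (kvm_intel), not part of this patch.
 * vmclear_all_loaded_vmcss() is a hypothetical helper standing in for
 * whatever routine VMCLEARs every VMCS loaded on the executing cpu.
 */
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <asm/kexec.h>

static void vmclear_all_loaded_vmcss(void)
{
	/* VMCLEAR every VMCS loaded on this cpu (body omitted in this sketch). */
}

static int __init vmx_sketch_init(void)
{
#ifdef CONFIG_KEXEC
	/*
	 * Publish the callback.  crash.c, which defines the pointer, is
	 * only built on CONFIG_KEXEC kernels, hence the guard.
	 */
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   vmclear_all_loaded_vmcss);
#endif
	return 0;
}

static void __exit vmx_sketch_exit(void)
{
#ifdef CONFIG_KEXEC
	/* Unpublish, then wait out any crash-path reader inside rcu_read_lock(). */
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
#endif
}

module_init(vmx_sketch_init);
module_exit(vmx_sketch_exit);
MODULE_LICENSE("GPL");
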
arch/x86/include/asm/kexec.h
@@ -163,6 +163,8 @@ struct kimage_arch {
 };
 #endif
 
+extern void (*crash_vmclear_loaded_vmcss)(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_KEXEC_H */
arch/x86/kernel/crash.c
@@ -16,6 +16,7 @@
 #include <linux/delay.h>
 #include <linux/elf.h>
 #include <linux/elfcore.h>
+#include <linux/module.h>
 
 #include <asm/processor.h>
 #include <asm/hardirq.h>
@@ -30,6 +31,27 @@
 
 int in_crash_kexec;
 
+/*
+ * This is used to VMCLEAR all VMCSs loaded on the
+ * processor. And when loading kvm_intel module, the
+ * callback function pointer will be assigned.
+ *
+ * protected by rcu.
+ */
+void (*crash_vmclear_loaded_vmcss)(void) = NULL;
+EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+
+static inline void cpu_crash_vmclear_loaded_vmcss(void)
+{
+	void (*do_vmclear_operation)(void) = NULL;
+
+	rcu_read_lock();
+	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+	if (do_vmclear_operation)
+		do_vmclear_operation();
+	rcu_read_unlock();
+}
+
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
@@ -46,6 +68,11 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 #endif
 	crash_save_cpu(regs, cpu);
 
+	/*
+	 * VMCLEAR VMCSs loaded on all cpus if needed.
+	 */
+	cpu_crash_vmclear_loaded_vmcss();
+
 	/* Disable VMX or SVM if needed.
 	 *
 	 * We need to disable virtualization on all CPUs.
@@ -88,6 +115,11 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 
 	kdump_nmi_shootdown_cpus();
 
+	/*
+	 * VMCLEAR VMCSs loaded on this cpu if needed.
+	 */
+	cpu_crash_vmclear_loaded_vmcss();
+
 	/* Booting kdump kernel with VMX or SVM enabled won't work,
 	 * because (among other limitations) we can't disable paging
 	 * with the virt flags.
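
For completeness, a sketch of what the registered callback conceptually has to do: walk this cpu's bookkeeping of loaded VMCSs and VMCLEAR each one, so their in-memory images are written back and the vmcore captures consistent data. All names below (loaded_vmcs_stub, loaded_vmcss_stub, vmclear_one()) are illustrative stand-ins, not KVM's actual structures:

/*
 * Sketch only: the shape of a VMCLEAR callback.  Assumes each cpu's list
 * has been set up with INIT_LIST_HEAD() and is populated as VMCSs are
 * loaded on that cpu.
 */
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct loaded_vmcs_stub {
	void *vmcs;				/* VMCS region for one vcpu */
	struct list_head loaded_on_cpu_link;	/* linked into this cpu's list */
};

static DEFINE_PER_CPU(struct list_head, loaded_vmcss_stub);

/* Placeholder for "execute VMCLEAR on this VMCS region". */
static void vmclear_one(void *vmcs)
{
}

static void crash_vmclear_loaded_vmcss_stub(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs_stub *v;

	/*
	 * Runs on the crash path (possibly from NMI context), so it must not
	 * sleep or take locks; it only walks the executing cpu's local list.
	 */
	list_for_each_entry(v, &per_cpu(loaded_vmcss_stub, cpu),
			    loaded_on_cpu_link)
		vmclear_one(v->vmcs);
}
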