Commit 45fc8757 authored by Thomas Garnier, committed by Ingo Molnar

x86: Make the GDT remapping read-only on 64-bit

This patch makes the GDT remapped pages read-only, to prevent accidental
(or intentional) corruption of this key data structure.

This change is done only on 64-bit, because 32-bit needs the GDT to be
writable for TSS switches.

The native_load_tr_desc() function was adapted to correctly handle a
read-only GDT. The LTR instruction always writes to the TSS entry in the
GDT to set its busy bit, which generates a page fault if the GDT is
read-only. The function now checks whether the current GDT is the
read-only remap and swaps GDTs as needed. This path was tested by booting
multiple machines and verifying that hibernation works properly.
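
As a point of reference, a minimal sketch (illustrative only, not part of
this patch) of why LTR faults on a read-only GDT; the descriptor type
values come from the Intel SDM:

	/*
	 * LTR sets the busy bit in the TSS descriptor, flipping its type
	 * from 0x9 (64-bit TSS, available) to 0xB (64-bit TSS, busy).
	 * That is a memory write into the GDT entry, so it faults when
	 * the GDT mapping is read-only.
	 */
	struct desc_struct *tss = &gdt[GDT_ENTRY_TSS];
	tss->type |= 0x2;	/* roughly what the CPU does internally on LTR */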

KVM's SVM and VMX code was adapted to use the writeable GDT. On VMX, the
per-CPU variable that cached the host GDT descriptor was removed in favor
of functions that fetch the current GDT directly. Instead of reloading the
previously saved GDT, VMX now reloads the fixmap GDT as expected. For
testing, VMs were started and restored on multiple configurations.
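
For reference, a minimal sketch of how the VMX side uses these helpers
after this change (all of the helpers named here appear in the diff below):

	int cpu = raw_smp_processor_id();

	/* Program the host GDT base with the read-only fixmap address. */
	vmcs_writel(HOST_GDTR_BASE, get_current_gdt_ro_vaddr());	/* 22.2.4 */

	/*
	 * After a VM exit, reload the fixmap GDT instead of restoring a
	 * descriptor cached in a per-cpu variable.
	 */
	load_fixmap_gdt(cpu);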
Signed-off-by: Thomas Garnier <thgarnie@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Len Brown <len.brown@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Luis R . Rodriguez <mcgrof@kernel.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rafael J . Wysocki <rjw@rjwysocki.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: kasan-dev@googlegroups.com
Cc: kernel-hardening@lists.openwall.com
Cc: kvm@vger.kernel.org
Cc: lguest@lists.ozlabs.org
Cc: linux-doc@vger.kernel.org
Cc: linux-efi@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: linux-pm@vger.kernel.org
Cc: xen-devel@lists.xenproject.org
Cc: zijun_hu <zijun_hu@htc.com>
Link: http://lkml.kernel.org/r/20170314170508.100882-3-thgarnie@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 69218e47
arch/x86/include/asm/desc.h
@@ -248,9 +248,77 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
 	}
 }
 
+static inline void native_load_gdt(const struct desc_ptr *dtr)
+{
+	asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct desc_ptr *dtr)
+{
+	asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static inline void native_store_gdt(struct desc_ptr *dtr)
+{
+	asm volatile("sgdt %0":"=m" (*dtr));
+}
+
+static inline void native_store_idt(struct desc_ptr *dtr)
+{
+	asm volatile("sidt %0":"=m" (*dtr));
+}
+
+/*
+ * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
+ * a read-only remapping. To prevent a page fault, the GDT is switched to the
+ * original writeable version when needed.
+ */
+#ifdef CONFIG_X86_64
 static inline void native_load_tr_desc(void)
 {
+	struct desc_ptr gdt;
+	int cpu = raw_smp_processor_id();
+	bool restore = 0;
+	struct desc_struct *fixmap_gdt;
+
+	native_store_gdt(&gdt);
+	fixmap_gdt = get_cpu_gdt_ro(cpu);
+
+	/*
+	 * If the current GDT is the read-only fixmap, swap to the original
+	 * writeable version. Swap back at the end.
+	 */
+	if (gdt.address == (unsigned long)fixmap_gdt) {
+		load_direct_gdt(cpu);
+		restore = 1;
+	}
 	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+	if (restore)
+		load_fixmap_gdt(cpu);
+}
+#else
+static inline void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+#endif
+
+static inline unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+
+	asm volatile("str %0":"=r" (tr));
+
+	return tr;
+}
+
+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+	struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
+	unsigned int i;
+
+	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
 }
 
 DECLARE_PER_CPU(bool, __tss_limit_invalid);
@@ -305,44 +373,6 @@ static inline void invalidate_tss_limit(void)
 	this_cpu_write(__tss_limit_invalid, true);
 }
 
-static inline void native_load_gdt(const struct desc_ptr *dtr)
-{
-	asm volatile("lgdt %0"::"m" (*dtr));
-}
-
-static inline void native_load_idt(const struct desc_ptr *dtr)
-{
-	asm volatile("lidt %0"::"m" (*dtr));
-}
-
-static inline void native_store_gdt(struct desc_ptr *dtr)
-{
-	asm volatile("sgdt %0":"=m" (*dtr));
-}
-
-static inline void native_store_idt(struct desc_ptr *dtr)
-{
-	asm volatile("sidt %0":"=m" (*dtr));
-}
-
-static inline unsigned long native_store_tr(void)
-{
-	unsigned long tr;
-
-	asm volatile("str %0":"=r" (tr));
-
-	return tr;
-}
-
-static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
-{
-	struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
-	unsigned int i;
-
-	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
-		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
-}
-
 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
 #define LDT_empty(info) \
 	((info)->base_addr == 0 && \
arch/x86/include/asm/processor.h
@@ -716,6 +716,7 @@ extern struct desc_ptr early_gdt_descr;
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(int);
+extern void load_direct_gdt(int);
 extern void load_fixmap_gdt(int);
 extern void load_percpu_segment(int);
 extern void cpu_init(void);
arch/x86/kernel/cpu/common.c
@@ -448,8 +448,15 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-/* Used by XEN to force the GDT read-only when required */
+/*
+ * On 64-bit the GDT remapping is read-only.
+ * A global is used for Xen to change the default when required.
+ */
+#ifdef CONFIG_X86_64
+pgprot_t pg_fixmap_gdt_flags = PAGE_KERNEL_RO;
+#else
 pgprot_t pg_fixmap_gdt_flags = PAGE_KERNEL;
+#endif
 
 /* Setup the fixmap mapping only once per-processor */
 static inline void setup_fixmap_gdt(int cpu)
@@ -458,6 +465,17 @@ static inline void setup_fixmap_gdt(int cpu)
 		    __pa(get_cpu_gdt_rw(cpu)), pg_fixmap_gdt_flags);
 }
 
+/* Load the original GDT from the per-cpu structure */
+void load_direct_gdt(int cpu)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+}
+EXPORT_SYMBOL_GPL(load_direct_gdt);
+
 /* Load a fixmap remapping of the per-cpu GDT */
 void load_fixmap_gdt(int cpu)
 {
@@ -467,6 +485,7 @@ void load_fixmap_gdt(int cpu)
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 }
+EXPORT_SYMBOL_GPL(load_fixmap_gdt);
 
 /*
  * Current gdt points %fs at the "master" per-cpu area: after this,
@@ -474,11 +493,8 @@ void load_fixmap_gdt(int cpu)
  */
 void switch_to_new_gdt(int cpu)
 {
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
+	/* Load the original GDT */
+	load_direct_gdt(cpu);
 
 	/* Reload the per-cpu base */
 	load_percpu_segment(cpu);
 }
arch/x86/kvm/svm.c
@@ -741,7 +741,6 @@ static int svm_hardware_enable(void)
 	struct svm_cpu_data *sd;
 	uint64_t efer;
-	struct desc_ptr gdt_descr;
 	struct desc_struct *gdt;
 	int me = raw_smp_processor_id();
@@ -763,8 +762,7 @@ static int svm_hardware_enable(void)
 	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
 	sd->next_asid = sd->max_asid + 1;
 
-	native_store_gdt(&gdt_descr);
-	gdt = (struct desc_struct *)gdt_descr.address;
+	gdt = get_current_gdt_rw();
 	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
 	wrmsrl(MSR_EFER, efer | EFER_SVME);
arch/x86/kvm/vmx.c
@@ -935,7 +935,6 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
  */
 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
-static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
 
 /*
  * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we
@@ -2052,14 +2051,13 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
  */
 static unsigned long segment_base(u16 selector)
 {
-	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
 	struct desc_struct *table;
 	unsigned long v;
 
 	if (!(selector & ~SEGMENT_RPL_MASK))
 		return 0;
 
-	table = (struct desc_struct *)gdt->address;
+	table = get_current_gdt_ro();
 
 	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
 		u16 ldt_selector = kvm_read_ldt();
@@ -2164,7 +2162,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 #endif
 	if (vmx->host_state.msr_host_bndcfgs)
 		wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
-	load_gdt(this_cpu_ptr(&host_gdt));
+	load_fixmap_gdt(raw_smp_processor_id());
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -2266,7 +2264,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	if (!already_loaded) {
-		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
+		unsigned long gdt = get_current_gdt_ro_vaddr();
 		unsigned long sysenter_esp;
 
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -2277,7 +2275,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		vmcs_writel(HOST_TR_BASE,
 			    (unsigned long)this_cpu_ptr(&cpu_tss));
-		vmcs_writel(HOST_GDTR_BASE, gdt->address);	/* 22.2.4 */
+		vmcs_writel(HOST_GDTR_BASE, gdt);		/* 22.2.4 */
 
 		/*
 		 * VM exits change the host TR limit to 0x67 after a VM
@@ -3465,8 +3463,6 @@ static int hardware_enable(void)
 		ept_sync_global();
 	}
 
-	native_store_gdt(this_cpu_ptr(&host_gdt));
-
 	return 0;
 }