Commit c482feef authored by Andy Lutomirski, committed by Ingo Molnar

x86/entry/64: Make cpu_entry_area.tss read-only

The TSS is a fairly juicy target for exploits, and, now that the TSS
is in the cpu_entry_area, it's no longer protected by kASLR.  Make it
read-only on x86_64.

On x86_32, it can't be RO because it's written by the CPU during task
switches, and we use a task gate for double faults.  I'd also be
nervous about errata if we tried to make it RO even on configurations
without double fault handling.
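
To make the mechanism easy to see, here is the core of the change
condensed from the setup_cpu_entry_area() hunks below (comments
paraphrased, surrounding code omitted): the TSS mapping protection is
chosen per architecture and passed to the fixmap helper.

    #ifdef CONFIG_X86_64
    	/* On 64-bit, the TSS can be mapped read-only. */
    	pgprot_t tss_prot = PAGE_KERNEL_RO;
    #else
    	/* On 32-bit, the CPU writes to the TSS on task switches, keep it RW. */
    	pgprot_t tss_prot = PAGE_KERNEL;
    #endif

    	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
    				&per_cpu(cpu_tss_rw, cpu),
    				sizeof(struct tss_struct) / PAGE_SIZE,
    				tss_prot);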

[ tglx: AMD confirmed that there is no problem on 64-bit with TSS RO.  So
  	it's probably safe to assume that it's a non-issue, though Intel
  	might have been creative in that area. Still waiting for
  	confirmation. ]
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bpetkov@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150606.733700132@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0f9a4810
@@ -942,7 +942,7 @@ ENTRY(debug)
 	/* Are we currently on the SYSENTER stack? */
 	movl	PER_CPU_VAR(cpu_entry_area), %ecx
-	addl	$CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
+	addl	$CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
 	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
 	cmpl	$SIZEOF_SYSENTER_stack, %ecx
 	jb	.Ldebug_from_sysenter_stack
@@ -986,7 +986,7 @@ ENTRY(nmi)
 	/* Are we currently on the SYSENTER stack? */
 	movl	PER_CPU_VAR(cpu_entry_area), %ecx
-	addl	$CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
+	addl	$CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
 	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
 	cmpl	$SIZEOF_SYSENTER_stack, %ecx
 	jb	.Lnmi_from_sysenter_stack
......
@@ -154,7 +154,7 @@ END(native_usergs_sysret64)
 	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
 
 /* The top word of the SYSENTER stack is hot and is usable as scratch space. */
-#define RSP_SCRATCH	CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + \
+#define RSP_SCRATCH	CPU_ENTRY_AREA_SYSENTER_stack + \
 			SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
 
 ENTRY(entry_SYSCALL_64_trampoline)
@@ -390,7 +390,7 @@ syscall_return_via_sysret:
 	 * Save old stack pointer and switch to trampoline stack.
 	 */
 	movq	%rsp, %rdi
-	movq	PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
 
 	pushq	RSP-RDI(%rdi)	/* RSP */
 	pushq	(%rdi)		/* RDI */
@@ -719,7 +719,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 	 * Save old stack pointer and switch to trampoline stack.
 	 */
 	movq	%rsp, %rdi
-	movq	PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
 
 	/* Copy the IRET frame to the trampoline stack. */
 	pushq	6*8(%rdi)	/* SS */
@@ -934,7 +934,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
 /*
  * Switch to the thread stack.  This is called with the IRET frame and
......
@@ -56,9 +56,14 @@ struct cpu_entry_area {
 	char gdt[PAGE_SIZE];
 
 	/*
-	 * The GDT is just below cpu_tss and thus serves (on x86_64) as a
-	 * a read-only guard page for the SYSENTER stack at the bottom
-	 * of the TSS region.
+	 * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
+	 * a a read-only guard page.
+	 */
+	struct SYSENTER_stack_page SYSENTER_stack_page;
+
+	/*
+	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+	 * we need task switches to work, and task switches write to the TSS.
 	 */
 	struct tss_struct tss;
@@ -247,7 +252,7 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
 
 static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
 {
-	return &get_cpu_entry_area(cpu)->tss.SYSENTER_stack;
+	return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
 }
 
 #endif /* !__ASSEMBLY__ */
......
@@ -340,13 +340,11 @@ struct SYSENTER_stack {
 	unsigned long		words[64];
 };
 
-struct tss_struct {
-	/*
-	 * Space for the temporary SYSENTER stack, used for SYSENTER
-	 * and the entry trampoline as well.
-	 */
-	struct SYSENTER_stack	SYSENTER_stack;
+struct SYSENTER_stack_page {
+	struct SYSENTER_stack stack;
+} __aligned(PAGE_SIZE);
 
+struct tss_struct {
 	/*
 	 * The fixed hardware portion.  This must not cross a page boundary
 	 * at risk of violating the SDM's advice and potentially triggering
@@ -363,7 +361,7 @@ struct tss_struct {
 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
 } __aligned(PAGE_SIZE);
 
-DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
@@ -378,7 +376,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
 #else
-#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
@@ -538,7 +537,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-	this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
......
@@ -79,10 +79,10 @@ do { \
 static inline void refresh_sysenter_cs(struct thread_struct *thread)
 {
 	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
-	if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+	if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
 		return;
 
-	this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+	this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
 	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 }
 #endif
......
@@ -207,7 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
 #endif
 
 #endif
......
@@ -94,10 +94,9 @@ void common(void) {
 	BLANK();
 	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
 
-	OFFSET(TSS_STRUCT_SYSENTER_stack, tss_struct, SYSENTER_stack);
-	DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
-
 	/* Layout info for cpu_entry_area */
 	OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
 	OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+	OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page);
+	DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
 }
@@ -47,8 +47,8 @@ void foo(void)
 	BLANK();
 
 	/* Offset from the sysenter stack to tss.sp0 */
-	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
-	       offsetofend(struct tss_struct, SYSENTER_stack));
+	DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+	       offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 	BLANK();
......
@@ -487,6 +487,9 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
 #endif
 
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page,
+				   SYSENTER_stack_storage);
+
 static void __init
 set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
 {
@@ -500,23 +503,29 @@ static void __init setup_cpu_entry_area(int cpu)
 #ifdef CONFIG_X86_64
 	extern char _entry_trampoline[];
 
-	/* On 64-bit systems, we use a read-only fixmap GDT. */
+	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
 	pgprot_t gdt_prot = PAGE_KERNEL_RO;
+	pgprot_t tss_prot = PAGE_KERNEL_RO;
 #else
 	/*
 	 * On native 32-bit systems, the GDT cannot be read-only because
 	 * our double fault handler uses a task gate, and entering through
-	 * a task gate needs to change an available TSS to busy.  If the GDT
-	 * is read-only, that will triple fault.
+	 * a task gate needs to change an available TSS to busy.  If the
+	 * GDT is read-only, that will triple fault.  The TSS cannot be
+	 * read-only because the CPU writes to it on task switches.
 	 *
-	 * On Xen PV, the GDT must be read-only because the hypervisor requires
-	 * it.
+	 * On Xen PV, the GDT must be read-only because the hypervisor
+	 * requires it.
 	 */
 	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
 		PAGE_KERNEL_RO : PAGE_KERNEL;
+	pgprot_t tss_prot = PAGE_KERNEL;
 #endif
 
 	__set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
+	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page),
+				per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1,
+				PAGE_KERNEL);
 
 	/*
 	 * The Intel SDM says (Volume 3, 7.2.1):
@@ -539,9 +548,9 @@ static void __init setup_cpu_entry_area(int cpu)
 		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
 	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
 	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
-				&per_cpu(cpu_tss, cpu),
+				&per_cpu(cpu_tss_rw, cpu),
 				sizeof(struct tss_struct) / PAGE_SIZE,
-				PAGE_KERNEL);
+				tss_prot);
 
 #ifdef CONFIG_X86_32
 	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
@@ -1305,7 +1314,7 @@ void enable_sep_cpu(void)
 		return;
 
 	cpu = get_cpu();
-	tss = &per_cpu(cpu_tss, cpu);
+	tss = &per_cpu(cpu_tss_rw, cpu);
 
 	/*
 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1575,7 +1584,7 @@ void cpu_init(void)
 	if (cpu)
 		load_ucode_ap();
 
-	t = &per_cpu(cpu_tss, cpu);
+	t = &per_cpu(cpu_tss_rw, cpu);
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1667,7 +1676,7 @@ void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
-	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
 
 	wait_for_master_cpu(cpu);
......
@@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 	 * because the ->io_bitmap_max value must match the bitmap
 	 * contents:
 	 */
-	tss = &per_cpu(cpu_tss, get_cpu());
+	tss = &per_cpu(cpu_tss_rw, get_cpu());
 
 	if (turn_on)
 		bitmap_clear(t->io_bitmap_ptr, from, num);
......
@@ -47,7 +47,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
 	.x86_tss = {
 		/*
 		 * .sp0 is only used when entering ring 0 from a lower
@@ -82,7 +82,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -111,7 +111,7 @@ void exit_thread(struct task_struct *tsk)
 	struct fpu *fpu = &t->fpu;
 
 	if (bp) {
-		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
 		t->io_bitmap_ptr = NULL;
 		clear_thread_flag(TIF_IO_BITMAP);
......
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *prev_fpu = &prev->fpu;
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
......
@@ -399,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *prev_fpu = &prev->fpu;
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
 		     this_cpu_read(irq_count) != -1);
......
@@ -364,7 +364,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 		regs->cs == __KERNEL_CS &&
 		regs->ip == (unsigned long)native_irq_return_iret)
 	{
-		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
 		/*
 		 * regs->sp points to the failing IRET frame on the
@@ -649,7 +649,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 	 * exception came from the IRET target.
 	 */
 	struct bad_iret_stack *new_stack =
-		(struct bad_iret_stack *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
 	/* Copy the IRET target to the new stack. */
 	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
......
@@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops)
 		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
 		/*
-		 * Use cpu_tss as a cacheline-aligned, seldomly
+		 * Use cpu_tss_rw as a cacheline-aligned, seldomly
 		 * accessed per-cpu variable as the monitor target.
 		 */
-		__monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+		__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
 		/*
 		 * AMD, like Intel, supports the EAX hint and EAX=0xf
......
@@ -818,7 +818,7 @@ static void xen_load_sp0(unsigned long sp0)
 	mcs = xen_mc_entry(0);
 	MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
-	this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 void xen_set_iopl_mask(unsigned mask)
......