Commit 8ef46a67 authored by Andy Lutomirski, committed by Ingo Molnar

x86/asm/entry: Add this_cpu_sp0() to read sp0 for the current cpu

We currently store references to the top of the kernel stack in
multiple places: kernel_stack (with an offset) and
init_tss.x86_tss.sp0 (no offset).  The latter is defined by
hardware and is a clean canonical way to find the top of the
stack.  Add an accessor so we can start using it.
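
For illustration, a minimal sketch of the two approaches (the two
helper names here are hypothetical; this_cpu_sp0() is the accessor
added below, kernel_stack and KERNEL_STACK_OFFSET are the
pre-existing mechanism):

    /* Old way: per-cpu stack pointer plus a fixed fudge offset. */
    static inline unsigned long top_of_stack_via_kernel_stack(void)
    {
    	return this_cpu_read(kernel_stack) + KERNEL_STACK_OFFSET;
    }

    /* New way: read the hardware-defined TSS field directly. */
    static inline unsigned long top_of_stack_via_sp0(void)
    {
    	return this_cpu_sp0();
    }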

This needs minor paravirt tweaks.  On native, sp0 defines the
top of the kernel stack and is therefore always correct.  On Xen
and lguest, the hypervisor tracks the top of the stack, but we
want to start reading sp0 in the kernel.  Fixing this is simple:
just update our local copy of sp0 as well as the hypervisor's
copy on task switches.
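
As a sketch, the common shape of a paravirtualized load_sp0 then
looks like this (hypervisor_set_stack() is a stand-in for the real
hypercall, e.g. LHCALL_SET_STACK on lguest or MULTI_stack_switch on
Xen; see the hunks below):

    static void pv_load_sp0(struct tss_struct *tss,
    			    struct thread_struct *thread)
    {
    	/* Tell the hypervisor about the new ring-0 stack... */
    	hypervisor_set_stack(__KERNEL_DS, thread->sp0);

    	/* ...and keep the local TSS copy coherent so that
    	 * this_cpu_sp0() returns the right value in the guest too. */
    	tss->x86_tss.sp0 = thread->sp0;
    }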
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/8d675581859712bee09a055ed8f785d80dac1eca.1425611534.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5eca7453
@@ -564,6 +564,11 @@ static inline void native_swapgs(void)
 #endif
 }
 
+static inline unsigned long this_cpu_sp0(void)
+{
+	return this_cpu_read_stable(init_tss.x86_tss.sp0);
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
...
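Note that this_cpu_read_stable() lets the compiler treat the per-cpu
value as invariant within a function, which is safe here because sp0
only changes at context switch. A hypothetical caller (not part of
this patch) could use the accessor like so:

    /* Does addr lie on the current task's kernel stack?  sp0 is the
     * top of the stack; the stack occupies [sp0 - THREAD_SIZE, sp0). */
    static inline bool on_current_kernel_stack(unsigned long addr)
    {
    	unsigned long top = this_cpu_sp0();

    	return addr >= top - THREAD_SIZE && addr < top;
    }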
@@ -38,6 +38,7 @@
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
 __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+EXPORT_PER_CPU_SYMBOL_GPL(init_tss);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
...
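The new export is needed because this_cpu_sp0() is a static inline in
a header: any module that uses it ends up referencing init_tss
directly. A purely illustrative GPL module showing such a reference
(not part of the tree):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <asm/processor.h>

    static int __init sp0_demo_init(void)
    {
    	/* Pin the task so the CPU cannot change under us. */
    	preempt_disable();
    	pr_info("sp0_demo: this CPU's sp0 = %#lx\n", this_cpu_sp0());
    	preempt_enable();
    	return 0;
    }
    module_init(sp0_demo_init);

    MODULE_LICENSE("GPL");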
@@ -1076,6 +1076,7 @@ static void lguest_load_sp0(struct tss_struct *tss,
 {
 	lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
 		    THREAD_SIZE / PAGE_SIZE);
+	tss->x86_tss.sp0 = thread->sp0;
 }
 
 /* Let's just say, I wouldn't do debugging under a Guest. */
...
@@ -912,6 +912,7 @@ static void xen_load_sp0(struct tss_struct *tss,
 	mcs = xen_mc_entry(0);
 	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	tss->x86_tss.sp0 = thread->sp0;
 }
 
 static void xen_set_iopl_mask(unsigned mask)
...