Commit 569dd8b4 authored by Thomas Gleixner

x86/entry: Convert system vectors to irq stack macro

To inline the stack switching and to prepare for enabling
CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK, provide a macro template for system
vectors and device interrupts, and convert the system vectors over to it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210210002512.676197354@linutronix.de
parent a0cfc74d
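Editor's orientation sketch (not part of the commit): with this patch applied, a handler defined via DEFINE_IDTENTRY_SYSVEC roughly expands to the following. sysvec_example/__sysvec_example are placeholder names; the body mirrors the idtentry macro hunk further down, where irq_enter_rcu()/irq_exit_rcu() no longer appear because they are now emitted inside run_sysvec_on_irqstack_cond().

	/* Approximate expansion of DEFINE_IDTENTRY_SYSVEC(sysvec_example) */
	__visible noinstr void sysvec_example(struct pt_regs *regs)
	{
		irqentry_state_t state = irqentry_enter(regs);

		instrumentation_begin();
		kvm_set_cpu_l1tf_flush_l1d();
		/* irq_enter_rcu()/irq_exit_rcu() now happen inside the macro */
		run_sysvec_on_irqstack_cond(__sysvec_example, regs);
		instrumentation_end();
		irqentry_exit(regs, state);
	}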
@@ -762,7 +762,6 @@ SYM_CODE_END(.Lbad_gs)
* rdx: Function argument (can be NULL if none)
*/
SYM_FUNC_START(asm_call_on_stack)
SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
/*
* Save the frame pointer unconditionally. This allows the ORC
@@ -237,10 +237,8 @@ __visible noinstr void func(struct pt_regs *regs) \
irqentry_state_t state = irqentry_enter(regs); \
\
instrumentation_begin(); \
irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \
run_sysvec_on_irqstack_cond(__##func, regs); \
irq_exit_rcu(); \
instrumentation_end(); \
irqentry_exit(regs, state); \
} \
@@ -105,14 +105,69 @@
#define assert_arg_type(arg, proto) \
static_assert(__builtin_types_compatible_p(typeof(arg), proto))
/*
* Macro to invoke system vector and device interrupt C handlers.
*/
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...) \
{ \
/* \
* User mode entry and interrupt on the irq stack do not \
* switch stacks. If from user mode the task stack is empty. \
*/ \
if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \
irq_enter_rcu(); \
func(c_args); \
irq_exit_rcu(); \
} else { \
/* \
* Mark the irq stack inuse _before_ and unmark _after_ \
* switching stacks. Interrupts are disabled in both \
* places. Invoke the stack switch macro with the call \
* sequence which matches the above direct invocation. \
*/ \
__this_cpu_write(hardirq_stack_inuse, true); \
call_on_irqstack(func, asm_call, constr); \
__this_cpu_write(hardirq_stack_inuse, false); \
} \
}
/*
* Function call sequence for __call_on_irqstack() for system vectors.
*
* Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
* mechanism because these functions are global and cannot be optimized out
* when compiling a particular source file which uses one of these macros.
*
* The argument (regs) does not need to be pushed or stashed in a callee
* saved register to be safe vs. the irq_enter_rcu() call because the
* clobbers already prevent the compiler from storing it in a callee
* clobbered register. As the compiler has to preserve @regs for the final
* call to idtentry_exit() anyway, it's likely that it does not cause extra
* effort for this asm magic.
*/
#define ASM_CALL_SYSVEC \
"call irq_enter_rcu \n" \
"movq %[arg1], %%rdi \n" \
"call %P[__func] \n" \
"call irq_exit_rcu \n"
#define SYSVEC_CONSTRAINTS , [arg1] "r" (regs)
#define run_sysvec_on_irqstack_cond(func, regs) \
{ \
assert_function_type(func, void (*)(struct pt_regs *)); \
assert_arg_type(regs, struct pt_regs *); \
\
call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC, \
SYSVEC_CONSTRAINTS, regs); \
}
static __always_inline bool irqstack_active(void)
{
return __this_cpu_read(hardirq_stack_inuse);
}
void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
struct pt_regs *regs);
void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
struct irq_desc *desc);
......@@ -125,17 +180,6 @@ static __always_inline void __run_on_irqstack(void (*func)(void))
__this_cpu_write(hardirq_stack_inuse, false);
}
static __always_inline void
__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
struct pt_regs *regs)
{
void *tos = __this_cpu_read(hardirq_stack_ptr);
__this_cpu_write(hardirq_stack_inuse, true);
asm_call_sysvec_on_stack(tos, func, regs);
__this_cpu_write(hardirq_stack_inuse, false);
}
static __always_inline void
__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
struct irq_desc *desc)
@@ -148,10 +192,17 @@ __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
}
#else /* CONFIG_X86_64 */
/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs) \
{ \
irq_enter_rcu(); \
func(regs); \
irq_exit_rcu(); \
}
static inline bool irqstack_active(void) { return false; }
static inline void __run_on_irqstack(void (*func)(void)) { }
static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
struct pt_regs *regs) { }
static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
struct irq_desc *desc) { }
#endif /* !CONFIG_X86_64 */
@@ -177,18 +228,6 @@ static __always_inline void run_on_irqstack_cond(void (*func)(void),
func();
}
static __always_inline void
run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
if (irq_needs_irq_stack(regs))
__run_sysvec_on_irqstack(func, regs);
else
func(regs);
}
static __always_inline void
run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
struct pt_regs *regs)
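The last hunk leaves the device interrupt path (run_irq_on_irqstack_cond()) on the old inline-function scheme; since the commit message says the template covers device interrupts as well, that conversion presumably follows in a later patch. Purely to illustrate why call_on_irqstack_cond() takes the asm_call and constraint parameters, a device interrupt variant could look like the editor's sketch below; ASM_CALL_IRQ, IRQ_CONSTRAINTS and the macro shape are hypothetical and not part of this commit.

	/* Hypothetical device interrupt variant built on the same template */
	#define ASM_CALL_IRQ						\
		"call irq_enter_rcu			\n"		\
		"movq	%[arg1], %%rdi			\n"		\
		"call %P[__func]			\n"		\
		"call irq_exit_rcu			\n"

	#define IRQ_CONSTRAINTS	, [arg1] "r" (desc)

	#define run_irq_on_irqstack_cond(func, regs, desc)		\
	{								\
		assert_function_type(func, void (*)(struct irq_desc *)); \
		assert_arg_type(desc, struct irq_desc *);		\
									\
		call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,		\
				      IRQ_CONSTRAINTS, desc);		\
	}

Only the handler argument changes (an irq_desc instead of pt_regs), and with it the input constraint fed into the asm template; everything else reuses call_on_irqstack_cond() unchanged.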