Commit d7bfc7d5 authored by Peter Zijlstra

x86/xen: Make irq_enable() noinstr

vmlinux.o: warning: objtool: pv_ops[32]: native_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: __raw_callee_save_xen_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: xen_irq_enable_direct
vmlinux.o: warning: objtool: lock_is_held_type()+0xfe: call to pv_ops[32]() leaves .noinstr.text section
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20210624095148.872254932@infradead.org
parent 74ea805b
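The warnings above are objtool's noinstr validation: lock_is_held_type() lives in .noinstr.text, and under CONFIG_PARAVIRT_XXL its interrupt-flag handling ends up calling through the pv_ops[32] (irq.irq_enable) slot, whose possible targets listed above were not placed in .noinstr.text. A minimal sketch of the pattern objtool rejects (the function names here are hypothetical, not from this commit):

    /* Hypothetical example of the rule objtool enforces. */
    #include <linux/compiler_types.h>   /* for noinstr */

    static void plain_helper(void)      /* emitted into regular .text */
    {
    }

    static noinstr void noinstr_entry(void)     /* emitted into .noinstr.text */
    {
            /* objtool: call to plain_helper() leaves .noinstr.text section */
            plain_helper();
    }

The changes below give every reachable target of the slot a home in .noinstr.text.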
arch/x86/kernel/paravirt.c
@@ -238,6 +238,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
 {
 	native_set_debugreg(regno, val);
 }
+
+static noinstr void pv_native_irq_enable(void)
+{
+	native_irq_enable();
+}
 #endif
 
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
@@ -302,7 +307,7 @@ struct paravirt_patch_template pv_ops = {
 	/* Irq ops. */
 	.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
 	.irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
-	.irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
+	.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
 	.irq.safe_halt = native_safe_halt,
 	.irq.halt = native_halt,
 #endif /* CONFIG_PARAVIRT_XXL */
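Under CONFIG_PARAVIRT_XXL, arch_local_irq_enable() compiles to a call through this irq.irq_enable slot. native_irq_enable() is an __always_inline STI wrapper, and the out-of-line copy the compiler emits when its address is taken lands in plain .text; the new pv_native_irq_enable() wrapper gives the native case a copy in .noinstr.text instead. Roughly, simplified from arch/x86/include/asm/irqflags.h and paravirt.h (a sketch, not verbatim):

    static __always_inline void native_irq_enable(void)
    {
            asm volatile("sti" : : : "memory");     /* just STI */
    }

    static __always_inline void arch_local_irq_enable(void)
    {
            PVOP_VCALLEE0(irq.irq_enable);  /* indirect call via pv_ops[32] */
    }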
arch/x86/xen/irq.c
@@ -53,7 +53,7 @@ asmlinkage __visible void xen_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-asmlinkage __visible void xen_irq_enable(void)
+asmlinkage __visible noinstr void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
@@ -76,7 +76,7 @@ asmlinkage __visible void xen_irq_enable(void)
 	preempt_enable();
 }
-PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
+__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable, ".noinstr.text");
 
 static void xen_safe_halt(void)
 {
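__PV_CALLEE_SAVE_REGS_THUNK() generates the asm thunk __raw_callee_save_xen_irq_enable named in the second warning; this patch series gave the macro a section parameter, with PV_CALLEE_SAVE_REGS_THUNK() as shorthand for the ".text" default. A condensed sketch of the relationship (not the verbatim macro; the real body saves and restores all caller-saved registers around the call):

    #define __PV_CALLEE_SAVE_REGS_THUNK(func, section)          \
            extern typeof(func) __raw_callee_save_##func;       \
            asm(".pushsection " section ", \"ax\";"             \
                "__raw_callee_save_" #func ": "                 \
                /* push caller-saved regs; call func; pop; ret */ \
                ".popsection")

    #define PV_CALLEE_SAVE_REGS_THUNK(func)                     \
            __PV_CALLEE_SAVE_REGS_THUNK(func, ".text")

Passing ".noinstr.text" here puts the xen_irq_enable() thunk in the same section as its now-noinstr callee.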
arch/x86/xen/xen-asm.S
@@ -21,33 +21,6 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 
-/*
- * Enable events. This clears the event mask and tests the pending
- * event status with one and operation. If there are pending events,
- * then enter the hypervisor to get them handled.
- */
-SYM_FUNC_START(xen_irq_enable_direct)
-	FRAME_BEGIN
-	/* Unmask events */
-	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-
-	/*
-	 * Preempt here doesn't matter because that will deal with any
-	 * pending interrupts. The pending check may end up being run
-	 * on the wrong CPU, but that doesn't hurt.
-	 */
-
-	/* Test for pending */
-	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jz 1f
-
-	call check_events
-1:
-	FRAME_END
-	ret
-SYM_FUNC_END(xen_irq_enable_direct)
-
 /*
  * Disabling events is simply a matter of making the event mask
  * non-zero.
@@ -57,6 +30,8 @@ SYM_FUNC_START(xen_irq_disable_direct)
 	ret
 SYM_FUNC_END(xen_irq_disable_direct)
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
@@ -86,7 +61,32 @@ SYM_FUNC_START(check_events)
 	ret
 SYM_FUNC_END(check_events)
 
-.pushsection .noinstr.text, "ax"
+/*
+ * Enable events. This clears the event mask and tests the pending
+ * event status with one and operation. If there are pending events,
+ * then enter the hypervisor to get them handled.
+ */
+SYM_FUNC_START(xen_irq_enable_direct)
+	FRAME_BEGIN
+	/* Unmask events */
+	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+
+	/*
+	 * Preempt here doesn't matter because that will deal with any
+	 * pending interrupts. The pending check may end up being run
+	 * on the wrong CPU, but that doesn't hurt.
+	 */
+
+	/* Test for pending */
+	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+	jz 1f
+
+	call check_events
+1:
+	FRAME_END
+	ret
+SYM_FUNC_END(xen_irq_enable_direct)
+
 /*
  * (xen_)save_fl is used to get the current interrupt enable status.
  * Callers expect the status to be in X86_EFLAGS_IF, and other bits
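The .pushsection moved above check_events (matched by a .popsection later in the file) directs everything in this region, check_events and the relocated xen_irq_enable_direct included, into .noinstr.text, so the call between them no longer crosses a section boundary. The C-side counterpart, when a noinstr function genuinely must call instrumentable code, is an explicitly annotated window, which objtool likewise accepts; a hypothetical sketch:

    #include <linux/compiler_types.h>
    #include <linux/instrumentation.h>
    #include <linux/printk.h>

    static noinstr void helper_in_noinstr(void) { }

    static noinstr void noinstr_path(void)      /* hypothetical */
    {
            helper_in_noinstr();        /* fine: callee is in .noinstr.text */

            instrumentation_begin();    /* open an annotated window in which */
            pr_warn("example\n");       /* instrumented calls are permitted  */
            instrumentation_end();
    }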