Commit edcb5cf8 authored by Juergen Gross, committed by Ingo Molnar

x86/paravirt/xen: Remove xen_patch()

Xen's paravirt patch function xen_patch() does some special casing for
irq_ops functions to apply relocations when those functions can be
patched inline instead of calls.

Unfortunately none of the special case function replacements is small
enough to be patched inline, so the special case never applies.

As xen_patch() will call paravirt_patch_default() in all cases it can
be just dropped. xen-asm.h doesn't seem necessary without xen_patch()
as the only thing left in it would be the definition of XEN_EFLAGS_NMI
used only once. So move that definition and remove xen-asm.h.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boris.ostrovsky@oracle.com
Cc: lguest@lists.ozlabs.org
Cc: rusty@rustcorp.com.au
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/20170816173157.8633-2-jgross@suse.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ee97638b
...@@ -981,59 +981,6 @@ void __ref xen_setup_vcpu_info_placement(void) ...@@ -981,59 +981,6 @@ void __ref xen_setup_vcpu_info_placement(void)
} }
} }
/*
 * Xen's pv_init_ops.patch hook: try to patch selected pv_irq_ops call
 * sites inline with the xen_*_direct asm stubs, falling back to
 * paravirt_patch_default() for everything else (or when the stub does
 * not fit in the patch site).
 *
 * NOTE(review): per the commit message above, none of the stubs is
 * actually small enough to fit, so the inline path never triggers and
 * this function is being removed.
 */
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
unsigned long addr, unsigned len)
{
char *start, *end, *reloc;
unsigned ret;
start = end = reloc = NULL;
/*
 * SITE() expands to a switch case for one pv op: if vcpu info is in
 * percpu data, point start/end/reloc at the direct asm stub (symbols
 * emitted by RELOC()/ENDPATCH() in xen-asm.h), then jump to the
 * shared patching code below.
 */
#define SITE(op, x) \
case PARAVIRT_PATCH(op.x): \
if (xen_have_vcpu_info_placement) { \
start = (char *)xen_##x##_direct; \
end = xen_##x##_direct_end; \
reloc = xen_##x##_direct_reloc; \
} \
goto patch_site
switch (type) {
SITE(pv_irq_ops, irq_enable);
SITE(pv_irq_ops, irq_disable);
SITE(pv_irq_ops, save_fl);
SITE(pv_irq_ops, restore_fl);
#undef SITE
patch_site:
/* No stub selected, or stub larger than the patch site: punt. */
if (start == NULL || (end-start) > len)
goto default_patch;
/* Copy the stub's instructions into the patch site buffer. */
ret = paravirt_patch_insns(insnbuf, len, start, end);
/* Note: because reloc is assigned from something that
appears to be an array, gcc assumes it's non-null,
but doesn't know its relationship with start and
end. */
if (reloc > start && reloc < end) {
/*
 * The stub contains one absolute reference (recorded by
 * RELOC()); adjust it for the copy's new address.
 */
int reloc_off = reloc - start;
long *relocp = (long *)(insnbuf + reloc_off);
long delta = start - (char *)addr;
*relocp += delta;
}
break;
default_patch:
/* Fall through target for non-special ops and oversized stubs. */
default:
ret = paravirt_patch_default(type, clobbers, insnbuf,
addr, len);
break;
}
return ret;
}
static const struct pv_info xen_info __initconst = { static const struct pv_info xen_info __initconst = {
.shared_kernel_pmd = 0, .shared_kernel_pmd = 0,
...@@ -1043,10 +990,6 @@ static const struct pv_info xen_info __initconst = { ...@@ -1043,10 +990,6 @@ static const struct pv_info xen_info __initconst = {
.name = "Xen", .name = "Xen",
}; };
static const struct pv_init_ops xen_init_ops __initconst = {
.patch = xen_patch,
};
static const struct pv_cpu_ops xen_cpu_ops __initconst = { static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.cpuid = xen_cpuid, .cpuid = xen_cpuid,
...@@ -1244,7 +1187,7 @@ asmlinkage __visible void __init xen_start_kernel(void) ...@@ -1244,7 +1187,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Install Xen paravirt ops */ /* Install Xen paravirt ops */
pv_info = xen_info; pv_info = xen_info;
pv_init_ops = xen_init_ops; pv_init_ops.patch = paravirt_patch_default;
pv_cpu_ops = xen_cpu_ops; pv_cpu_ops = xen_cpu_ops;
x86_platform.get_nmi_reason = xen_get_nmi_reason; x86_platform.get_nmi_reason = xen_get_nmi_reason;
......
/* /*
* Asm versions of Xen pv-ops, suitable for either direct use or * Asm versions of Xen pv-ops, suitable for direct use.
* inlining. The inline versions are the same as the direct-use
* versions, with the pre- and post-amble chopped off.
*
* This code is encoded for size rather than absolute efficiency, with
* a view to being able to inline as much as possible.
* *
* We only bother with direct forms (ie, vcpu in percpu data) of the * We only bother with direct forms (ie, vcpu in percpu data) of the
* operations here; the indirect forms are better handled in C, since * operations here; the indirect forms are better handled in C.
* they're generally too large to inline anyway.
*/ */
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
...@@ -16,7 +10,7 @@ ...@@ -16,7 +10,7 @@
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/frame.h> #include <asm/frame.h>
#include "xen-asm.h" #include <linux/linkage.h>
/* /*
* Enable events. This clears the event mask and tests the pending * Enable events. This clears the event mask and tests the pending
...@@ -38,13 +32,11 @@ ENTRY(xen_irq_enable_direct) ...@@ -38,13 +32,11 @@ ENTRY(xen_irq_enable_direct)
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jz 1f jz 1f
2: call check_events call check_events
1: 1:
ENDPATCH(xen_irq_enable_direct)
FRAME_END FRAME_END
ret ret
ENDPROC(xen_irq_enable_direct) ENDPROC(xen_irq_enable_direct)
RELOC(xen_irq_enable_direct, 2b+1)
/* /*
...@@ -53,10 +45,8 @@ ENDPATCH(xen_irq_enable_direct) ...@@ -53,10 +45,8 @@ ENDPATCH(xen_irq_enable_direct)
*/ */
ENTRY(xen_irq_disable_direct) ENTRY(xen_irq_disable_direct)
movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
ret ret
ENDPROC(xen_irq_disable_direct) ENDPROC(xen_irq_disable_direct)
RELOC(xen_irq_disable_direct, 0)
/* /*
* (xen_)save_fl is used to get the current interrupt enable status. * (xen_)save_fl is used to get the current interrupt enable status.
...@@ -71,10 +61,8 @@ ENTRY(xen_save_fl_direct) ...@@ -71,10 +61,8 @@ ENTRY(xen_save_fl_direct)
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
setz %ah setz %ah
addb %ah, %ah addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
ret ret
ENDPROC(xen_save_fl_direct) ENDPROC(xen_save_fl_direct)
RELOC(xen_save_fl_direct, 0)
/* /*
...@@ -101,13 +89,11 @@ ENTRY(xen_restore_fl_direct) ...@@ -101,13 +89,11 @@ ENTRY(xen_restore_fl_direct)
/* check for unmasked and pending */ /* check for unmasked and pending */
cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jnz 1f jnz 1f
2: call check_events call check_events
1: 1:
ENDPATCH(xen_restore_fl_direct)
FRAME_END FRAME_END
ret ret
ENDPROC(xen_restore_fl_direct) ENDPROC(xen_restore_fl_direct)
RELOC(xen_restore_fl_direct, 2b+1)
/* /*
......
#ifndef _XEN_XEN_ASM_H
#define _XEN_XEN_ASM_H
#include <linux/linkage.h>
/*
 * Helpers used by the xen_*_direct asm stubs so xen_patch() can find
 * them (see SITE() in enlighten.c):
 *
 * RELOC(x, v):   emit a global symbol x_reloc pointing at the one
 *                address inside the stub that needs relocating when
 *                the stub is copied inline (v == 0 means "none").
 * ENDPATCH(x):   emit a global symbol x_end marking the end of the
 *                patchable body of stub x.
 */
#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
#define ENDPATCH(x) .globl x##_end; x##_end=.
/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI 0x80000000
#endif
/* /*
* Asm versions of Xen pv-ops, suitable for either direct use or * Asm versions of Xen pv-ops, suitable for direct use.
* inlining. The inline versions are the same as the direct-use
* versions, with the pre- and post-amble chopped off.
*
* This code is encoded for size rather than absolute efficiency, with
* a view to being able to inline as much as possible.
* *
* We only bother with direct forms (ie, vcpu in pda) of the * We only bother with direct forms (ie, vcpu in pda) of the
* operations here; the indirect forms are better handled in C, since * operations here; the indirect forms are better handled in C.
* they're generally too large to inline anyway.
*/ */
#include <asm/thread_info.h> #include <asm/thread_info.h>
...@@ -18,21 +12,10 @@ ...@@ -18,21 +12,10 @@
#include <xen/interface/xen.h> #include <xen/interface/xen.h>
#include "xen-asm.h" #include <linux/linkage.h>
/* /* Pseudo-flag used for virtual NMI, which we don't implement yet */
* Force an event check by making a hypercall, but preserve regs #define XEN_EFLAGS_NMI 0x80000000
* before making the call.
*/
check_events:
/*
 * Force an event check via xen_force_evtchn_callback while
 * preserving the C-clobbered registers, so callers (the irq
 * enable/restore stubs) need no register discipline of their own.
 */
push %eax
push %ecx
push %edx
call xen_force_evtchn_callback
pop %edx
pop %ecx
pop %eax
ret
/* /*
* This is run where a normal iret would be run, with the same stack setup: * This is run where a normal iret would be run, with the same stack setup:
......
/* /*
* Asm versions of Xen pv-ops, suitable for either direct use or * Asm versions of Xen pv-ops, suitable for direct use.
* inlining. The inline versions are the same as the direct-use
* versions, with the pre- and post-amble chopped off.
*
* This code is encoded for size rather than absolute efficiency, with
* a view to being able to inline as much as possible.
* *
* We only bother with direct forms (ie, vcpu in pda) of the * We only bother with direct forms (ie, vcpu in pda) of the
* operations here; the indirect forms are better handled in C, since * operations here; the indirect forms are better handled in C.
* they're generally too large to inline anyway.
*/ */
#include <asm/errno.h> #include <asm/errno.h>
...@@ -20,7 +14,7 @@ ...@@ -20,7 +14,7 @@
#include <xen/interface/xen.h> #include <xen/interface/xen.h>
#include "xen-asm.h" #include <linux/linkage.h>
ENTRY(xen_adjust_exception_frame) ENTRY(xen_adjust_exception_frame)
mov 8+0(%rsp), %rcx mov 8+0(%rsp), %rcx
...@@ -46,9 +40,7 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 ...@@ -46,9 +40,7 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
*/ */
ENTRY(xen_iret) ENTRY(xen_iret)
pushq $0 pushq $0
1: jmp hypercall_iret jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
ENTRY(xen_sysret64) ENTRY(xen_sysret64)
/* /*
...@@ -65,9 +57,7 @@ ENTRY(xen_sysret64) ...@@ -65,9 +57,7 @@ ENTRY(xen_sysret64)
pushq %rcx pushq %rcx
pushq $VGCF_in_syscall pushq $VGCF_in_syscall
1: jmp hypercall_iret jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
/* /*
* Xen handles syscall callbacks much like ordinary exceptions, which * Xen handles syscall callbacks much like ordinary exceptions, which
......
...@@ -129,17 +129,10 @@ static inline void __init xen_efi_init(void) ...@@ -129,17 +129,10 @@ static inline void __init xen_efi_init(void)
} }
#endif #endif
/* Declare an asm function, along with symbols needed to make it __visible void xen_irq_enable_direct(void);
inlineable */ __visible void xen_irq_disable_direct(void);
#define DECL_ASM(ret, name, ...) \ __visible unsigned long xen_save_fl_direct(void);
__visible ret name(__VA_ARGS__); \ __visible void xen_restore_fl_direct(unsigned long);
extern char name##_end[] __visible; \
extern char name##_reloc[] __visible
DECL_ASM(void, xen_irq_enable_direct, void);
DECL_ASM(void, xen_irq_disable_direct, void);
DECL_ASM(unsigned long, xen_save_fl_direct, void);
DECL_ASM(void, xen_restore_fl_direct, unsigned long);
/* These are not functions, and cannot be called normally */ /* These are not functions, and cannot be called normally */
__visible void xen_iret(void); __visible void xen_iret(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment