Commit e7ae2ecd authored by Marc Zyngier

Merge branch 'kvm-arm64/hyp-reloc' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents c5db649f bc93763f
...@@ -7,6 +7,9 @@ ...@@ -7,6 +7,9 @@
#ifndef __ARM64_HYP_IMAGE_H__ #ifndef __ARM64_HYP_IMAGE_H__
#define __ARM64_HYP_IMAGE_H__ #define __ARM64_HYP_IMAGE_H__
#define __HYP_CONCAT(a, b) a ## b
#define HYP_CONCAT(a, b) __HYP_CONCAT(a, b)
/* /*
* KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_, * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
* to separate it from the kernel proper. * to separate it from the kernel proper.
...@@ -21,9 +24,31 @@ ...@@ -21,9 +24,31 @@
*/ */
#define HYP_SECTION_NAME(NAME) .hyp##NAME #define HYP_SECTION_NAME(NAME) .hyp##NAME
/* Symbol defined at the beginning of each hyp section. */
#define HYP_SECTION_SYMBOL_NAME(NAME) \
HYP_CONCAT(__hyp_section_, HYP_SECTION_NAME(NAME))
/*
* Helper to generate linker script statements starting a hyp section.
*
* A symbol with a well-known name is defined at the first byte. This
* is used as a base for hyp relocations (see gen-hyprel.c). It must
* be defined inside the section so the linker of `vmlinux` cannot
* separate it from the section data.
*/
#define BEGIN_HYP_SECTION(NAME) \
HYP_SECTION_NAME(NAME) : { \
HYP_SECTION_SYMBOL_NAME(NAME) = .;
/* Helper to generate linker script statements ending a hyp section. */
#define END_HYP_SECTION \
}
/* Defines an ELF hyp section from input section @NAME and its subsections. */ /* Defines an ELF hyp section from input section @NAME and its subsections. */
#define HYP_SECTION(NAME) \ #define HYP_SECTION(NAME) \
HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) } BEGIN_HYP_SECTION(NAME) \
*(NAME NAME##.*) \
END_HYP_SECTION
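
For illustration only (not part of the patch): with these helpers, HYP_SECTION(.text) expands, roughly, to

	.hyp.text : {
		__hyp_section_.hyp.text = .;
		*(.text .text.*)
	}

The two-level HYP_CONCAT exists so that HYP_SECTION_NAME(NAME) is expanded before pasting, which is what produces the __hyp_section_.hyp.text style of symbol that gen-hyprel.c can use as a per-section base.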
/* /*
* Defines a linker script alias of a kernel-proper symbol referenced by * Defines a linker script alias of a kernel-proper symbol referenced by
......
...@@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void); ...@@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void); extern u32 __kvm_get_mdcr_el2(void);
#if defined(GCC_VERSION) && GCC_VERSION < 50000
#define SYM_CONSTRAINT "i"
#else
#define SYM_CONSTRAINT "S"
#endif
/*
* Obtain the PC-relative address of a kernel symbol
* s: symbol
*
* The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
* constant pool or something similar. This works well for HYP, as an
* absolute VA is guaranteed to be wrong. Only use this if trying to
* obtain the address of a symbol (i.e. not something you obtained by
* following a pointer).
*/
#define hyp_symbol_addr(s) \
({ \
typeof(s) *addr; \
asm("adrp %0, %1\n" \
"add %0, %0, :lo12:%1\n" \
: "=r" (addr) : SYM_CONSTRAINT (&s)); \
addr; \
})
#define __KVM_EXTABLE(from, to) \ #define __KVM_EXTABLE(from, to) \
" .pushsection __kvm_ex_table, \"a\"\n" \ " .pushsection __kvm_ex_table, \"a\"\n" \
" .align 3\n" \ " .align 3\n" \
......
...@@ -73,49 +73,39 @@ alternative_cb_end ...@@ -73,49 +73,39 @@ alternative_cb_end
.endm .endm
/* /*
* Convert a kernel image address to a PA * Convert a hypervisor VA to a PA
* reg: kernel address to be converted in place * reg: hypervisor address to be converted in place
* tmp: temporary register * tmp: temporary register
*
* The actual code generation takes place in kvm_get_kimage_voffset, and
* the instructions below are only there to reserve the space and
* perform the register allocation (kvm_get_kimage_voffset uses the
* specific registers encoded in the instructions).
*/ */
.macro kimg_pa reg, tmp .macro hyp_pa reg, tmp
alternative_cb kvm_get_kimage_voffset ldr_l \tmp, hyp_physvirt_offset
movz \tmp, #0 add \reg, \reg, \tmp
movk \tmp, #0, lsl #16
movk \tmp, #0, lsl #32
movk \tmp, #0, lsl #48
alternative_cb_end
/* reg = __pa(reg) */
sub \reg, \reg, \tmp
.endm .endm
/* /*
* Convert a kernel image address to a hyp VA * Convert a hypervisor VA to a kernel image address
* reg: kernel address to be converted in place * reg: hypervisor address to be converted in place
* tmp: temporary register * tmp: temporary register
* *
* The actual code generation takes place in kvm_get_kimage_voffset, and * The actual code generation takes place in kvm_get_kimage_voffset, and
* the instructions below are only there to reserve the space and * the instructions below are only there to reserve the space and
* perform the register allocation (kvm_update_kimg_phys_offset uses the * perform the register allocation (kvm_get_kimage_voffset uses the
* specific registers encoded in the instructions). * specific registers encoded in the instructions).
*/ */
.macro kimg_hyp_va reg, tmp .macro hyp_kimg_va reg, tmp
alternative_cb kvm_update_kimg_phys_offset /* Convert hyp VA -> PA. */
hyp_pa \reg, \tmp
/* Load kimage_voffset. */
alternative_cb kvm_get_kimage_voffset
movz \tmp, #0 movz \tmp, #0
movk \tmp, #0, lsl #16 movk \tmp, #0, lsl #16
movk \tmp, #0, lsl #32 movk \tmp, #0, lsl #32
movk \tmp, #0, lsl #48 movk \tmp, #0, lsl #48
alternative_cb_end alternative_cb_end
sub \reg, \reg, \tmp /* Convert PA -> kimg VA. */
mov_q \tmp, PAGE_OFFSET add \reg, \reg, \tmp
orr \reg, \reg, \tmp
kern_hyp_va \reg
.endm .endm
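
As a sketch of the arithmetic performed by the two macros above (using the usual arm64 meanings of hyp_physvirt_offset and kimage_voffset):

	hyp_pa:       PA      = hyp VA + hyp_physvirt_offset
	hyp_kimg_va:  kimg VA = PA + kimage_voffset
	                      = hyp VA + hyp_physvirt_offset + kimage_voffset

The kimage_voffset constant is patched into the movz/movk sequence at boot by the kvm_get_kimage_voffset callback, so the only memory access on this path is the ldr_l of hyp_physvirt_offset in hyp_pa.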
#else #else
...@@ -129,6 +119,7 @@ alternative_cb_end ...@@ -129,6 +119,7 @@ alternative_cb_end
void kvm_update_va_mask(struct alt_instr *alt, void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst); __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void); void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);
static __always_inline unsigned long __kern_hyp_va(unsigned long v) static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{ {
...@@ -144,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) ...@@ -144,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
{
unsigned long offset;
asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
"movk %0, #0, lsl #16\n"
"movk %0, #0, lsl #32\n"
"movk %0, #0, lsl #48\n",
kvm_update_kimg_phys_offset)
: "=r" (offset));
return __kern_hyp_va((v - offset) | PAGE_OFFSET);
}
#define kimg_fn_hyp_va(v) ((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
#define kimg_fn_ptr(x) (typeof(x) **)(x)
/* /*
* We currently support using a VM-specified IPA size. For backward * We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits. * compatibility, the default IPA size is fixed to 40bits.
......
...@@ -11,7 +11,8 @@ extern char __alt_instructions[], __alt_instructions_end[]; ...@@ -11,7 +11,8 @@ extern char __alt_instructions[], __alt_instructions_end[];
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[]; extern char __hyp_text_start[], __hyp_text_end[];
extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[]; extern char __hyp_rodata_start[], __hyp_rodata_end[];
extern char __hyp_reloc_begin[], __hyp_reloc_end[];
extern char __idmap_text_start[], __idmap_text_end[]; extern char __idmap_text_start[], __idmap_text_end[];
extern char __initdata_begin[], __initdata_end[]; extern char __initdata_begin[], __initdata_end[];
extern char __inittext_begin[], __inittext_end[]; extern char __inittext_begin[], __inittext_end[];
......
...@@ -64,7 +64,6 @@ __efistub__ctype = _ctype; ...@@ -64,7 +64,6 @@ __efistub__ctype = _ctype;
/* Alternative callbacks for init-time patching of nVHE hyp code. */ /* Alternative callbacks for init-time patching of nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_patch_vector_branch); KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask); KVM_NVHE_ALIAS(kvm_update_va_mask);
KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset);
KVM_NVHE_ALIAS(kvm_get_kimage_voffset); KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
/* Global kernel state accessed by nVHE hyp code. */ /* Global kernel state accessed by nVHE hyp code. */
......
...@@ -434,8 +434,10 @@ static void __init hyp_mode_check(void) ...@@ -434,8 +434,10 @@ static void __init hyp_mode_check(void)
"CPU: CPUs started in inconsistent modes"); "CPU: CPUs started in inconsistent modes");
else else
pr_info("CPU: All CPU(s) started at EL1\n"); pr_info("CPU: All CPU(s) started at EL1\n");
if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
kvm_compute_layout(); kvm_compute_layout();
kvm_apply_hyp_relocations();
}
} }
void __init smp_cpus_done(unsigned int max_cpus) void __init smp_cpus_done(unsigned int max_cpus)
......
...@@ -31,10 +31,11 @@ jiffies = jiffies_64; ...@@ -31,10 +31,11 @@ jiffies = jiffies_64;
__stop___kvm_ex_table = .; __stop___kvm_ex_table = .;
#define HYPERVISOR_DATA_SECTIONS \ #define HYPERVISOR_DATA_SECTIONS \
HYP_SECTION_NAME(.data..ro_after_init) : { \ HYP_SECTION_NAME(.rodata) : { \
__hyp_data_ro_after_init_start = .; \ __hyp_rodata_start = .; \
*(HYP_SECTION_NAME(.data..ro_after_init)) \ *(HYP_SECTION_NAME(.data..ro_after_init)) \
__hyp_data_ro_after_init_end = .; \ *(HYP_SECTION_NAME(.rodata)) \
__hyp_rodata_end = .; \
} }
#define HYPERVISOR_PERCPU_SECTION \ #define HYPERVISOR_PERCPU_SECTION \
...@@ -42,10 +43,19 @@ jiffies = jiffies_64; ...@@ -42,10 +43,19 @@ jiffies = jiffies_64;
HYP_SECTION_NAME(.data..percpu) : { \ HYP_SECTION_NAME(.data..percpu) : { \
*(HYP_SECTION_NAME(.data..percpu)) \ *(HYP_SECTION_NAME(.data..percpu)) \
} }
#define HYPERVISOR_RELOC_SECTION \
.hyp.reloc : ALIGN(4) { \
__hyp_reloc_begin = .; \
*(.hyp.reloc) \
__hyp_reloc_end = .; \
}
#else /* CONFIG_KVM */ #else /* CONFIG_KVM */
#define HYPERVISOR_EXTABLE #define HYPERVISOR_EXTABLE
#define HYPERVISOR_DATA_SECTIONS #define HYPERVISOR_DATA_SECTIONS
#define HYPERVISOR_PERCPU_SECTION #define HYPERVISOR_PERCPU_SECTION
#define HYPERVISOR_RELOC_SECTION
#endif #endif
#define HYPERVISOR_TEXT \ #define HYPERVISOR_TEXT \
...@@ -216,6 +226,8 @@ SECTIONS ...@@ -216,6 +226,8 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES) PERCPU_SECTION(L1_CACHE_BYTES)
HYPERVISOR_PERCPU_SECTION HYPERVISOR_PERCPU_SECTION
HYPERVISOR_RELOC_SECTION
.rela.dyn : ALIGN(8) { .rela.dyn : ALIGN(8) {
*(.rela .rela*) *(.rela .rela*)
} }
......
...@@ -1750,11 +1750,10 @@ static int init_hyp_mode(void) ...@@ -1750,11 +1750,10 @@ static int init_hyp_mode(void)
goto out_err; goto out_err;
} }
err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start), err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
kvm_ksym_ref(__hyp_data_ro_after_init_end), kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
PAGE_HYP_RO);
if (err) { if (err) {
kvm_err("Cannot map .hyp.data..ro_after_init section\n"); kvm_err("Cannot map .hyp.rodata section\n");
goto out_err; goto out_err;
} }
......
...@@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void) ...@@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void)
struct exception_table_entry *entry, *end; struct exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2); unsigned long elr_el2 = read_sysreg(elr_el2);
entry = hyp_symbol_addr(__start___kvm_ex_table); entry = &__start___kvm_ex_table;
end = hyp_symbol_addr(__stop___kvm_ex_table); end = &__stop___kvm_ex_table;
while (entry < end) { while (entry < end) {
addr = (unsigned long)&entry->insn + entry->insn; addr = (unsigned long)&entry->insn + entry->insn;
......
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
gen-hyprel
hyp.lds hyp.lds
hyp-reloc.S
...@@ -6,6 +6,9 @@ ...@@ -6,6 +6,9 @@
asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include
obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o hyp-main.o hyp-smp.o psci-relay.o
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
...@@ -19,7 +22,7 @@ obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ ...@@ -19,7 +22,7 @@ obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y)) hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
obj-y := kvm_nvhe.o obj-y := kvm_nvhe.o
extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o
# 1) Compile all source files to `.nvhe.o` object files. The file extension # 1) Compile all source files to `.nvhe.o` object files. The file extension
# avoids file name clashes for files shared with VHE. # avoids file name clashes for files shared with VHE.
...@@ -42,11 +45,31 @@ LDFLAGS_kvm_nvhe.tmp.o := -r -T ...@@ -42,11 +45,31 @@ LDFLAGS_kvm_nvhe.tmp.o := -r -T
$(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
$(call if_changed,ld) $(call if_changed,ld)
# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'. # 4) Generate list of hyp code/data positions that need to be relocated at
# runtime. Because the hypervisor is part of the kernel binary, relocations
# produce a kernel VA. We enumerate relocations targeting hyp at build time
# and convert the kernel VAs at those positions to hyp VAs.
$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
$(call if_changed,hyprel)
# 5) Compile hyp-reloc.S and link it into the existing partially linked object.
# The object file now contains a section with pointers to hyp positions that
# will contain kernel VAs at runtime. These pointers have relocations on them
# so that they get updated as the hyp object is linked into `vmlinux`.
LDFLAGS_kvm_nvhe.rel.o := -r
$(obj)/kvm_nvhe.rel.o: $(obj)/kvm_nvhe.tmp.o $(obj)/hyp-reloc.o FORCE
$(call if_changed,ld)
# 6) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
# Prefixes names of ELF symbols with '__kvm_nvhe_'. # Prefixes names of ELF symbols with '__kvm_nvhe_'.
$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE $(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE
$(call if_changed,hypcopy) $(call if_changed,hypcopy)
# The HYPREL command calls `gen-hyprel` to generate an assembly file with
# a list of relocations targeting hyp code/data.
quiet_cmd_hyprel = HYPREL $@
cmd_hyprel = $(obj)/gen-hyprel $< > $@
# The HYPCOPY command uses `objcopy` to prefix all ELF symbol names # The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
# to avoid clashes with VHE code/data. # to avoid clashes with VHE code/data.
quiet_cmd_hypcopy = HYPCOPY $@ quiet_cmd_hypcopy = HYPCOPY $@
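
For a rough idea of what the generated hyp-reloc.S contains (illustrative only; the exact directives are whatever gen-hyprel emits), each relocation that targets hyp code/data becomes a 32-bit place-relative entry in the .hyp.reloc section, along the lines of

	.section .hyp.reloc, "a"
	.long __hyp_section_.hyp.text + 0x1f0 - .

where the offset is expressed relative to one of the __hyp_section_* symbols defined by BEGIN_HYP_SECTION, and the resulting entries are walked at boot by kvm_apply_hyp_relocations().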
......
...@@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter) ...@@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter)
* void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par); * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
*/ */
SYM_FUNC_START(__hyp_do_panic) SYM_FUNC_START(__hyp_do_panic)
/* Load the format arguments into x1-7 */
mov x6, x3
get_vcpu_ptr x7, x3
mrs x3, esr_el2
mrs x4, far_el2
mrs x5, hpfar_el2
/* Prepare and exit to the host's panic function. */ /* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h) PSR_MODE_EL1h)
msr spsr_el2, lr msr spsr_el2, lr
ldr lr, =panic ldr lr, =panic
hyp_kimg_va lr, x6
msr elr_el2, lr msr elr_el2, lr
/* /* Set the panic format string. Use the, now free, LR as scratch. */
* Set the panic format string and enter the host, conditionally ldr lr, =__hyp_panic_string
* restoring the host context. hyp_kimg_va lr, x6
*/
/* Load the format arguments into x1-7. */
mov x6, x3
get_vcpu_ptr x7, x3
mrs x3, esr_el2
mrs x4, far_el2
mrs x5, hpfar_el2
/* Enter the host, conditionally restoring the host context. */
cmp x0, xzr cmp x0, xzr
ldr x0, =__hyp_panic_string mov x0, lr
b.eq __host_enter_without_restoring b.eq __host_enter_without_restoring
b __host_enter_for_panic b __host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic) SYM_FUNC_END(__hyp_do_panic)
...@@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic) ...@@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic)
* Preserve x0-x4, which may contain stub parameters. * Preserve x0-x4, which may contain stub parameters.
*/ */
ldr x5, =__kvm_handle_stub_hvc ldr x5, =__kvm_handle_stub_hvc
kimg_pa x5, x6 hyp_pa x5, x6
br x5 br x5
.L__vect_end\@: .L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80) .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include <asm/virt.h> #include <asm/virt.h>
.text .text
.pushsection .hyp.idmap.text, "ax" .pushsection .idmap.text, "ax"
.align 11 .align 11
...@@ -132,7 +132,6 @@ alternative_else_nop_endif ...@@ -132,7 +132,6 @@ alternative_else_nop_endif
/* Set the host vector */ /* Set the host vector */
ldr x0, =__kvm_hyp_host_vector ldr x0, =__kvm_hyp_host_vector
kimg_hyp_va x0, x1
msr vbar_el2, x0 msr vbar_el2, x0
ret ret
...@@ -191,7 +190,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) ...@@ -191,7 +190,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
/* Leave idmap. */ /* Leave idmap. */
mov x0, x29 mov x0, x29
ldr x1, =kvm_host_psci_cpu_entry ldr x1, =kvm_host_psci_cpu_entry
kimg_hyp_va x1, x2
br x1 br x1
SYM_CODE_END(__kvm_hyp_init_cpu) SYM_CODE_END(__kvm_hyp_init_cpu)
......
...@@ -108,9 +108,9 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt) ...@@ -108,9 +108,9 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
typedef void (*hcall_t)(struct kvm_cpu_context *); typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x) #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
static const hcall_t *host_hcall[] = { static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run), HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context), HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa), HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
...@@ -130,7 +130,6 @@ static const hcall_t *host_hcall[] = { ...@@ -130,7 +130,6 @@ static const hcall_t *host_hcall[] = {
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{ {
DECLARE_REG(unsigned long, id, host_ctxt, 0); DECLARE_REG(unsigned long, id, host_ctxt, 0);
const hcall_t *kfn;
hcall_t hfn; hcall_t hfn;
id -= KVM_HOST_SMCCC_ID(0); id -= KVM_HOST_SMCCC_ID(0);
...@@ -138,13 +137,11 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) ...@@ -138,13 +137,11 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
if (unlikely(id >= ARRAY_SIZE(host_hcall))) if (unlikely(id >= ARRAY_SIZE(host_hcall)))
goto inval; goto inval;
kfn = host_hcall[id]; hfn = host_hcall[id];
if (unlikely(!kfn)) if (unlikely(!hfn))
goto inval; goto inval;
cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS; cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
hfn = kimg_fn_hyp_va(kfn);
hfn(host_ctxt); hfn(host_ctxt);
return; return;
......
...@@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu) ...@@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base)) if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
hyp_panic(); hyp_panic();
cpu_base_array = (unsigned long *)hyp_symbol_addr(kvm_arm_hyp_percpu_base); cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base;
this_cpu_base = kern_hyp_va(cpu_base_array[cpu]); this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start); elf_base = (unsigned long)&__per_cpu_start;
return this_cpu_base - elf_base; return this_cpu_base - elf_base;
} }
...@@ -12,14 +12,17 @@ ...@@ -12,14 +12,17 @@
#include <asm/memory.h> #include <asm/memory.h>
SECTIONS { SECTIONS {
HYP_SECTION(.idmap.text)
HYP_SECTION(.text) HYP_SECTION(.text)
HYP_SECTION(.data..ro_after_init)
HYP_SECTION(.rodata)
/* /*
* .hyp..data..percpu needs to be page aligned to maintain the same * .hyp..data..percpu needs to be page aligned to maintain the same
* alignment for when linking into vmlinux. * alignment for when linking into vmlinux.
*/ */
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
HYP_SECTION_NAME(.data..percpu) : { BEGIN_HYP_SECTION(.data..percpu)
PERCPU_INPUT(L1_CACHE_BYTES) PERCPU_INPUT(L1_CACHE_BYTES)
} END_HYP_SECTION
HYP_SECTION(.data..ro_after_init)
} }
...@@ -128,8 +128,8 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt) ...@@ -128,8 +128,8 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
if (cpu_id == INVALID_CPU_ID) if (cpu_id == INVALID_CPU_ID)
return PSCI_RET_INVALID_PARAMS; return PSCI_RET_INVALID_PARAMS;
boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id); boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id); init_params = per_cpu_ptr(&kvm_init_params, cpu_id);
/* Check if the target CPU is already being booted. */ /* Check if the target CPU is already being booted. */
if (!try_acquire_boot_args(boot_args)) if (!try_acquire_boot_args(boot_args))
...@@ -140,7 +140,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt) ...@@ -140,7 +140,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
wmb(); wmb();
ret = psci_call(func_id, mpidr, ret = psci_call(func_id, mpidr,
__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)), __hyp_pa(&kvm_hyp_cpu_entry),
__hyp_pa(init_params)); __hyp_pa(init_params));
/* If successful, the lock will be released by the target CPU. */ /* If successful, the lock will be released by the target CPU. */
...@@ -159,8 +159,8 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) ...@@ -159,8 +159,8 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
struct psci_boot_args *boot_args; struct psci_boot_args *boot_args;
struct kvm_nvhe_init_params *init_params; struct kvm_nvhe_init_params *init_params;
boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); boot_args = this_cpu_ptr(&suspend_args);
init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params)); init_params = this_cpu_ptr(&kvm_init_params);
/* /*
* No need to acquire a lock before writing to boot_args because a core * No need to acquire a lock before writing to boot_args because a core
...@@ -174,7 +174,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) ...@@ -174,7 +174,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
* point if it is a deep sleep state. * point if it is a deep sleep state.
*/ */
return psci_call(func_id, power_state, return psci_call(func_id, power_state,
__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)), __hyp_pa(&kvm_hyp_cpu_resume),
__hyp_pa(init_params)); __hyp_pa(init_params));
} }
...@@ -186,8 +186,8 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) ...@@ -186,8 +186,8 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
struct psci_boot_args *boot_args; struct psci_boot_args *boot_args;
struct kvm_nvhe_init_params *init_params; struct kvm_nvhe_init_params *init_params;
boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); boot_args = this_cpu_ptr(&suspend_args);
init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params)); init_params = this_cpu_ptr(&kvm_init_params);
/* /*
* No need to acquire a lock before writing to boot_args because a core * No need to acquire a lock before writing to boot_args because a core
...@@ -198,7 +198,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt) ...@@ -198,7 +198,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
/* Will only return on error. */ /* Will only return on error. */
return psci_call(func_id, return psci_call(func_id,
__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)), __hyp_pa(&kvm_hyp_cpu_resume),
__hyp_pa(init_params), 0); __hyp_pa(init_params), 0);
} }
...@@ -207,12 +207,12 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on) ...@@ -207,12 +207,12 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
struct psci_boot_args *boot_args; struct psci_boot_args *boot_args;
struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *host_ctxt;
host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt; host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
if (is_cpu_on) if (is_cpu_on)
boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args)); boot_args = this_cpu_ptr(&cpu_on_args);
else else
boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args)); boot_args = this_cpu_ptr(&suspend_args);
cpu_reg(host_ctxt, 0) = boot_args->r0; cpu_reg(host_ctxt, 0) = boot_args->r0;
write_sysreg_el2(boot_args->pc, SYS_ELR); write_sysreg_el2(boot_args->pc, SYS_ELR);
......
...@@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) ...@@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
} }
rd = kvm_vcpu_dabt_get_rd(vcpu); rd = kvm_vcpu_dabt_get_rd(vcpu);
addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va; addr = kvm_vgic_global_state.vcpu_hyp_va;
addr += fault_ipa - vgic->vgic_cpu_base; addr += fault_ipa - vgic->vgic_cpu_base;
if (kvm_vcpu_dabt_iswrite(vcpu)) { if (kvm_vcpu_dabt_iswrite(vcpu)) {
......
...@@ -81,6 +81,34 @@ __init void kvm_compute_layout(void) ...@@ -81,6 +81,34 @@ __init void kvm_compute_layout(void)
init_hyp_physvirt_offset(); init_hyp_physvirt_offset();
} }
/*
* The .hyp.reloc ELF section contains a list of kimg positions that
 * contain kimg VAs but will be accessed only in hyp execution context.
* Convert them to hyp VAs. See gen-hyprel.c for more details.
*/
__init void kvm_apply_hyp_relocations(void)
{
int32_t *rel;
int32_t *begin = (int32_t *)__hyp_reloc_begin;
int32_t *end = (int32_t *)__hyp_reloc_end;
for (rel = begin; rel < end; ++rel) {
uintptr_t *ptr, kimg_va;
/*
* Each entry contains a 32-bit relative offset from itself
* to a kimg VA position.
*/
ptr = (uintptr_t *)lm_alias((char *)rel + *rel);
/* Read the kimg VA value at the relocation address. */
kimg_va = *ptr;
/* Convert to hyp VA and store back to the relocation address. */
*ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
}
}
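
A worked example with made-up addresses: if one .hyp.reloc word sits at kimg VA 0xffff800011b00000 and holds the value 0x2000, then ptr is the linear-map alias of kimg VA 0xffff800011b02000. If that location holds the kimg VA of some hyp object, the loop overwrites it with the hyp VA computed by __early_kern_hyp_va(), so the pointer can be dereferenced directly once executing at EL2.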
static u32 compute_instruction(int n, u32 rd, u32 rn) static u32 compute_instruction(int n, u32 rd, u32 rn)
{ {
u32 insn = AARCH64_BREAK_FAULT; u32 insn = AARCH64_BREAK_FAULT;
...@@ -255,12 +283,6 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst ...@@ -255,12 +283,6 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst
*updptr++ = cpu_to_le32(insn); *updptr++ = cpu_to_le32(insn);
} }
void kvm_update_kimg_phys_offset(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
}
void kvm_get_kimage_voffset(struct alt_instr *alt, void kvm_get_kimage_voffset(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst) __le32 *origptr, __le32 *updptr, int nr_inst)
{ {
......