Commit e7ae2ecd, authored Feb 12, 2021 by Marc Zyngier

    Merge branch 'kvm-arm64/hyp-reloc' into kvmarm-master/next

    Signed-off-by: Marc Zyngier <maz@kernel.org>

Parents: c5db649f bc93763f
Showing 20 changed files with 604 additions and 135 deletions (+604 −135)
arch/arm64/include/asm/hyp_image.h             +27   -2
arch/arm64/include/asm/kvm_asm.h                +0  -26
arch/arm64/include/asm/kvm_mmu.h               +17  -44
arch/arm64/include/asm/sections.h               +2   -1
arch/arm64/kernel/image-vars.h                  +0   -1
arch/arm64/kernel/smp.c                         +3   -1
arch/arm64/kernel/vmlinux.lds.S                +15   -3
arch/arm64/kvm/arm.c                            +3   -4
arch/arm64/kvm/hyp/include/hyp/switch.h         +2   -2
arch/arm64/kvm/hyp/nvhe/.gitignore              +2   -0
arch/arm64/kvm/hyp/nvhe/Makefile               +26   -3
arch/arm64/kvm/hyp/nvhe/gen-hyprel.c          +438   -0
arch/arm64/kvm/hyp/nvhe/host.S                 +15  -14
arch/arm64/kvm/hyp/nvhe/hyp-init.S              +1   -3
arch/arm64/kvm/hyp/nvhe/hyp-main.c              +4   -7
arch/arm64/kvm/hyp/nvhe/hyp-smp.c               +2   -2
arch/arm64/kvm/hyp/nvhe/hyp.lds.S               +6   -3
arch/arm64/kvm/hyp/nvhe/psci-relay.c           +12  -12
arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c        +1   -1
arch/arm64/kvm/va_layout.c                     +28   -6
arch/arm64/include/asm/hyp_image.h
@@ -7,6 +7,9 @@
 #ifndef __ARM64_HYP_IMAGE_H__
 #define __ARM64_HYP_IMAGE_H__
 
+#define __HYP_CONCAT(a, b)	a ## b
+#define HYP_CONCAT(a, b)	__HYP_CONCAT(a, b)
+
 /*
  * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
  * to separate it from the kernel proper.
@@ -21,9 +24,31 @@
  */
 #define HYP_SECTION_NAME(NAME)	.hyp##NAME
 
+/* Symbol defined at the beginning of each hyp section. */
+#define HYP_SECTION_SYMBOL_NAME(NAME) \
+	HYP_CONCAT(__hyp_section_, HYP_SECTION_NAME(NAME))
+
+/*
+ * Helper to generate linker script statements starting a hyp section.
+ *
+ * A symbol with a well-known name is defined at the first byte. This
+ * is used as a base for hyp relocations (see gen-hyprel.c). It must
+ * be defined inside the section so the linker of `vmlinux` cannot
+ * separate it from the section data.
+ */
+#define BEGIN_HYP_SECTION(NAME) \
+	HYP_SECTION_NAME(NAME) : { \
+		HYP_SECTION_SYMBOL_NAME(NAME) = .;
+
+/* Helper to generate linker script statements ending a hyp section. */
+#define END_HYP_SECTION \
+	}
+
 /* Defines an ELF hyp section from input section @NAME and its subsections. */
-#define HYP_SECTION(NAME) \
-	HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) }
+#define HYP_SECTION(NAME) \
+	BEGIN_HYP_SECTION(NAME) \
+		*(NAME NAME##.*) \
+	END_HYP_SECTION
 
 /*
  * Defines a linker script alias of a kernel-proper symbol referenced by
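For reference, `HYP_SECTION(.text)` now expands to a linker-script fragment along the following lines (expansion shown only to make the section-start symbol visible; indentation illustrative):

	.hyp.text : {
		__hyp_section_.hyp.text = .;
		*(.text .text.*)
	}

gen-hyprel later uses that `__hyp_section_*` symbol as the base against which it expresses the positions to relocate.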
arch/arm64/include/asm/kvm_asm.h
@@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define SYM_CONSTRAINT "i"
-#else
-#define SYM_CONSTRAINT "S"
-#endif
-
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s)						\
-	({								\
-		typeof(s) *addr;					\
-		asm("adrp	%0, %1\n"				\
-		    "add	%0, %0, :lo12:%1\n"			\
-		    : "=r" (addr) : SYM_CONSTRAINT (&s));		\
-		addr;							\
-	})
-
 #define __KVM_EXTABLE(from, to)						\
 	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
 	"	.align		3\n"
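The call sites (see the switch.h, hyp-smp.c and psci-relay.c hunks below) show what this removal buys — a sketch of the pattern:

	/* Before: force a PC-relative computation, because an absolute
	 * kernel VA would be the wrong address once running at EL2. */
	entry = hyp_symbol_addr(__start___kvm_ex_table);

	/* After: pointers in the hyp object are rewritten to hyp VAs at
	 * boot by kvm_apply_hyp_relocations(), so a plain address-of is
	 * safe even if the compiler emits a constant-pool load. */
	entry = &__start___kvm_ex_table;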
arch/arm64/include/asm/kvm_mmu.h
@@ -73,49 +73,39 @@ alternative_cb_end
 .endm
 
 /*
- * Convert a kernel image address to a PA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a PA
+ * reg: hypervisor address to be converted in place
  * tmp: temporary register
- *
- * The actual code generation takes place in kvm_get_kimage_voffset, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_get_kimage_voffset uses the
- * specific registers encoded in the instructions).
  */
-.macro kimg_pa reg, tmp
-alternative_cb kvm_get_kimage_voffset
-	movz	\tmp, #0
-	movk	\tmp, #0, lsl #16
-	movk	\tmp, #0, lsl #32
-	movk	\tmp, #0, lsl #48
-alternative_cb_end
-
-	/* reg = __pa(reg) */
-	sub	\reg, \reg, \tmp
+.macro hyp_pa reg, tmp
+	ldr_l	\tmp, hyp_physvirt_offset
+	add	\reg, \reg, \tmp
 .endm
 
 /*
- * Convert a kernel image address to a hyp VA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a kernel image address
+ * reg: hypervisor address to be converted in place
  * tmp: temporary register
  *
  * The actual code generation takes place in kvm_get_kimage_voffset, and
  * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_kimg_phys_offset uses the
+ * perform the register allocation (kvm_get_kimage_voffset uses the
  * specific registers encoded in the instructions).
  */
-.macro kimg_hyp_va reg, tmp
-alternative_cb kvm_update_kimg_phys_offset
+.macro hyp_kimg_va reg, tmp
+	/* Convert hyp VA -> PA. */
+	hyp_pa	\reg, \tmp
+
+	/* Load kimage_voffset. */
+alternative_cb kvm_get_kimage_voffset
 	movz	\tmp, #0
 	movk	\tmp, #0, lsl #16
 	movk	\tmp, #0, lsl #32
 	movk	\tmp, #0, lsl #48
 alternative_cb_end
 
-	sub	\reg, \reg, \tmp
-	mov_q	\tmp, PAGE_OFFSET
-	orr	\reg, \reg, \tmp
-	kern_hyp_va	\reg
+	/* Convert PA -> kimg VA. */
+	add	\reg, \reg, \tmp
 .endm
 
 #else
@@ -129,6 +119,7 @@ alternative_cb_end
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
+void kvm_apply_hyp_relocations(void);
 
 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
@@ -144,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
-static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
-{
-	unsigned long offset;
-
-	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
-				    "movk %0, #0, lsl #16\n"
-				    "movk %0, #0, lsl #32\n"
-				    "movk %0, #0, lsl #48\n",
-				    kvm_update_kimg_phys_offset)
-		     : "=r" (offset));
-
-	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
-}
-
-#define kimg_fn_hyp_va(v)	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
-
-#define kimg_fn_ptr(x)	(typeof(x) **)(x)
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
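In C terms, the two new assembly macros perform the following arithmetic (a sketch only; in the real code kimage_voffset is patched into the movz/movk sequence by kvm_get_kimage_voffset, and hyp_physvirt_offset is loaded with ldr_l):

	extern s64 hyp_physvirt_offset;	/* PA minus hyp VA of the hyp mapping */
	extern u64 kimage_voffset;	/* kernel image VA minus PA */

	static unsigned long sketch_hyp_pa(unsigned long hyp_va)
	{
		return hyp_va + hyp_physvirt_offset;		/* hyp VA -> PA */
	}

	static unsigned long sketch_hyp_kimg_va(unsigned long hyp_va)
	{
		return sketch_hyp_pa(hyp_va) + kimage_voffset;	/* PA -> kimg VA */
	}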
arch/arm64/include/asm/sections.h
@@ -11,7 +11,8 @@ extern char __alt_instructions[], __alt_instructions_end[];
 extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 extern char __hyp_text_start[], __hyp_text_end[];
-extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[];
+extern char __hyp_rodata_start[], __hyp_rodata_end[];
+extern char __hyp_reloc_begin[], __hyp_reloc_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
arch/arm64/kernel/image-vars.h
@@ -64,7 +64,6 @@ __efistub__ctype = _ctype;
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
-KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset);
 KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
 
 /* Global kernel state accessed by nVHE hyp code. */
arch/arm64/kernel/smp.c
@@ -434,8 +434,10 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
-	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
+	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
 		kvm_compute_layout();
+		kvm_apply_hyp_relocations();
+	}
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
arch/arm64/kernel/vmlinux.lds.S
@@ -31,10 +31,11 @@ jiffies = jiffies_64;
 	__stop___kvm_ex_table = .;
 
 #define HYPERVISOR_DATA_SECTIONS				\
-	HYP_SECTION_NAME(.data..ro_after_init) : {		\
-		__hyp_data_ro_after_init_start = .;		\
+	HYP_SECTION_NAME(.rodata) : {				\
+		__hyp_rodata_start = .;				\
 		*(HYP_SECTION_NAME(.data..ro_after_init))	\
-		__hyp_data_ro_after_init_end = .;		\
+		*(HYP_SECTION_NAME(.rodata))			\
+		__hyp_rodata_end = .;				\
 	}
 
 #define HYPERVISOR_PERCPU_SECTION				\
@@ -42,10 +43,19 @@ jiffies = jiffies_64;
 	HYP_SECTION_NAME(.data..percpu) : {			\
 		*(HYP_SECTION_NAME(.data..percpu))		\
 	}
+
+#define HYPERVISOR_RELOC_SECTION				\
+	.hyp.reloc : ALIGN(4) {					\
+		__hyp_reloc_begin = .;				\
+		*(.hyp.reloc)					\
+		__hyp_reloc_end = .;				\
+	}
+
 #else /* CONFIG_KVM */
 #define HYPERVISOR_EXTABLE
 #define HYPERVISOR_DATA_SECTIONS
 #define HYPERVISOR_PERCPU_SECTION
+#define HYPERVISOR_RELOC_SECTION
 #endif
 
 #define HYPERVISOR_TEXT					\
@@ -216,6 +226,8 @@ SECTIONS
 	PERCPU_SECTION(L1_CACHE_BYTES)
 	HYPERVISOR_PERCPU_SECTION
 
+	HYPERVISOR_RELOC_SECTION
+
 	.rela.dyn : ALIGN(8) {
 		*(.rela .rela*)
 	}
arch/arm64/kvm/arm.c
@@ -1750,11 +1750,10 @@ static int init_hyp_mode(void)
 		goto out_err;
 	}
 
-	err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start),
-				  kvm_ksym_ref(__hyp_data_ro_after_init_end),
-				  PAGE_HYP_RO);
+	err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
+				  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
 	if (err) {
-		kvm_err("Cannot map .hyp.data..ro_after_init section\n");
+		kvm_err("Cannot map .hyp.rodata section\n");
 		goto out_err;
 	}
arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void)
 	struct exception_table_entry *entry, *end;
 	unsigned long elr_el2 = read_sysreg(elr_el2);
 
-	entry = hyp_symbol_addr(__start___kvm_ex_table);
-	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	entry = &__start___kvm_ex_table;
+	end = &__stop___kvm_ex_table;
 
 	while (entry < end) {
 		addr = (unsigned long)&entry->insn + entry->insn;
arch/arm64/kvm/hyp/nvhe/.gitignore
 # SPDX-License-Identifier: GPL-2.0-only
+gen-hyprel
 hyp.lds
+hyp-reloc.S
arch/arm64/kvm/hyp/nvhe/Makefile
@@ -6,6 +6,9 @@
 asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
 
+hostprogs := gen-hyprel
+HOST_EXTRACFLAGS += -I$(objtree)/include
+
 obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
 	 hyp-main.o hyp-smp.o psci-relay.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
@@ -19,7 +22,7 @@ obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
 
 hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
 obj-y := kvm_nvhe.o
-extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds
+extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o
 
 # 1) Compile all source files to `.nvhe.o` object files. The file extension
 #    avoids file name clashes for files shared with VHE.
@@ -42,11 +45,31 @@ LDFLAGS_kvm_nvhe.tmp.o := -r -T
 $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
 	$(call if_changed,ld)
 
-# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
-#    Prefixes names of ELF symbols with '__kvm_nvhe_'.
-$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE
+# 4) Generate list of hyp code/data positions that need to be relocated at
+#    runtime. Because the hypervisor is part of the kernel binary, relocations
+#    produce a kernel VA. We enumerate relocations targeting hyp at build time
+#    and convert the kernel VAs at those positions to hyp VAs.
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+	$(call if_changed,hyprel)
+
+# 5) Compile hyp-reloc.S and link it into the existing partially linked object.
+#    The object file now contains a section with pointers to hyp positions that
+#    will contain kernel VAs at runtime. These pointers have relocations on them
+#    so that they get updated as the hyp object is linked into `vmlinux`.
+LDFLAGS_kvm_nvhe.rel.o := -r
+$(obj)/kvm_nvhe.rel.o: $(obj)/kvm_nvhe.tmp.o $(obj)/hyp-reloc.o FORCE
+	$(call if_changed,ld)
+
+# 6) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
+#    Prefixes names of ELF symbols with '__kvm_nvhe_'.
+$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE
 	$(call if_changed,hypcopy)
 
+# The HYPREL command calls `gen-hyprel` to generate an assembly file with
+# a list of relocations targeting hyp code/data.
+quiet_cmd_hyprel = HYPREL  $@
+      cmd_hyprel = $(obj)/gen-hyprel $< > $@
+
 # The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
 # to avoid clashes with VHE code/data.
 quiet_cmd_hypcopy = HYPCOPY $@
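Conceptually, steps 3 through 6 now correspond to the following command sequence (illustrative paths and flags only; the real rules go through if_changed, and the objcopy invocation is inferred from the HYPCOPY comment):

	ld -r -T hyp.lds -o kvm_nvhe.tmp.o <nvhe objects>              # 3) partial link
	./gen-hyprel kvm_nvhe.tmp.o > hyp-reloc.S                      # 4) list hyp relocations
	ld -r -o kvm_nvhe.rel.o kvm_nvhe.tmp.o hyp-reloc.o             # 5) link in .hyp.reloc
	objcopy --prefix-symbols=__kvm_nvhe_ kvm_nvhe.rel.o kvm_nvhe.o # 6) namespace symbols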
arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
new file mode 100644 (+438 lines; diff collapsed in this view)
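Since the new file's diff is collapsed here, a rough user-space sketch of the approach it implements — not the actual gen-hyprel.c; symbol resolution, endianness handling, and the other relocation types it supports are glossed over, and `__hyp_section_base` stands in for the per-section `__hyp_section_*` symbols:

	#include <elf.h>
	#include <stdio.h>

	/* Walk the section headers of a memory-mapped ELF object and, for
	 * every R_AARCH64_ABS64 relocation in a RELA section, emit one
	 * place-relative .hyp.reloc entry. */
	static void emit_hyp_relocs(const void *elf)
	{
		const Elf64_Ehdr *ehdr = elf;
		const Elf64_Shdr *shdr =
			(const void *)((const char *)elf + ehdr->e_shoff);
		int i;

		printf(".section .hyp.reloc, \"a\"\n");
		printf(".align 2\n");
		for (i = 0; i < ehdr->e_shnum; i++) {
			const Elf64_Rela *rela;
			size_t j, n;

			if (shdr[i].sh_type != SHT_RELA)
				continue;
			rela = (const void *)((const char *)elf +
					      shdr[i].sh_offset);
			n = shdr[i].sh_size / sizeof(*rela);
			for (j = 0; j < n; j++) {
				if (ELF64_R_TYPE(rela[j].r_info) != R_AARCH64_ABS64)
					continue;
				/* 32-bit offset from the entry itself to the
				 * patched position; left symbolic so the final
				 * vmlinux link resolves it. */
				printf(".long __hyp_section_base + 0x%llx - .\n",
				       (unsigned long long)rela[j].r_offset);
			}
		}
	}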
arch/arm64/kvm/hyp/nvhe/host.S
@@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter)
  * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
  */
 SYM_FUNC_START(__hyp_do_panic)
-	/* Load the format arguments into x1-7 */
-	mov	x6, x3
-	get_vcpu_ptr x7, x3
-
-	mrs	x3, esr_el2
-	mrs	x4, far_el2
-	mrs	x5, hpfar_el2
-
 	/* Prepare and exit to the host's panic funciton. */
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
 		      PSR_MODE_EL1h)
 	msr	spsr_el2, lr
 	ldr	lr, =panic
+	hyp_kimg_va lr, x6
 	msr	elr_el2, lr
 
-	/*
-	 * Set the panic format string and enter the host, conditionally
-	 * restoring the host context.
-	 */
+	/* Set the panic format string. Use the, now free, LR as scratch. */
+	ldr	lr, =__hyp_panic_string
+	hyp_kimg_va lr, x6
+
+	/* Load the format arguments into x1-7. */
+	mov	x6, x3
+	get_vcpu_ptr x7, x3
+	mrs	x3, esr_el2
+	mrs	x4, far_el2
+	mrs	x5, hpfar_el2
+
+	/* Enter the host, conditionally restoring the host context. */
 	cmp	x0, xzr
-	ldr	x0, =__hyp_panic_string
+	mov	x0, lr
 	b.eq	__host_enter_without_restoring
 	b	__host_enter_for_panic
 SYM_FUNC_END(__hyp_do_panic)
@@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic)
 	 * Preserve x0-x4, which may contain stub parameters.
 	 */
 	ldr	x5, =__kvm_handle_stub_hvc
-	kimg_pa	x5, x6
+	hyp_pa	x5, x6
 	br	x5
 .L__vect_end\@:
 .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -18,7 +18,7 @@
 #include <asm/virt.h>
 
 	.text
-	.pushsection	.hyp.idmap.text, "ax"
+	.pushsection	.idmap.text, "ax"
 
 	.align	11
@@ -132,7 +132,6 @@ alternative_else_nop_endif
 
 	/* Set the host vector */
 	ldr	x0, =__kvm_hyp_host_vector
-	kimg_hyp_va	x0, x1
 	msr	vbar_el2, x0
 
 	ret
@@ -191,7 +190,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
 	/* Leave idmap. */
 	mov	x0, x29
 	ldr	x1, =kvm_host_psci_cpu_entry
-	kimg_hyp_va	x1, x2
 	br	x1
 SYM_CODE_END(__kvm_hyp_init_cpu)
arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -108,9 +108,9 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
 
 typedef void (*hcall_t)(struct kvm_cpu_context *);
 
-#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x)
+#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
 
-static const hcall_t *host_hcall[] = {
+static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_vcpu_run),
 	HANDLE_FUNC(__kvm_flush_vm_context),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
@@ -130,7 +130,6 @@ static const hcall_t host_hcall[] = {
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(unsigned long, id, host_ctxt, 0);
-	const hcall_t *kfn;
 	hcall_t hfn;
 
 	id -= KVM_HOST_SMCCC_ID(0);
@@ -138,13 +137,11 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
 	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
 		goto inval;
 
-	kfn = host_hcall[id];
-	if (unlikely(!kfn))
+	hfn = host_hcall[id];
+	if (unlikely(!hfn))
 		goto inval;
 
 	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
-
-	hfn = kimg_fn_hyp_va(kfn);
 	hfn(host_ctxt);
 
 	return;
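The net effect on the dispatcher: the extra indirection and the kimg_fn_hyp_va() conversion disappear, because the boot-time relocation pass has already rewritten the table entries from kernel VAs to hyp VAs — a sketch of the resulting path:

	hcall_t hfn = host_hcall[id];	/* entry already holds a hyp VA */

	if (unlikely(!hfn))
		goto inval;
	hfn(host_ctxt);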
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
 	if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
 		hyp_panic();
 
-	cpu_base_array = (unsigned long *)hyp_symbol_addr(kvm_arm_hyp_percpu_base);
+	cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base;
 	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
-	elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start);
+	elf_base = (unsigned long)&__per_cpu_start;
 	return this_cpu_base - elf_base;
 }
arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -12,14 +12,17 @@
 #include <asm/memory.h>
 
 SECTIONS {
+	HYP_SECTION(.idmap.text)
 	HYP_SECTION(.text)
+	HYP_SECTION(.data..ro_after_init)
+	HYP_SECTION(.rodata)
+
 	/*
 	 * .hyp..data..percpu needs to be page aligned to maintain the same
 	 * alignment for when linking into vmlinux.
 	 */
 	. = ALIGN(PAGE_SIZE);
-	HYP_SECTION_NAME(.data..percpu) : {
+	BEGIN_HYP_SECTION(.data..percpu)
 		PERCPU_INPUT(L1_CACHE_BYTES)
-	}
-	HYP_SECTION(.data..ro_after_init)
+	END_HYP_SECTION
 }
arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -128,8 +128,8 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
 	if (cpu_id == INVALID_CPU_ID)
 		return PSCI_RET_INVALID_PARAMS;
 
-	boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
-	init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);
+	boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
+	init_params = per_cpu_ptr(&kvm_init_params, cpu_id);
 
 	/* Check if the target CPU is already being booted. */
 	if (!try_acquire_boot_args(boot_args))
@@ -140,7 +140,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
 	wmb();
 
 	ret = psci_call(func_id, mpidr,
-			__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
+			__hyp_pa(&kvm_hyp_cpu_entry),
 			__hyp_pa(init_params));
 
 	/* If successful, the lock will be released by the target CPU. */
@@ -159,8 +159,8 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
 	struct psci_boot_args *boot_args;
 	struct kvm_nvhe_init_params *init_params;
 
-	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
-	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+	boot_args = this_cpu_ptr(&suspend_args);
+	init_params = this_cpu_ptr(&kvm_init_params);
 
 	/*
 	 * No need to acquire a lock before writing to boot_args because a core
@@ -174,7 +174,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
 	 * point if it is a deep sleep state.
 	 */
 	return psci_call(func_id, power_state,
-			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+			 __hyp_pa(&kvm_hyp_cpu_resume),
 			 __hyp_pa(init_params));
 }
@@ -186,8 +186,8 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
 	struct psci_boot_args *boot_args;
 	struct kvm_nvhe_init_params *init_params;
 
-	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
-	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+	boot_args = this_cpu_ptr(&suspend_args);
+	init_params = this_cpu_ptr(&kvm_init_params);
 
 	/*
 	 * No need to acquire a lock before writing to boot_args because a core
@@ -198,7 +198,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
 
 	/* Will only return on error. */
 	return psci_call(func_id,
-			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+			 __hyp_pa(&kvm_hyp_cpu_resume),
 			 __hyp_pa(init_params), 0);
 }
@@ -207,12 +207,12 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
 	struct psci_boot_args *boot_args;
 	struct kvm_cpu_context *host_ctxt;
 
-	host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 
 	if (is_cpu_on)
-		boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
+		boot_args = this_cpu_ptr(&cpu_on_args);
 	else
-		boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
+		boot_args = this_cpu_ptr(&suspend_args);
 
 	cpu_reg(host_ctxt, 0) = boot_args->r0;
 	write_sysreg_el2(boot_args->pc, SYS_ELR);
arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
 	}
 
 	rd = kvm_vcpu_dabt_get_rd(vcpu);
-	addr  = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
+	addr  = kvm_vgic_global_state.vcpu_hyp_va;
 	addr += fault_ipa - vgic->vgic_cpu_base;
 
 	if (kvm_vcpu_dabt_iswrite(vcpu)) {
arch/arm64/kvm/va_layout.c
@@ -81,6 +81,34 @@ __init void kvm_compute_layout(void)
 	init_hyp_physvirt_offset();
 }
 
+/*
+ * The .hyp.reloc ELF section contains a list of kimg positions that
+ * contains kimg VAs but will be accessed only in hyp execution context.
+ * Convert them to hyp VAs. See gen-hyprel.c for more details.
+ */
+__init void kvm_apply_hyp_relocations(void)
+{
+	int32_t *rel;
+	int32_t *begin = (int32_t *)__hyp_reloc_begin;
+	int32_t *end = (int32_t *)__hyp_reloc_end;
+
+	for (rel = begin; rel < end; ++rel) {
+		uintptr_t *ptr, kimg_va;
+
+		/*
+		 * Each entry contains a 32-bit relative offset from itself
+		 * to a kimg VA position.
+		 */
+		ptr = (uintptr_t *)lm_alias((char *)rel + *rel);
+
+		/* Read the kimg VA value at the relocation address. */
+		kimg_va = *ptr;
+
+		/* Convert to hyp VA and store back to the relocation address. */
+		*ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
+	}
+}
+
 static u32 compute_instruction(int n, u32 rd, u32 rn)
 {
 	u32 insn = AARCH64_BREAK_FAULT;
@@ -255,12 +283,6 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
 	*updptr++ = cpu_to_le32(insn);
 }
 
-void kvm_update_kimg_phys_offset(struct alt_instr *alt,
-				 __le32 *origptr, __le32 *updptr, int nr_inst)
-{
-	generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
-}
-
 void kvm_get_kimage_voffset(struct alt_instr *alt,
 			    __le32 *origptr, __le32 *updptr, int nr_inst)
 {
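A minimal user-space model of the entry decoding performed above (hypothetical values; lm_alias() and __early_kern_hyp_va() exist only in the kernel and are omitted, so this only demonstrates the place-relative addressing):

	#include <stdint.h>
	#include <stdio.h>

	/* Each .hyp.reloc entry is a 32-bit offset from the entry itself
	 * to the position holding the value to rewrite. */
	static uintptr_t *reloc_target(int32_t *rel)
	{
		return (uintptr_t *)((char *)rel + *rel);
	}

	int main(void)
	{
		uintptr_t slot = 0xdeadbeef;	/* stand-in for a kimg VA */
		int32_t entry = (int32_t)((char *)&slot - (char *)&entry);

		printf("entry resolves to %#lx\n",
		       (unsigned long)*reloc_target(&entry));
		return 0;
	}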