Commit 15c99816, authored Jun 10, 2020 by Marc Zyngier
    Merge branch 'kvm-arm64/ptrauth-fixes' into kvmarm-master/next

    Signed-off-by: Marc Zyngier <maz@kernel.org>

parents 0370964d 304e2989
Showing 11 changed files with 111 additions and 85 deletions (+111 -85)
arch/arm64/include/asm/kvm_asm.h       +30  -3
arch/arm64/include/asm/kvm_emulate.h    +0  -6
arch/arm64/include/asm/kvm_host.h       +0  -3
arch/arm64/include/asm/kvm_mmu.h        +0 -20
arch/arm64/kvm/arm.c                    +2  -4
arch/arm64/kvm/handle_exit.c            +3 -29
arch/arm64/kvm/hyp/debug-sr.c           +2  -2
arch/arm64/kvm/hyp/switch.c            +63  -2
arch/arm64/kvm/hyp/sysreg-sr.c          +4  -2
arch/arm64/kvm/pmu.c                    +2  -6
arch/arm64/kvm/sys_regs.c               +5  -8
arch/arm64/include/asm/kvm_asm.h
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);

 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to a loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr;					\
+		asm("adrp	%0, %1\n"				\
+		    "add	%0, %0, :lo12:%1\n"			\
+		    : "=r" (addr) : "S" (&s));				\
+		addr;							\
+	})
+
+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
 #define __hyp_this_cpu_ptr(sym)					\
 	({								\
-		void *__ptr = hyp_symbol_addr(sym);			\
+		void *__ptr;						\
+		__verify_pcpu_ptr(&sym);				\
+		__ptr = hyp_symbol_addr(sym);				\
 		__ptr += read_sysreg(tpidr_el2);			\
-		(typeof(&sym))__ptr;					\
+		(typeof(sym) __kernel __force *)__ptr;			\
 	 })

 #define __hyp_this_cpu_read(sym)					\
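The reworked __hyp_this_cpu_ptr() boils down to "PC-relative symbol address plus the per-CPU offset stashed in TPIDR_EL2". Below is a minimal userspace sketch of that arithmetic, not part of this commit; all names are hypothetical, and the system-register offset is emulated with ordinary array spacing:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

/* Emulated per-CPU storage: one copy of the "symbol" per CPU. */
static long counter[NR_CPUS];

/* Stand-in for the TPIDR_EL2 value: this CPU's displacement from CPU 0. */
static uintptr_t percpu_offset(int cpu)
{
	return (uintptr_t)&counter[cpu] - (uintptr_t)&counter[0];
}

/* Analogue of __hyp_this_cpu_ptr(): symbol address + per-CPU offset. */
static long *this_cpu_ptr_emul(int cpu)
{
	char *ptr = (char *)&counter[0];	/* "symbol" address */
	ptr += percpu_offset(cpu);		/* add the per-CPU displacement */
	return (long *)ptr;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		*this_cpu_ptr_emul(cpu) = 100 + cpu;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %ld\n", cpu, counter[cpu]);
	return 0;
}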
arch/arm64/include/asm/kvm_emulate.h
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
 }

-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
-	if (vcpu_has_ptrauth(vcpu))
-		vcpu_ptrauth_disable(vcpu);
-}
-
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.vsesr_el2;
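The removed helper was only a guarded call to vcpu_ptrauth_disable(), which clears HCR_EL2.API/APK so that the guest's first ptrauth use traps to the hypervisor; the trap handler sets the bits back once the host keys are saved. A standalone sketch of that bit dance, assuming the architectural bit positions (API is bit 41, APK is bit 40) and made-up scaffolding:

#include <stdio.h>
#include <stdint.h>

/* Trap-control bits of HCR_EL2; when clear, key accesses trap. */
#define HCR_API	(1UL << 41)
#define HCR_APK	(1UL << 40)

int main(void)
{
	uint64_t hcr = HCR_API | HCR_APK;	/* ptrauth currently untrapped */

	/* vcpu_ptrauth_disable(): clear the bits so the next use traps. */
	hcr &= ~(HCR_API | HCR_APK);
	printf("trapping:  %#llx\n", (unsigned long long)hcr);

	/* trap handler: re-set them once the host keys are saved. */
	hcr |= (HCR_API | HCR_APK);
	printf("pass-thru: %#llx\n", (unsigned long long)hcr);
	return 0;
}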
arch/arm64/include/asm/kvm_host.h
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch vcpu_debug_state;
 	struct kvm_guest_debug_arch external_debug_state;

-	/* Pointer to host CPU context */
-	struct kvm_cpu_context *host_cpu_context;
-
 	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
arch/arm64/include/asm/kvm_mmu.h
@@ -107,26 +107,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)

 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s)						\
-	({								\
-		typeof(s) *addr;					\
-		asm("adrp	%0, %1\n"				\
-		    "add	%0, %0, :lo12:%1\n"			\
-		    : "=r" (addr) : "S" (&s));				\
-		addr;							\
-	})
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
arch/arm64/kvm/arm.c
@@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
-	kvm_host_data_t *cpu_data;

 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-	cpu_data = this_cpu_ptr(&kvm_host_data);

 	/*
 	 * We might get preempted before the vCPU actually runs, but
@@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}

 	vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;

 	kvm_vgic_load(vcpu);
 	kvm_timer_vcpu_load(vcpu);
@@ -370,7 +367,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	else
 		vcpu_set_wfx_traps(vcpu);

-	vcpu_ptrauth_setup_lazy(vcpu);
+	if (vcpu_has_ptrauth(vcpu))
+		vcpu_ptrauth_disable(vcpu);
 }

 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
arch/arm64/kvm/handle_exit.c
@@ -162,40 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }

-#define __ptrauth_save_key(regs, key)						\
-({										\
-	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
-	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
-})
-
-/*
- * Handle the guest trying to use a ptrauth instruction, or trying to access a
- * ptrauth register.
- */
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpu_context *ctxt;
-
-	if (vcpu_has_ptrauth(vcpu)) {
-		vcpu_ptrauth_enable(vcpu);
-		ctxt = vcpu->arch.host_cpu_context;
-		__ptrauth_save_key(ctxt->sys_regs, APIA);
-		__ptrauth_save_key(ctxt->sys_regs, APIB);
-		__ptrauth_save_key(ctxt->sys_regs, APDA);
-		__ptrauth_save_key(ctxt->sys_regs, APDB);
-		__ptrauth_save_key(ctxt->sys_regs, APGA);
-	} else {
-		kvm_inject_undefined(vcpu);
-	}
-}
-
 /*
  * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP).
+ * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
+ * that we can do is give the guest an UNDEF.
  */
 static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	kvm_arm_vcpu_ptrauth_trap(vcpu);
+	kvm_inject_undefined(vcpu);
 	return 1;
 }
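The __ptrauth_save_key() macro removed here (and re-added in hyp/switch.c below) relies on ## token pasting to derive both the array index and the SYS_* register encoding from a single key name. A compilable toy version with mocked registers, where all indices and values are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Mock register file and reader standing in for ctxt->sys_regs and
 * read_sysreg_s(); the encodings below are placeholders, not real ones. */
enum { APIAKEYLO_EL1, APIAKEYHI_EL1, NR_REGS };
#define SYS_APIAKEYLO_EL1 0xdead
#define SYS_APIAKEYHI_EL1 0xbeef
static uint64_t read_sysreg_s(uint64_t enc) { return enc; }

/* Same shape as the kernel macro: 'key ## KEYLO_EL1' pastes the key
 * name into both the array index and the SYS_* encoding. */
#define __ptrauth_save_key(regs, key)						\
({										\
	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
})

int main(void)
{
	uint64_t regs[NR_REGS] = { 0 };

	__ptrauth_save_key(regs, APIA);	/* expands to the two stores above */
	printf("lo=%#llx hi=%#llx\n",
	       (unsigned long long)regs[APIAKEYLO_EL1],
	       (unsigned long long)regs[APIAKEYHI_EL1]);
	return 0;
}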
arch/arm64/kvm/hyp/debug-sr.c
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;

-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;

-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
arch/arm64/kvm/hyp/switch.c
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
 	return true;
 }

+static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+{
+	u32 ec = ESR_ELx_EC(esr);
+
+	if (ec == ESR_ELx_EC_PAC)
+		return true;
+
+	if (ec != ESR_ELx_EC_SYS64)
+		return false;
+
+	switch (esr_sys64_to_sysreg(esr)) {
+	case SYS_APIAKEYLO_EL1:
+	case SYS_APIAKEYHI_EL1:
+	case SYS_APIBKEYLO_EL1:
+	case SYS_APIBKEYHI_EL1:
+	case SYS_APDAKEYLO_EL1:
+	case SYS_APDAKEYHI_EL1:
+	case SYS_APDBKEYLO_EL1:
+	case SYS_APDBKEYHI_EL1:
+	case SYS_APGAKEYLO_EL1:
+	case SYS_APGAKEYHI_EL1:
+		return true;
+	}
+
+	return false;
+}
+
+#define __ptrauth_save_key(regs, key)						\
+({										\
+	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
+	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
+})
+
+static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *ctxt;
+	u64 val;
+
+	if (!vcpu_has_ptrauth(vcpu) ||
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+		return false;
+
+	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	__ptrauth_save_key(ctxt->sys_regs, APIA);
+	__ptrauth_save_key(ctxt->sys_regs, APIB);
+	__ptrauth_save_key(ctxt->sys_regs, APDA);
+	__ptrauth_save_key(ctxt->sys_regs, APDB);
+	__ptrauth_save_key(ctxt->sys_regs, APGA);
+
+	vcpu_ptrauth_enable(vcpu);
+
+	val = read_sysreg(hcr_el2);
+	val |= (HCR_API | HCR_APK);
+	write_sysreg(val, hcr_el2);
+
+	return true;
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (__hyp_handle_fpsimd(vcpu))
 		return true;

+	if (__hyp_handle_ptrauth(vcpu))
+		return true;
+
 	if (!__populate_fault_info(vcpu))
 		return true;
@@ -642,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt;
 	u64 exit_code;

-	host_ctxt = vcpu->arch.host_cpu_context;
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
@@ -747,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 	vcpu = kern_hyp_va(vcpu);

-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
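esr_is_ptrauth_trap() keys off the exception class held in ESR_ELx bits [31:26]. A small sketch of that field extraction, using the EC values as defined in arch/arm64/include/asm/esr.h; the test value fed to it is made up:

#include <stdio.h>
#include <stdint.h>

/* ESR_ELx layout: the exception class lives in bits [31:26]. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3fUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC_PAC		0x09	/* trapped PAC instruction */
#define ESR_ELx_EC_SYS64	0x18	/* trapped MSR/MRS access */

int main(void)
{
	/* Synthesize an ESR whose EC field says "PAC instruction trap". */
	uint32_t esr = (uint32_t)ESR_ELx_EC_PAC << ESR_ELx_EC_SHIFT;

	printf("EC=%#lx -> %s\n", (unsigned long)ESR_ELx_EC(esr),
	       ESR_ELx_EC(esr) == ESR_ELx_EC_PAC ? "ptrauth trap" : "other");
	return 0;
}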
arch/arm64/kvm/hyp/sysreg-sr.c
@@ -263,12 +263,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+	struct kvm_cpu_context *host_ctxt;

 	if (!has_vhe())
 		return;

+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	__sysreg_save_user_state(host_ctxt);

 	/*
@@ -299,12 +300,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+	struct kvm_cpu_context *host_ctxt;

 	if (!has_vhe())
 		return;

+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	deactivate_traps_vhe_put();

 	__sysreg_save_el1_state(guest_ctxt);
arch/arm64/kvm/pmu.c
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt;
 	struct kvm_host_data *host;
 	u32 events_guest, events_host;

 	if (!has_vhe())
 		return;

-	host_ctxt = vcpu->arch.host_cpu_context;
-	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+	host = this_cpu_ptr(&kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *host_ctxt;
 	struct kvm_host_data *host;
 	u32 events_guest, events_host;

 	if (!has_vhe())
 		return;

-	host_ctxt = vcpu->arch.host_cpu_context;
-	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+	host = this_cpu_ptr(&kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
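The old code recovered the enclosing kvm_host_data from its embedded host_ctxt member with container_of(); the new code simply takes the per-CPU kvm_host_data directly. A userspace sketch of what container_of() was doing, with simplified stand-in structs:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for kvm_cpu_context / kvm_host_data. */
struct cpu_context { unsigned long regs[4]; };
struct host_data {
	struct cpu_context host_ctxt;
	unsigned int pmu_events;
};

/* container_of(): step back from a member pointer to its container. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct host_data data = { .pmu_events = 42 };
	struct cpu_context *ctxt = &data.host_ctxt;

	/* The removed code walked back from the member to the container... */
	struct host_data *host = container_of(ctxt, struct host_data, host_ctxt);
	printf("pmu_events = %u\n", host->pmu_events);
	/* ...whereas this_cpu_ptr(&kvm_host_data) yields it directly. */
	return 0;
}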
arch/arm64/kvm/sys_regs.c
@@ -1032,16 +1032,13 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu,
 			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *rd)
 {
-	kvm_arm_vcpu_ptrauth_trap(vcpu);
-
 	/*
-	 * Return false for both cases as we never skip the trapped
-	 * instruction:
-	 *
-	 * - Either we re-execute the same key register access instruction
-	 *   after enabling ptrauth.
-	 * - Or an UNDEF is injected as ptrauth is not supported/enabled.
+	 * If we land here, that is because we didn't fixup the access on exit
+	 * by allowing the PtrAuth sysregs. The only way this happens is when
+	 * the guest does not have PtrAuth support enabled.
 	 */
+	kvm_inject_undefined(vcpu);
+
 	return false;
 }