Commit 88a84ccc authored by James Morse, committed by Catalin Marinas

KVM: arm64: Survive synchronous exceptions caused by AT instructions

KVM doesn't expect any synchronous exceptions when executing; any such
exception leads to a panic(). AT instructions access the guest page
tables, and can cause a synchronous external abort to be taken.

The arm-arm is unclear on what should happen if the guest has configured
the hardware update of the access-flag, and a memory type in TCR_EL1 that
does not support atomic operations. B2.2.6 "Possible implementation
restrictions on using atomic instructions" from DDI0487F.a lists
synchronous external abort as a possible behaviour of atomic instructions
that target memory that isn't writeback cacheable, but the page table
walker may behave differently.

Make KVM robust to synchronous exceptions caused by AT instructions.
Add a get_user() style helper for AT instructions that returns -EFAULT
if an exception was generated.
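
For illustration only, a hedged sketch of how a caller consumes such a
helper. The stage1_walk_succeeded() wrapper below is hypothetical (it is
not code from this commit); it mirrors the __translate_far_to_hpfar()
change further down:

#include <asm/kvm_asm.h>	/* __kvm_at() */
#include <asm/sysreg.h>		/* read_sysreg(), write_sysreg(), SYS_PAR_EL1_F */

/* Walk stage-1 for 'far'; false means the AT instruction itself faulted. */
static bool stage1_walk_succeeded(u64 far, u64 *par)
{
	/* __kvm_at() returns 0 on success, -EFAULT if the AT took an exception. */
	if (__kvm_at("s1e1r", far)) {
		*par = SYS_PAR_EL1_F;	/* treat it as a failed walk; re-enter the guest */
		return false;
	}

	*par = read_sysreg(par_el1);	/* valid only after the isb() inside __kvm_at() */
	return true;
}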

While KVM's version of the exception table mixes synchronous and
asynchronous exceptions, only one of these can occur at each location.
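
(Background, not part of this patch: each __KVM_EXTABLE record is a pair
of 32-bit self-relative offsets, and the parent commit's EL2 fault handler
rewrites ELR_EL2 to the "to" address when the faulting address matches.
A rough C sketch of that lookup, with approximate names, might look like:)

#include <asm/sysreg.h>		/* read_sysreg(), write_sysreg() */

struct exception_table_entry {
	int insn, fixup;	/* offsets relative to each field's own address */
};

/* Section bounds of __kvm_ex_table, as laid out by the linker script. */
extern struct exception_table_entry __start___kvm_ex_table[];
extern struct exception_table_entry __stop___kvm_ex_table[];

static void fixup_el2_exception(void)	/* hypothetical helper */
{
	unsigned long elr = read_sysreg(elr_el2);
	struct exception_table_entry *entry;

	for (entry = __start___kvm_ex_table; entry < __stop___kvm_ex_table; entry++) {
		unsigned long addr  = (unsigned long)&entry->insn + entry->insn;
		unsigned long fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr == elr) {
			write_sysreg(fixup, elr_el2);	/* resume at the fixup label */
			return;
		}
	}
	/* No match: this really was an unexpected exception; panic. */
}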

Re-enter the guest when the AT instructions take an exception on the
assumption the guest will take the same exception. This isn't guaranteed
to make forward progress, as the AT instructions may always walk the page
tables, but guest execution may use the translation cached in the TLB.

This isn't a problem: since commit 5dcd0fdb ("KVM: arm64: Defer guest
entry when an asynchronous exception is pending"), KVM will return to the
host to process IRQs, allowing the rest of the system to keep running.

Cc: stable@vger.kernel.org # <v5.3: 5dcd0fdb ("KVM: arm64: Defer guest entry when an asynchronous exception is pending")
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent e9ee186b
@@ -169,6 +169,34 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 		*__hyp_this_cpu_ptr(sym);				\
 	 })
 
+#define __KVM_EXTABLE(from, to)						\
+	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
+	"	.align		3\n"					\
+	"	.long		(" #from " - .), (" #to " - .)\n"	\
+	"	.popsection\n"
+
+
+#define __kvm_at(at_op, addr)						\
+( {									\
+	int __kvm_at_err = 0;						\
+	u64 spsr, elr;							\
+	asm volatile(							\
+	"	mrs	%1, spsr_el2\n"					\
+	"	mrs	%2, elr_el2\n"					\
+	"1:	at	"at_op", %3\n"					\
+	"	isb\n"							\
+	"	b	9f\n"						\
+	"2:	msr	spsr_el2, %1\n"					\
+	"	msr	elr_el2, %2\n"					\
+	"	mov	%w0, %4\n"					\
+	"9:\n"								\
+	__KVM_EXTABLE(1b, 2b)						\
+	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
+	: "r" (addr), "i" (-EFAULT));					\
+	__kvm_at_err;							\
+} )
+
+
 #else /* __ASSEMBLY__ */
 
 .macro hyp_adr_this_cpu reg, sym, tmp
@@ -167,13 +167,19 @@ el1_error:
 	b	__guest_exit
 
 el2_sync:
-	/* Check for illegal exception return, otherwise panic */
+	/* Check for illegal exception return */
 	mrs	x0, spsr_el2
+	tbnz	x0, #20, 1f
 
-	/* if this was something else, then panic! */
-	tst	x0, #PSR_IL_BIT
-	b.eq	__hyp_panic
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+	bl	kvm_unexpected_el2_exception
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
+	eret
+
+1:
 	/* Let's attempt a recovery from the illegal exception return */
 	get_vcpu_ptr	x1, x0
 	mov	x0, #ARM_EXCEPTION_IL
@@ -146,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
 	 * saved the guest context yet, and we may return early...
 	 */
 	par = read_sysreg(par_el1);
-	asm volatile("at s1e1r, %0" : : "r" (far));
-	isb();
-
-	tmp = read_sysreg(par_el1);
+	if (!__kvm_at("s1e1r", far))
+		tmp = read_sysreg(par_el1);
+	else
+		tmp = SYS_PAR_EL1_F; /* back to the guest */
 	write_sysreg(par, par_el1);
 
 	if (unlikely(tmp & SYS_PAR_EL1_F))