Commit 6ddbc281 authored by Marc Zyngier

KVM: arm64: Move kvm_vcpu_trap_il_is32bit into kvm_skip_instr32()

There is no need to feed the result of kvm_vcpu_trap_il_is32bit()
to kvm_skip_instr(): only AArch32 has a variable-length ISA, so the
helper can equally be called from kvm_skip_instr32(), reducing the
complexity at all the call sites.
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent c22588c9
@@ -26,7 +26,7 @@ unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
 void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);
 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
@@ -472,10 +472,10 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
 	return data;		/* Leave LE untouched */
 }
 
-static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
-		kvm_skip_instr32(vcpu, is_wide_instr);
+		kvm_skip_instr32(vcpu);
 	} else {
 		*vcpu_pc(vcpu) += 4;
 		*vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
@@ -494,7 +494,7 @@ static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
 
-	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	kvm_skip_instr(vcpu);
 
 	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
...
@@ -61,7 +61,7 @@ static int handle_smc(struct kvm_vcpu *vcpu)
 	 * otherwise return to the same address...
 	 */
 	vcpu_set_reg(vcpu, 0, ~0UL);
-	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	kvm_skip_instr(vcpu);
 	return 1;
 }
@@ -100,7 +100,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 	}
 
-	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	kvm_skip_instr(vcpu);
 	return 1;
 }
@@ -221,7 +221,7 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
 	 * that fail their condition code check"
 	 */
 	if (!kvm_condition_valid(vcpu)) {
-		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		kvm_skip_instr(vcpu);
 		handled = 1;
 	} else {
 		exit_handle_fn exit_handler;
...
@@ -123,13 +123,13 @@ static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
  * kvm_skip_instr - skip a trapped instruction and proceed to the next
  * @vcpu: The vcpu pointer
  */
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+void kvm_skip_instr32(struct kvm_vcpu *vcpu)
 {
 	u32 pc = *vcpu_pc(vcpu);
 	bool is_thumb;
 
 	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
-	if (is_thumb && !is_wide_instr)
+	if (is_thumb && !kvm_vcpu_trap_il_is32bit(vcpu))
 		pc += 2;
 	else
 		pc += 4;
...
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
 	 * The MMIO instruction is emulated and should not be re-executed
 	 * in the guest.
 	 */
-	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	kvm_skip_instr(vcpu);
 
 	return 0;
 }
...
@@ -1014,7 +1014,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	 * cautious, and skip the instruction.
 	 */
 	if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
-		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		kvm_skip_instr(vcpu);
 		ret = 1;
 		goto out_unlock;
 	}
...
@@ -2199,7 +2199,7 @@ static void perform_access(struct kvm_vcpu *vcpu,
 	/* Skip instruction if instructed so */
 	if (likely(r->access(vcpu, params, r)))
-		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		kvm_skip_instr(vcpu);
 }
 
 /*
...