Commit 7846b311 authored by Dave Martin, committed by Marc Zyngier

KVM: arm64: Fold redundant exit code checks out of fixup_guest_exit()

The entire tail of fixup_guest_exit() is contained in if statements
of the form if (x && *exit_code == ARM_EXCEPTION_TRAP).  As a result,
we can check just once and bail out of the function early, allowing
the remaining if conditions to be simplified.
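
For illustration, the repeated pattern and its folded form look like this, condensed from the diff below:

	/* Before: each late check repeats the exit-code test. */
	if (*exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		return true;

	/* After: test once up front and bail out early... */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* ...so the remaining conditions no longer need the test. */
	if (!__populate_fault_info(vcpu))
		return true;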

The only awkward case is where *exit_code is changed to
ARM_EXCEPTION_EL1_SERROR for an illegal GICv2 CPU interface
access: there, the GICv3 trap handling code is skipped using a
goto, as sketched below.  This avoids pointlessly evaluating the
static branch check for the GICv3 case, even though
vgic_v2_cpuif_trap and vgic_v3_cpuif_trap can't both be true
unless the host has both a GICv2 and a GICv3: that sounds stupid,
but I haven't satisfied myself that it can't happen.
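
Condensed from the diff below, the valid-access path of the GICv2
block now ends with the goto, so control jumps straight past the
vgic_v3_cpuif_trap static branch:

	if (valid) {
		/* ... perform the access, or promote it to an SError ... */
		goto exit;	/* never falls through to the GICv3 check */
	}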

No functional change.
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent ba4f4cb0
@@ -387,11 +387,13 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	 * same PC once the SError has been injected, and replay the
 	 * trapping instruction.
 	 */
-	if (*exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+	if (*exit_code != ARM_EXCEPTION_TRAP)
+		goto exit;
+
+	if (!__populate_fault_info(vcpu))
 		return true;
 
-	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
-	    *exit_code == ARM_EXCEPTION_TRAP) {
+	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		bool valid;
 
 		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
@@ -417,11 +419,12 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 			*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
 			*exit_code = ARM_EXCEPTION_EL1_SERROR;
 		}
+
+		goto exit;
 	}
 
 	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
-	    *exit_code == ARM_EXCEPTION_TRAP &&
 	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
 	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
@@ -430,6 +433,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 		return true;
 	}
 
+exit:
 	/* Return to the host kernel and handle the exit */
 	return false;
 }