Commit 8038a922 authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.8-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/arm fixes for 5.8, take #3

- Disable preemption on context-switching PMU EL0 state happening
  on system register trap
- Don't clobber X0 when tearing down KVM via a soft reset (kexec)
parents fa71e952 b9e10d4a
@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
 
 1:	cmp	x0, #HVC_RESET_VECTORS
 	b.ne	1f
-reset:
 	/*
-	 * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-	 * case we coming via HVC_SOFT_RESTART.
+	 * Set the HVC_RESET_VECTORS return code before entering the common
+	 * path so that we do not clobber x0-x2 in case we are coming via
+	 * HVC_SOFT_RESTART.
 	 */
+	mov	x0, xzr
+
+reset:
+	/* Reset kvm back to the hyp stub. */
 	mrs	x5, sctlr_el2
 	mov_q	x6, SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
 	/* Install stub vectors */
 	adr_l	x5, __hyp_stub_vectors
 	msr	vbar_el2, x5
-	mov	x0, xzr
 	eret
 
 1:	/* Bad stub call */
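The two hunks above move "mov x0, xzr" out of the shared reset path and into the HVC_RESET_VECTORS-only path, so a soft restart (HVC_SOFT_RESTART, used by kexec) no longer has its x0 argument overwritten right before eret. The following is a rough C analogy of that control-flow change, not the actual hypervisor code; the names stub_regs, do_reset and handle_stub are invented for illustration, and the constant values are placeholders.

#include <stdint.h>

#define HVC_RESET_VECTORS 1	/* illustrative values only */
#define HVC_SOFT_RESTART  2

/* Hypothetical stand-in for the register state handed back at eret. */
struct stub_regs {
	uint64_t x0, x1, x2;
};

static void do_reset(struct stub_regs *regs)
{
	/*
	 * Common teardown path (the code after the "reset:" label).
	 * After the fix it no longer writes regs->x0, so the arguments a
	 * soft restart shuffled into x0-x2 survive to the new entry point.
	 */
	(void)regs;
}

static void handle_stub(uint64_t function, struct stub_regs *regs)
{
	if (function == HVC_RESET_VECTORS)
		regs->x0 = 0;	/* return code set *before* the common path */
	do_reset(regs);		/* previously the zeroing sat at the end of this path */
}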
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
 }
 
 /*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	if (!has_vhe())
 		return;
 
+	preempt_disable();
 	host = this_cpu_ptr(&kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
 
 	kvm_vcpu_pmu_enable_el0(events_guest);
 	kvm_vcpu_pmu_disable_el0(events_host);
+	preempt_enable();
 }
 
 /*
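The pmu.c change wraps the per-CPU access in a preemption-disabled section because kvm_vcpu_pmu_restore_guest() is also reached from the (preemptible) sysreg trap path: without it, the task could migrate to another CPU between this_cpu_ptr() and the counter updates and operate on the wrong CPU's state. A minimal sketch of that pattern follows; the demo_pmu_state structure and the demo_*_el0() helpers are invented stand-ins for kvm_host_data and the kvm_vcpu_pmu_*_el0() code, while preempt_disable(), preempt_enable(), this_cpu_ptr() and DEFINE_PER_CPU() are the real kernel primitives involved.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Invented per-CPU bookkeeping, standing in for kvm_host_data. */
struct demo_pmu_state {
	unsigned long events_guest;
	unsigned long events_host;
};

static DEFINE_PER_CPU(struct demo_pmu_state, demo_pmu_state);

static void demo_enable_el0(unsigned long events)  { /* program counters */ }
static void demo_disable_el0(unsigned long events) { /* program counters */ }

/* May be called from a preemptible context (e.g. a sysreg trap handler). */
static void demo_restore_guest_events(void)
{
	struct demo_pmu_state *state;

	/*
	 * Pin the task to the current CPU so the pointer returned by
	 * this_cpu_ptr() keeps describing the CPU we actually program.
	 */
	preempt_disable();
	state = this_cpu_ptr(&demo_pmu_state);
	demo_enable_el0(state->events_guest);
	demo_disable_el0(state->events_host);
	preempt_enable();
}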