Commit 5eba523e authored by Marc Zyngier, committed by Oliver Upton

KVM: arm64: Reload stage-2 for VMID change on VHE

Naturally, a change to the VMID for an MMU implies a new value for
VTTBR. Reload on VMID change in anticipation of loading stage-2 on
vcpu_load() instead of every guest entry.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231018233212.2888027-4-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 4288ff7b
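As background for the one-line rationale above: VTTBR_EL2 combines the stage-2 page-table base address with the VMID (bits [63:48] when 16-bit VMIDs are implemented), so a VMID rollover changes the register value even when the pgd does not move. Below is a minimal standalone sketch of that composition, not kernel code; make_vttbr and the pgd value are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	#define VTTBR_VMID_SHIFT 48
	#define VTTBR_VMID_MASK  (0xffffULL << VTTBR_VMID_SHIFT)

	/* Compose a VTTBR-like value: stage-2 pgd physical address in the
	 * low bits, VMID in bits [63:48] (16-bit VMID layout). */
	static uint64_t make_vttbr(uint64_t pgd_phys, uint64_t vmid)
	{
		return (pgd_phys & ~VTTBR_VMID_MASK) |
		       ((vmid << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK);
	}

	int main(void)
	{
		uint64_t pgd = 0x40200000ULL;	/* hypothetical pgd PA */

		/* Same pgd, different VMIDs: the composed values differ,
		 * which is why a VMID update must be followed by a
		 * stage-2 (VTTBR) reload. */
		printf("vmid 1: %#018llx\n",
		       (unsigned long long)make_vttbr(pgd, 1));
		printf("vmid 2: %#018llx\n",
		       (unsigned long long)make_vttbr(pgd, 2));
		return 0;
	}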
arch/arm64/include/asm/kvm_host.h
@@ -1025,7 +1025,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
 int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
-void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
arch/arm64/kvm/arm.c
@@ -950,7 +950,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
		 * making a thread's VMID inactive. So we need to call
		 * kvm_arm_vmid_update() in non-premptible context.
		 */
-		kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
+		if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
+		    has_vhe())
+			__load_stage2(vcpu->arch.hw_mmu,
+				      vcpu->arch.hw_mmu->arch);
 
 		kvm_pmu_flush_hwstate(vcpu);
arch/arm64/kvm/vmid.c
@@ -135,10 +135,11 @@ void kvm_arm_vmid_clear_active(void)
 	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
 }
 
-void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
 	unsigned long flags;
 	u64 vmid, old_active_vmid;
+	bool updated = false;
 
 	vmid = atomic64_read(&kvm_vmid->id);
 
@@ -156,17 +157,21 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
 	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
 					  old_active_vmid, vmid))
-		return;
+		return false;
 
 	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
 
 	/* Check that our VMID belongs to the current generation. */
 	vmid = atomic64_read(&kvm_vmid->id);
-	if (!vmid_gen_match(vmid))
+	if (!vmid_gen_match(vmid)) {
 		vmid = new_vmid(kvm_vmid);
+		updated = true;
+	}
 
 	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
 	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+
+	return updated;
 }
 
 /*