Commit e525523c authored by Marc Zyngier

Merge branch kvm-arm64/vcpu-first-run into kvmarm-master/next

* kvm-arm64/vcpu-first-run:
  : Rework the "vcpu first run" sequence to be driven by KVM's
  : "PID change" callback, removing the need for extra state.
  KVM: arm64: Drop vcpu->arch.has_run_once for vcpu->pid
  KVM: arm64: Merge kvm_arch_vcpu_run_pid_change() and kvm_vcpu_first_run_init()
  KVM: arm64: Restructure the point where has_run_once is advertised
  KVM: arm64: Move kvm_arch_vcpu_run_pid_change() out of line
  KVM: arm64: Move SVE state mapping at HYP to finalize-time
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents d58071a8 cc5705fb
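
Reviewer note (not part of the diff below): the series relies on the fact that the generic KVM_RUN path already tracks which thread last ran the vcpu via vcpu->pid and invokes an arch hook whenever that changes. A simplified sketch of that caller, paraphrased from virt/kvm/kvm_main.c of the same era; details may differ slightly:

	case KVM_RUN: {
		struct pid *oldpid;

		oldpid = rcu_access_pointer(vcpu->pid);
		if (unlikely(oldpid != task_pid(current))) {
			/* The thread running this VCPU has changed. */
			r = kvm_arch_vcpu_run_pid_change(vcpu);
			if (r)
				break;
			/*
			 * vcpu->pid is only published once the arch hook has
			 * succeeded, which is what makes a non-NULL pid a
			 * reliable "this vcpu has run at least once" marker.
			 */
			rcu_assign_pointer(vcpu->pid,
					   get_task_pid(current, PIDTYPE_PID));
			if (oldpid)
				synchronize_rcu();
			put_pid(oldpid);
		}
		r = kvm_arch_vcpu_ioctl_run(vcpu);
		break;
	}
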
@@ -367,9 +367,6 @@ struct kvm_vcpu_arch {
 	int target;
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
-	/* Detect first run of a vcpu */
-	bool has_run_once;
-
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 
@@ -606,6 +603,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
+#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)
+
 #ifndef __KVM_NVHE_HYPERVISOR__
 #define kvm_call_hyp_nvhe(f, ...)					\
 	({								\
@@ -749,12 +748,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
 
-#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
-static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
-{
-	return kvm_arch_vcpu_run_map_fp(vcpu);
-}
-
+#ifdef CONFIG_KVM
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
 void kvm_clr_pmu_events(u32 clr);
...
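
Note on the new macro above (illustrative helper, not part of the diff): rcu_access_pointer() is the documented accessor for fetching an RCU-protected pointer that is only compared against NULL and never dereferenced, so call sites need no rcu_read_lock() section:

	static inline bool vcpu_has_run_once_equiv(struct kvm_vcpu *vcpu)
	{
		/* Never dereferenced, so no RCU read-side critical section needed. */
		return !!rcu_access_pointer(vcpu->pid);
	}
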
@@ -351,7 +351,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 		static_branch_dec(&userspace_irqchip_in_use);
 
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
@@ -584,18 +584,33 @@ static void update_vmid(struct kvm_vmid *vmid)
 	spin_unlock(&kvm_vmid_lock);
 }
 
-static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.target >= 0;
+}
+
+/*
+ * Handle both the initialisation that is being done when the vcpu is
+ * run for the first time, as well as the updates that must be
+ * performed each time we get a new thread dealing with this vcpu.
+ */
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	int ret = 0;
+	int ret;
 
-	if (likely(vcpu->arch.has_run_once))
-		return 0;
+	if (!kvm_vcpu_initialized(vcpu))
+		return -ENOEXEC;
 
 	if (!kvm_arm_vcpu_is_finalized(vcpu))
 		return -EPERM;
 
-	vcpu->arch.has_run_once = true;
+	ret = kvm_arch_vcpu_run_map_fp(vcpu);
+	if (ret)
+		return ret;
+
+	if (likely(vcpu_has_run_once(vcpu)))
+		return 0;
 
 	kvm_arm_vcpu_init_debug(vcpu);
@@ -607,12 +622,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 		ret = kvm_vgic_map_resources(kvm);
 		if (ret)
 			return ret;
-	} else {
-		/*
-		 * Tell the rest of the code that there are userspace irqchip
-		 * VMs in the wild.
-		 */
-		static_branch_inc(&userspace_irqchip_in_use);
 	}
 
 	ret = kvm_timer_enable(vcpu);
@@ -620,6 +629,16 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 		return ret;
 
 	ret = kvm_arm_pmu_v3_enable(vcpu);
+	if (ret)
+		return ret;
+
+	if (!irqchip_in_kernel(kvm)) {
+		/*
+		 * Tell the rest of the code that there are userspace irqchip
+		 * VMs in the wild.
+		 */
+		static_branch_inc(&userspace_irqchip_in_use);
+	}
 
 	/*
 	 * Initialize traps for protected VMs.
@@ -679,11 +698,6 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 	smp_rmb();
 }
 
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.target >= 0;
-}
-
 static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 {
 	if (kvm_request_pending(vcpu)) {
@@ -779,13 +793,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 	int ret;
 
-	if (unlikely(!kvm_vcpu_initialized(vcpu)))
-		return -ENOEXEC;
-
-	ret = kvm_vcpu_first_run_init(vcpu);
-	if (ret)
-		return ret;
-
 	if (run->exit_reason == KVM_EXIT_MMIO) {
 		ret = kvm_handle_mmio_return(vcpu);
 		if (ret)
@@ -1123,7 +1130,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	 * need to invalidate the I-cache though, as FWB does *not*
 	 * imply CTR_EL0.DIC.
 	 */
-	if (vcpu->arch.has_run_once) {
+	if (vcpu_has_run_once(vcpu)) {
 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 			stage2_unmap_vm(vcpu->kvm);
 		else
...
@@ -43,17 +43,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 	if (ret)
 		goto error;
 
-	if (vcpu->arch.sve_state) {
-		void *sve_end;
-
-		sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu);
-
-		ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end,
-					  PAGE_HYP);
-		if (ret)
-			goto error;
-	}
-
 	vcpu->arch.host_thread_info = kern_hyp_va(ti);
 	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
 error:
...
@@ -94,6 +94,8 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 {
 	void *buf;
 	unsigned int vl;
+	size_t reg_sz;
+	int ret;
 
 	vl = vcpu->arch.sve_max_vl;
@@ -106,10 +108,17 @@
 	    vl > SVE_VL_ARCH_MAX))
 		return -EIO;
 
-	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT);
+	reg_sz = vcpu_sve_state_size(vcpu);
+	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
 	if (!buf)
 		return -ENOMEM;
 
+	ret = create_hyp_mappings(buf, buf + reg_sz, PAGE_HYP);
+	if (ret) {
+		kfree(buf);
+		return ret;
+	}
+
 	vcpu->arch.sve_state = buf;
 	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
 	return 0;
...
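
Note on the hunk above (illustrative, not part of the diff): mapping sve_state at HYP from finalize time is safe because userspace must finalize SVE before the vcpu is allowed to run at all, and kvm_arch_vcpu_run_pid_change() now returns -EPERM otherwise. A hypothetical userspace sequence showing the ordering this relies on, using the real ioctl names:

	struct kvm_vcpu_init init = { /* target plus the KVM_ARM_VCPU_SVE feature bit */ };
	int feature = KVM_ARM_VCPU_SVE;

	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);        /* request SVE for this vcpu */
	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature); /* sve_state allocated and mapped at HYP here */
	ioctl(vcpu_fd, KVM_RUN, 0);                      /* would fail with -EPERM had FINALIZE been skipped */
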
@@ -91,7 +91,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 		return ret;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu->arch.has_run_once)
+		if (vcpu_has_run_once(vcpu))
 			goto out_unlock;
 	}
 	ret = 0;
...