Commit 27406cd5 authored by Christian Borntraeger

KVM: s390: provide functions for blocking all CPUs

Some updates to the control blocks need to be done in a way that
ensures that no CPU is within SIE. Provide wrappers around the
s390_vcpu_block functions and adapt the TOD migration code to
update in a guaranteed fashion. Also rename these functions to
carry the kvm_s390_ prefix, like everything else.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
parent 8e236546
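For context, the pattern this commit introduces is: block every vcpu from (re)entering SIE, update the shared control-block fields, then unblock. The following minimal sketch of a hypothetical caller illustrates it; update_guest_epochs is invented for illustration, while the helpers and the epoch assignment are taken from the hunks below.

	/*
	 * Hypothetical caller showing the block/update/unblock pattern.
	 * It mirrors the kvm_s390_set_tod_low hunk below and is not part
	 * of the commit itself.
	 */
	static int update_guest_epochs(struct kvm *kvm, u64 new_epoch)
	{
		int i;
		struct kvm_vcpu *vcpu;

		mutex_lock(&kvm->lock);
		kvm->arch.epoch = new_epoch;
		kvm_s390_vcpu_block_all(kvm);	/* asserts kvm->lock is held */
		kvm_for_each_vcpu(i, vcpu, kvm)
			vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		kvm_s390_vcpu_unblock_all(kvm);
		mutex_unlock(&kvm->lock);
		return 0;
	}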
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -454,10 +454,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 
 	mutex_lock(&kvm->lock);
 	kvm->arch.epoch = gtod - host_tod;
-	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+	kvm_s390_vcpu_block_all(kvm);
+	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-		exit_sie(cur_vcpu);
-	}
+	kvm_s390_vcpu_unblock_all(kvm);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
@@ -1414,12 +1414,12 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
-void s390_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -211,8 +211,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
-void s390_vcpu_block(struct kvm_vcpu *vcpu);
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
@@ -228,6 +228,25 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 			     struct kvm_s390_pgm_info *pgm_info);
 
+static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	WARN_ON(!mutex_is_locked(&kvm->lock));
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_s390_vcpu_block(vcpu);
+}
+
+static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_s390_vcpu_unblock(vcpu);
+}
+
 /**
  * kvm_s390_inject_prog_cond - conditionally inject a program check
  * @vcpu: virtual cpu