Commit ef8f4f49 authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: reuse kvm_s390_set_cpuflags()

Use it in all places where we set cpuflags.
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20180123170531.13687-3-david@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 2018224d
...@@ -101,7 +101,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) ...@@ -101,7 +101,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
/* another external call is pending */ /* another external call is pending */
return -EBUSY; return -EBUSY;
} }
atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
return 0; return 0;
} }
...@@ -277,7 +277,7 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) ...@@ -277,7 +277,7 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu) static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{ {
atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask); set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
} }
......
...@@ -2329,7 +2329,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ...@@ -2329,7 +2329,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{ {
gmap_enable(vcpu->arch.enabled_gmap); gmap_enable(vcpu->arch.enabled_gmap);
atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
__start_cpu_timer_accounting(vcpu); __start_cpu_timer_accounting(vcpu);
vcpu->cpu = cpu; vcpu->cpu = cpu;
...@@ -2436,9 +2436,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -2436,9 +2436,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_STOPPED); CPUSTAT_STOPPED);
if (test_kvm_facility(vcpu->kvm, 78)) if (test_kvm_facility(vcpu->kvm, 78))
atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
else if (test_kvm_facility(vcpu->kvm, 8)) else if (test_kvm_facility(vcpu->kvm, 8))
atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
kvm_s390_vcpu_setup_model(vcpu); kvm_s390_vcpu_setup_model(vcpu);
...@@ -2475,7 +2475,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -2475,7 +2475,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
if (sclp.has_kss) if (sclp.has_kss)
atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
else else
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
...@@ -2578,7 +2578,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) ...@@ -2578,7 +2578,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
* return immediately. */ * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu) void exit_sie(struct kvm_vcpu *vcpu)
{ {
atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax(); cpu_relax();
} }
...@@ -2822,7 +2822,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, ...@@ -2822,7 +2822,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (dbg->control & KVM_GUESTDBG_ENABLE) { if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control; vcpu->guest_debug = dbg->control;
/* enforce guest PER */ /* enforce guest PER */
atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
if (dbg->control & KVM_GUESTDBG_USE_HW_BP) if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
rc = kvm_s390_import_bp_data(vcpu, dbg); rc = kvm_s390_import_bp_data(vcpu, dbg);
...@@ -2911,8 +2911,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) ...@@ -2911,8 +2911,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
if (!ibs_enabled(vcpu)) { if (!ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
atomic_or(CPUSTAT_IBS, kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
&vcpu->arch.sie_block->cpuflags);
} }
goto retry; goto retry;
} }
...@@ -3591,7 +3590,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) ...@@ -3591,7 +3590,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
kvm_s390_clear_stop_irq(vcpu); kvm_s390_clear_stop_irq(vcpu);
atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
__disable_ibs_on_vcpu(vcpu); __disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) { for (i = 0; i < online_vcpus; i++) {
......
...@@ -913,7 +913,7 @@ static void register_shadow_scb(struct kvm_vcpu *vcpu, ...@@ -913,7 +913,7 @@ static void register_shadow_scb(struct kvm_vcpu *vcpu,
* External calls have to lead to a kick of the vcpu and * External calls have to lead to a kick of the vcpu and
* therefore the vsie -> Simulate Wait state. * therefore the vsie -> Simulate Wait state.
*/ */
atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
/* /*
* We have to adjust the g3 epoch by the g2 epoch. The epoch will * We have to adjust the g3 epoch by the g2 epoch. The epoch will
* automatically be adjusted on tod clock changes via kvm_sync_clock. * automatically be adjusted on tod clock changes via kvm_sync_clock.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment