Commit 7d43bafc authored by Eugene (jno) Dvurechenski, committed by Christian Borntraeger

KVM: s390: Make provisions for ESCA utilization

This patch updates the sca_* routines to provide transparent access to, and
manipulation of, the data for whichever SCA format is in use, Basic or
Extended. kvm.arch.sca is generalized to (void *) to cover both the BSCA and
the ESCA case, and a kvm.arch.use_esca flag is added to select between them.
The actual functionality is kept the same.
Signed-off-by: Eugene (jno) Dvurechenski <jno@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent bc784cce
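
The whole change follows one mechanical pattern, sketched below as standalone
C for orientation. The sketch is editorial, not part of the patch: the struct
layouts and the helper name are simplified stand-ins, and only the void * plus
use_esca dispatch mirrors the diff.

/*
 * Hypothetical stand-ins for the real SCA layouts in
 * arch/s390/include/asm/kvm_host.h.
 */
struct bsca_block { int dummy; };  /* basic SCA (stand-in layout)    */
struct esca_block { int dummy; };  /* extended SCA (stand-in layout) */

struct kvm_arch_sketch {
        void *sca;      /* bsca_block or esca_block, now untyped  */
        int use_esca;   /* selects how ->sca must be interpreted  */
};

/* every sca_* helper branches once on use_esca and casts: */
static void sca_do_something(struct kvm_arch_sketch *arch)
{
        if (arch->use_esca) {
                struct esca_block *sca = arch->sca;  /* extended view */
                (void)sca;  /* ... operate on the ESCA layout ... */
        } else {
                struct bsca_block *sca = arch->sca;  /* basic view */
                (void)sca;  /* ... operate on the BSCA layout ... */
        }
}

Because the branch lives inside the helpers, callers never need to know which
SCA format is currently active.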
arch/s390/include/asm/kvm_host.h
@@ -620,7 +620,8 @@ struct kvm_s390_crypto_cb {
 };
 
 struct kvm_arch{
-        struct bsca_block *sca;
+        void *sca;
+        int use_esca;
         debug_info_t *dbf;
         struct kvm_s390_float_interrupt float_int;
         struct kvm_device *flic;
arch/s390/kvm/interrupt.c
@@ -37,22 +37,51 @@
 /* handle external calls via sigp interpretation facility */
 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
+        int c, scn;
+
+        if (vcpu->kvm->arch.use_esca) {
+                struct esca_block *sca = vcpu->kvm->arch.sca;
+                union esca_sigp_ctrl sigp_ctrl =
+                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+                c = sigp_ctrl.c;
+                scn = sigp_ctrl.scn;
+        } else {
                 struct bsca_block *sca = vcpu->kvm->arch.sca;
-        union bsca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+                union bsca_sigp_ctrl sigp_ctrl =
+                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+                c = sigp_ctrl.c;
+                scn = sigp_ctrl.scn;
+        }
 
         if (src_id)
-                *src_id = sigp_ctrl.scn;
+                *src_id = scn;
 
-        return sigp_ctrl.c &&
-                atomic_read(&vcpu->arch.sie_block->cpuflags) &
+        return c && atomic_read(&vcpu->arch.sie_block->cpuflags) &
                         CPUSTAT_ECALL_PEND;
 }
 
 static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 {
         int expect, rc;
+
+        if (vcpu->kvm->arch.use_esca) {
+                struct esca_block *sca = vcpu->kvm->arch.sca;
+                union esca_sigp_ctrl *sigp_ctrl =
+                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+
+                new_val.scn = src_id;
+                new_val.c = 1;
+                old_val.c = 0;
+
+                expect = old_val.value;
+                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+        } else {
                 struct bsca_block *sca = vcpu->kvm->arch.sca;
-        union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+                union bsca_sigp_ctrl *sigp_ctrl =
+                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                 union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
 
                 new_val.scn = src_id;
@@ -61,6 +90,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 
                 expect = old_val.value;
                 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+        }
 
         if (rc != expect) {
                 /* another external call is pending */
@@ -72,12 +102,28 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 
 static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 {
-        struct bsca_block *sca = vcpu->kvm->arch.sca;
         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-        union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+        int rc, expect;
 
         atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
-        sigp_ctrl->value = 0;
+        if (vcpu->kvm->arch.use_esca) {
+                struct esca_block *sca = vcpu->kvm->arch.sca;
+                union esca_sigp_ctrl *sigp_ctrl =
+                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+                union esca_sigp_ctrl old = *sigp_ctrl;
+
+                expect = old.value;
+                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+        } else {
+                struct bsca_block *sca = vcpu->kvm->arch.sca;
+                union bsca_sigp_ctrl *sigp_ctrl =
+                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+                union bsca_sigp_ctrl old = *sigp_ctrl;
+
+                expect = old.value;
+                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+        }
+        WARN_ON(rc != expect); /* cannot clear? */
 }
 
 int psw_extint_disabled(struct kvm_vcpu *vcpu)
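
The cmpxchg() calls above keep the SIGP control word consistent without a
lock: the injector's expected old value has the pending bit c forced to zero,
so the swap fails exactly when another external call is already flagged, and
the clearer swaps against a snapshot and warns if it raced. Below is a
userspace sketch of that protocol, with a simplified 16-bit control word
standing in for union bsca_sigp_ctrl/esca_sigp_ctrl and GCC's
__sync_bool_compare_and_swap() standing in for the kernel's cmpxchg().

#include <stdint.h>
#include <stdio.h>

union sigp_ctrl_sketch {              /* simplified stand-in layout */
        struct {
                uint16_t c   : 1;     /* ext-call pending bit       */
                uint16_t scn : 15;    /* source CPU number (sketch) */
        };
        uint16_t value;
};

/* returns 0 on success, -1 if another call was already pending */
static int inject_ext_call(union sigp_ctrl_sketch *ctrl, int src_id)
{
        union sigp_ctrl_sketch new_val = {0}, old_val = *ctrl;

        new_val.scn = src_id;
        new_val.c = 1;
        old_val.c = 0;   /* we expect *no* pending call ...           */

        /* ... so the CAS fails iff c was already set (or we raced)   */
        return __sync_bool_compare_and_swap(&ctrl->value, old_val.value,
                                            new_val.value) ? 0 : -1;
}

int main(void)
{
        union sigp_ctrl_sketch ctrl = { .value = 0 };

        printf("first inject: %d\n", inject_ext_call(&ctrl, 3));  /* 0  */
        printf("second inject: %d\n", inject_ext_call(&ctrl, 5)); /* -1 */
        return 0;
}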
arch/s390/kvm/kvm-s390.c
@@ -1077,6 +1077,15 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
         return 0;
 }
 
+static void sca_dispose(struct kvm *kvm)
+{
+        if (kvm->arch.use_esca)
+                BUG(); /* not implemented yet */
+        else
+                free_page((unsigned long)(kvm->arch.sca));
+        kvm->arch.sca = NULL;
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
         int i, rc;
@@ -1100,6 +1109,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
         rc = -ENOMEM;
 
+        kvm->arch.use_esca = 0; /* start with basic SCA */
         kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
         if (!kvm->arch.sca)
                 goto out_err;
@@ -1180,7 +1190,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         kfree(kvm->arch.crypto.crycb);
         free_page((unsigned long)kvm->arch.model.fac);
         debug_unregister(kvm->arch.dbf);
-        free_page((unsigned long)(kvm->arch.sca));
+        sca_dispose(kvm);
         KVM_EVENT(3, "creation of vm failed: %d", rc);
         return rc;
 }
@@ -1226,7 +1236,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
         kvm_free_vcpus(kvm);
         free_page((unsigned long)kvm->arch.model.fac);
-        free_page((unsigned long)(kvm->arch.sca));
+        sca_dispose(kvm);
         debug_unregister(kvm->arch.dbf);
         kfree(kvm->arch.crypto.crycb);
         if (!kvm_is_ucontrol(kvm))
@@ -1249,16 +1259,33 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
 {
+        if (vcpu->kvm->arch.use_esca) {
+                struct esca_block *sca = vcpu->kvm->arch.sca;
+
+                clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
+                if (sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block)
+                        sca->cpu[vcpu->vcpu_id].sda = 0;
+        } else {
                 struct bsca_block *sca = vcpu->kvm->arch.sca;
 
                 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
                 if (sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block)
                         sca->cpu[vcpu->vcpu_id].sda = 0;
+        }
 }
 
 static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
                          unsigned int id)
 {
+        if (kvm->arch.use_esca) {
+                struct esca_block *sca = kvm->arch.sca;
+
+                if (!sca->cpu[id].sda)
+                        sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+                vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+                vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
+                set_bit_inv(id, (unsigned long *) sca->mcn);
+        } else {
                 struct bsca_block *sca = kvm->arch.sca;
 
                 if (!sca->cpu[id].sda)
@@ -1266,6 +1293,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
                 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
                 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
                 set_bit_inv(id, (unsigned long *) &sca->mcn);
+        }
 }
 
 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
@@ -1458,6 +1486,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
                 vcpu->arch.sie_block->ecb |= 0x10;
 
         vcpu->arch.sie_block->ecb2 = 8;
+        if (vcpu->kvm->arch.use_esca)
+                vcpu->arch.sie_block->ecb2 |= 4;
         vcpu->arch.sie_block->eca = 0xC1002000U;
         if (sclp.has_siif)
                 vcpu->arch.sie_block->eca |= 1;
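
One detail in sca_add_vcpu() above deserves a note: the 64-bit SCA origin is
split across the 32-bit scaoh/scaol fields of the SIE control block, and on
the ESCA path the low six bits of scaol are additionally masked with ~0x3fU,
i.e. the extended SCA origin is treated as 64-byte aligned. A standalone
sketch of just that split (the struct is a stand-in for the real SIE block,
and the sample address is hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct sie_sketch {
        uint32_t scaoh;   /* SCA origin, high 32 bits */
        uint32_t scaol;   /* SCA origin, low 32 bits  */
};

static void set_sca_origin(struct sie_sketch *sie, uint64_t sca, int use_esca)
{
        sie->scaoh = (uint32_t)(sca >> 32);
        sie->scaol = use_esca ? ((uint32_t)sca & ~0x3fU)  /* ESCA path */
                              : (uint32_t)sca;            /* BSCA path */
}

int main(void)
{
        struct sie_sketch sie;

        set_sca_origin(&sie, 0x0000001234567fc0ULL, 1);
        printf("scaoh=%08" PRIx32 " scaol=%08" PRIx32 "\n",
               sie.scaoh, sie.scaol);  /* scaoh=00000012 scaol=34567fc0 */
        return 0;
}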