Commit a1b7b9b2 authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: vsie: support guest-storage-limit-suppression

We can easily forward guest-storage-limit-suppression if available.

One thing to care about is keeping the prefix properly mapped when
gsls is toggled on/off or the mso changes in between. Therefore we better
remap the prefix on any mso change, just like we already do on prefix
changes.
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 77d18f6d
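Before the diff hunks, a stand-alone sketch of the new mso gating may help. This is an illustrative model, not kernel code: the struct, the CPUSTAT_SM bit value and the shadow_mso() helper are simplified stand-ins invented for this sketch; only the "skip mso when CPUSTAT_SM is forwarded" logic and the address mask mirror the patch. With guest-storage-limit-suppression active, new_mso stays 0, so a previously non-zero mso shows up as a change and forces the prefix to be remapped.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative model only, not kernel code: the struct and the bit value
 * below are placeholders; the gating itself mirrors the patch.
 */
#define CPUSTAT_SM 0x00000080U	/* placeholder value for this sketch */

struct scb_model {
	uint32_t cpuflags;	/* shadow cpuflags, as set up by prepare_cpuflags() */
	uint64_t mso;		/* guest-supplied memory subsystem origin */
};

/* mso the shadow control block ends up with */
static uint64_t shadow_mso(const struct scb_model *scb_o,
			   const struct scb_model *scb_s)
{
	uint64_t new_mso = 0;

	/* with guest-storage-limit-suppression active, mso is not applied */
	if (!(scb_s->cpuflags & CPUSTAT_SM))
		new_mso = scb_o->mso & 0xfffffffffff00000UL;
	return new_mso;
}

int main(void)
{
	struct scb_model scb_o = { .mso = 0x20000000UL };
	struct scb_model scb_s = { .cpuflags = CPUSTAT_SM };

	/* SM forwarded: mso suppressed, so new_mso differs from any
	 * previously used non-zero mso and the prefix must be remapped */
	printf("with SM:    new_mso=0x%llx\n",
	       (unsigned long long)shadow_mso(&scb_o, &scb_s));

	scb_s.cpuflags = 0;
	printf("without SM: new_mso=0x%llx\n",
	       (unsigned long long)shadow_mso(&scb_o, &scb_s));
	return 0;
}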
@@ -102,6 +102,7 @@ struct kvm_s390_vm_cpu_machine {
 #define KVM_S390_VM_CPU_FEAT_64BSCAO	2
 #define KVM_S390_VM_CPU_FEAT_SIIF	3
 #define KVM_S390_VM_CPU_FEAT_GPERE	4
+#define KVM_S390_VM_CPU_FEAT_GSLS	5
 struct kvm_s390_vm_cpu_feat {
 	__u64 feat[16];
 };
@@ -271,6 +271,8 @@ static void kvm_s390_cpu_feat_init(void)
 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
 	if (sclp.has_gpere)
 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
+	if (sclp.has_gsls)
+		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
 }
 
 int kvm_arch_init(void *opaque)
@@ -109,6 +109,8 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	}
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
 		newflags |= cpuflags & CPUSTAT_P;
+	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
+		newflags |= cpuflags & CPUSTAT_SM;
 
 	atomic_set(&scb_s->cpuflags, newflags);
 	return 0;
@@ -242,7 +244,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
 	bool had_tx = scb_s->ecb & 0x10U;
-	unsigned long new_mso;
+	unsigned long new_mso = 0;
 	int rc;
 
 	/* make sure we don't have any leftovers when reusing the scb */
@@ -284,7 +286,8 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 	scb_s->icpua = scb_o->icpua;
 
-	new_mso = scb_o->mso & 0xfffffffffff00000UL;
+	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
+		new_mso = scb_o->mso & 0xfffffffffff00000UL;
 	/* if the hva of the prefix changes, we have to remap the prefix */
 	if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
 		prefix_unmapped(vsie_page);