Commit 166ecb3d authored by David Hildenbrand, committed by Christian Borntraeger

KVM: s390: vsie: support transactional execution

As soon as guest 2 is allowed to use transactional execution (indicated via
STFLE), it can also enable it for guest 3.

Active transactional execution also requires the second prefix page to be
mapped. If that page cannot be mapped, a validity icpt has to be presented
to the guest.

We have to take care of tx being toggled on/off, otherwise we might get a
wrong prefix validity icpt.

Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent bbeaa58b
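
For orientation before the diff: the sketch below restates how the prefix area is expected to be shadowed once transactional execution is active. It is only an illustration, not code from the commit; it reuses names visible in the hunks that follow (kvm_s390_shadow_fault(), the ecb bit 0x10, GUEST_PREFIX_SHIFT) and assumes the surrounding vsie.c context.

/*
 * Illustration only, not part of the patch: with tx enabled (ecb bit 0x10),
 * the prefix area spans two pages, so both pages have to be faulted into
 * the shadow gmap before the shadow SCB may run.
 */
static int map_prefix_pages_sketch(struct kvm_vcpu *vcpu,
				   struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	/* with mso/msl, the prefix lies at an offset in guest 2 memory */
	prefix += scb_s->mso;

	/* the first prefix page is always needed */
	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	/* with tx active, the second prefix page is needed as well */
	if (!rc && (scb_s->ecb & 0x10U))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/* on failure, the caller presents a validity icpt to the guest */
	return rc;
}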
arch/s390/kvm/vsie.c
@@ -239,6 +239,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+	bool had_tx = scb_s->ecb & 0x10U;
 	unsigned long new_mso;
 	int rc;
@@ -299,6 +300,13 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	/* Host-protection-interruption introduced with ESOP */
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
 		scb_s->ecb |= scb_o->ecb & 0x02U;
+	/* transactional execution */
+	if (test_kvm_facility(vcpu->kvm, 73)) {
+		/* remap the prefix if tx is toggled on */
+		if ((scb_o->ecb & 0x10U) && !had_tx)
+			prefix_unmapped(vsie_page);
+		scb_s->ecb |= scb_o->ecb & 0x10U;
+	}
 	prepare_ibc(vcpu, vsie_page);
 	rc = shadow_crycb(vcpu, vsie_page);
@@ -337,13 +345,13 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
 		/* with mso/msl, the prefix lies at an offset */
 		prefix += cur->scb_s.mso;
-		if (prefix <= end && start <= prefix + PAGE_SIZE - 1)
+		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
 			prefix_unmapped_sync(cur);
 	}
 }
 
 /*
- * Map the first prefix page.
+ * Map the first prefix page and if tx is enabled also the second prefix page.
  *
  * The prefix will be protected, a gmap notifier will inform about unmaps.
  * The shadow scb must not be executed until the prefix is remapped, this is
@@ -370,6 +378,9 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	prefix += scb_s->mso;
 
 	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
+	if (!rc && (scb_s->ecb & 0x10U))
+		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+					   prefix + PAGE_SIZE);
 	/*
 	 * We don't have to mprotect, we will be called for all unshadows.
 	 * SIE will detect if protection applies and trigger a validity.
@@ -434,6 +445,13 @@ static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		scb_s->scaol = 0;
 		scb_s->scaoh = 0;
 	}
+
+	hpa = scb_s->itdba;
+	if (hpa) {
+		gpa = scb_o->itdba & ~0xffUL;
+		unpin_guest_page(vcpu->kvm, gpa, hpa);
+		scb_s->itdba = 0;
+	}
 }
 
 /*
@@ -477,6 +495,21 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		scb_s->scaoh = (u32)((u64)hpa >> 32);
 		scb_s->scaol = (u32)(u64)hpa;
 	}
+
+	gpa = scb_o->itdba & ~0xffUL;
+	if (gpa && (scb_s->ecb & 0x10U)) {
+		if (!(gpa & ~0x1fffUL)) {
+			rc = set_validity_icpt(scb_s, 0x0080U);
+			goto unpin;
+		}
+		/* 256 bytes cannot cross page boundaries */
+		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
+		if (rc == -EINVAL)
+			rc = set_validity_icpt(scb_s, 0x0080U);
+		if (rc)
+			goto unpin;
+		scb_s->itdba = hpa;
+	}
 	return 0;
 unpin:
 	unpin_blocks(vcpu, vsie_page);
...
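
As a companion to the pin_blocks()/unpin_blocks() hunks above, here is a small self-contained restatement of the new ITDB handling. Again this is only a sketch, not code from the commit; it borrows the helpers shown in the diff (pin_guest_page(), set_validity_icpt()) and the 0x0080 validity code used by the patch, and assumes the vsie.c context.

/*
 * Illustration only: validate and pin the guest 2 ITDB (the 256-byte
 * interception transaction diagnostic block) before running the shadow
 * SCB with tx enabled.
 */
static int pin_itdb_sketch(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc;

	/* the ITDB origin is 256-byte aligned; drop the low 8 address bits */
	gpa = scb_o->itdba & ~0xffUL;
	if (!gpa || !(scb_s->ecb & 0x10U))
		return 0;	/* nothing to pin without tx */
	/* an ITDB origin below 8k is rejected with a validity icpt */
	if (!(gpa & ~0x1fffUL))
		return set_validity_icpt(scb_s, 0x0080U);
	/* 256 bytes cannot cross page boundaries, one pinned page suffices */
	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc == -EINVAL)
		rc = set_validity_icpt(scb_s, 0x0080U);
	if (rc)
		return rc;
	scb_s->itdba = hpa;
	return 0;
}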