Commit bd17117b authored by Paolo Bonzini

Merge tag 'kvm-s390-next-4.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Guarded storage fixup and keyless subset mode

- detect and use the keyless subset mode (guests without
  storage keys)
- fix vSIE support for sdnxc
- fix machine check data for guarded storage
parents ec594c47 730cd632
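
For orientation (not part of the merge itself): the series exposes keyless subset mode to userspace as CPU-model feature bit KVM_S390_VM_CPU_FEAT_KSS (13) in the KVM_S390_VM_CPU_MODEL attribute group. The sketch below shows how a VMM could probe that bit and hand the offered feature set to a guest through the VM device-attribute ioctls. It is an illustration under assumptions, not code from the series: vm_fd is assumed to be an already created KVM VM file descriptor, and the byte-wise MSB-0 helper mirrors the kernel's test_bit_inv() convention.

/*
 * Illustrative sketch only (not from this series): probe the host's CPU
 * features and enable the offered set, including KSS, for a guest.
 * Must run before any VCPU is created.  Assumes "vm_fd" is an open KVM
 * VM file descriptor and that the installed uapi headers already define
 * KVM_S390_VM_CPU_FEAT_KSS.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in asm/kvm.h with the s390 definitions */

/*
 * The feature bitmap uses MSB-0 numbering (the kernel tests it with
 * test_bit_inv()), i.e. bit 0 is the most significant bit of byte 0.
 */
static int kvm_cpu_feat_test(const struct kvm_s390_vm_cpu_feat *feat, int nr)
{
	return ((const unsigned char *)feat->feat)[nr / 8] & (0x80 >> (nr % 8));
}

static int enable_host_cpu_feats(int vm_fd)
{
	struct kvm_s390_vm_cpu_feat feat;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr = KVM_S390_VM_CPU_MACHINE_FEAT,
		.addr = (__u64)(unsigned long)&feat,
	};

	/* Read the feature bits the host KVM can offer to this VM. */
	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
		return -1;
	if (!kvm_cpu_feat_test(&feat, KVM_S390_VM_CPU_FEAT_KSS))
		return -1;	/* keyless subset mode not available */

	/*
	 * Enable the offered set (any subset would be equally valid) for the
	 * guest's CPU model; this is the bit prepare_cpuflags() checks before
	 * letting a nested (vSIE) guest use CPUSTAT_KSS.
	 */
	attr.attr = KVM_S390_VM_CPU_PROCESSOR_FEAT;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}

With KSS active, ISKE/SSKE/RRBE are not intercepted up front; the guest's first storage-key use exits with the new 0x5c intercept, which the diff below routes to kvm_s390_skey_check_enable().
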
@@ -122,6 +122,7 @@ struct esca_block {
 #define CPUSTAT_SLSR 0x00002000
 #define CPUSTAT_ZARCH 0x00000800
 #define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_KSS 0x00000200
 #define CPUSTAT_SM 0x00000080
 #define CPUSTAT_IBS 0x00000040
 #define CPUSTAT_GED2 0x00000010
@@ -185,6 +186,7 @@ struct kvm_s390_sie_block {
 #define ICPT_OPEREXC 0x2C
 #define ICPT_PARTEXEC 0x38
 #define ICPT_IOINST 0x40
+#define ICPT_KSS 0x5c
 	__u8 icptcode;		/* 0x0050 */
 	__u8 icptstatus;	/* 0x0051 */
 	__u16 ihcpu;		/* 0x0052 */
...
@@ -75,6 +75,7 @@ struct sclp_info {
 	unsigned char has_pfmfi : 1;
 	unsigned char has_ibs : 1;
 	unsigned char has_skey : 1;
+	unsigned char has_kss : 1;
 	unsigned int ibc;
 	unsigned int mtid;
 	unsigned int mtid_cp;
...
@@ -119,6 +119,7 @@ struct kvm_s390_vm_cpu_machine {
 #define KVM_S390_VM_CPU_FEAT_CMMA 10
 #define KVM_S390_VM_CPU_FEAT_PFMFI 11
 #define KVM_S390_VM_CPU_FEAT_SIGPIF 12
+#define KVM_S390_VM_CPU_FEAT_KSS 13
 struct kvm_s390_vm_cpu_feat {
 	__u64 feat[16];
 };
...
@@ -426,6 +426,9 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 	case ICPT_PARTEXEC:
 		rc = handle_partial_execution(vcpu);
 		break;
+	case ICPT_KSS:
+		rc = kvm_s390_skey_check_enable(vcpu);
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
...
@@ -419,6 +419,8 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	/* take care of lazy register loading */
 	save_fpu_regs();
 	save_access_regs(vcpu->run->s.regs.acrs);
+	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
+		save_gs_cb(current->thread.gs_cb);
 
 	/* Extended save area */
 	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
...
@@ -300,6 +300,8 @@ static void kvm_s390_cpu_feat_init(void)
 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
 	if (sclp.has_ibs)
 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
+	if (sclp.has_kss)
+		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
 	/*
 	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
 	 * all skey handling functions read/set the skey from the PGSTE
@@ -2034,6 +2036,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
 						| SDNXC;
 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
-	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+	if (sclp.has_kss)
+		atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
+	else
+		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 
 	if (vcpu->kvm->arch.use_cmma) {
...
@@ -254,6 +254,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
+int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);
 
 /* implemented in vsie.c */
 int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
...
@@ -198,18 +198,25 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int __skey_check_enable(struct kvm_vcpu *vcpu)
+int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
 {
 	int rc = 0;
+	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
 
 	trace_kvm_s390_skey_related_inst(vcpu);
-	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
+	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
+	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
 		return rc;
 
 	rc = s390_enable_skey();
 	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
-	if (!rc)
-		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+	if (!rc) {
+		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
+			atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
+		else
+			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
+					     ICTL_RRBE);
+	}
 	return rc;
 }
@@ -218,7 +225,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
 	int rc;
 
 	vcpu->stat.instruction_storage_key++;
-	rc = __skey_check_enable(vcpu);
+	rc = kvm_s390_skey_check_enable(vcpu);
 	if (rc)
 		return rc;
 	if (sclp.has_skey) {
@@ -916,7 +923,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
-		int rc = __skey_check_enable(vcpu);
+		int rc = kvm_s390_skey_check_enable(vcpu);
 
 		if (rc)
 			return rc;
...
@@ -117,6 +117,8 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		newflags |= cpuflags & CPUSTAT_SM;
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
 		newflags |= cpuflags & CPUSTAT_IBS;
+	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
+		newflags |= cpuflags & CPUSTAT_KSS;
 
 	atomic_set(&scb_s->cpuflags, newflags);
 	return 0;
@@ -289,7 +291,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	 * bits. Therefore we cannot provide interpretation and would later
 	 * have to provide own emulation handlers.
 	 */
-	scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
+		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+
 	scb_s->icpua = scb_o->icpua;
 
 	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
@@ -627,7 +631,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		rc = set_validity_icpt(scb_s, 0x10b0U);
 		if (rc)
 			goto unpin;
-		scb_s->sdnxo = hpa;
+		scb_s->sdnxo = hpa | sdnxc;
 	}
 	return 0;
 unpin:
...
@@ -40,7 +40,8 @@ struct read_info_sccb {
 	u8 fac85;		/* 85 */
 	u8 _pad_86[91 - 86];	/* 86-90 */
 	u8 flags;		/* 91 */
-	u8 _pad_92[99 - 92];	/* 92-98 */
+	u8 _pad_92[98 - 92];	/* 92-97 */
+	u8 fac98;		/* 98 */
 	u8 hamaxpow;		/* 99 */
 	u32 rnsize2;		/* 100-103 */
 	u64 rnmax2;		/* 104-111 */
@@ -99,6 +100,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
 	sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
 	sclp.has_ibs = !!(sccb->fac117 & 0x20);
 	sclp.has_hvs = !!(sccb->fac119 & 0x80);
+	sclp.has_kss = !!(sccb->fac98 & 0x01);
 	if (sccb->fac85 & 0x02)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
 	sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
...