Commit 46014634 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390 features, kvm_get_vcpu_by_id and stat

Several features for s390
1. ESCA support (up to 248 vCPUs)
2. KVM detection: we can now detect if we support KVM (e.g. does KVM
   under KVM work?)

kvm_stat:
1. cleanup the exit path

kvm_get_vcpu_by_id:
1. Use kvm_get_vcpu_by_id where appropriate
2. Apply a heuristic to optimize for ID VCPU == No. VCPU
parents bb11c6c9 2f8a43d4
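
The heuristic in item (2) of the kvm_get_vcpu_by_id list is worth spelling out. Reassembled from the kvm_get_vcpu_by_id hunk near the end of this diff: the lookup first probes the vcpu array at index == id, which hits whenever userspace creates vcpus with consecutive ids (the common case), and only falls back to the linear scan when the optimistic probe misses. The comments below are editorial; the logic is taken verbatim from the patch:

/* Reassembled from the kvm_get_vcpu_by_id hunk below. */
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (id < 0 || id >= KVM_MAX_VCPUS)
		return NULL;
	vcpu = kvm_get_vcpu(kvm, id);	/* optimistic: vcpu_id == array index */
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)	/* slow path: linear scan */
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}
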
@@ -308,16 +308,10 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-	int r;
-	struct kvm_vcpu *v, *ret = NULL;
+	struct kvm_vcpu *ret;
 
 	mutex_lock(&kvm->lock);
-	kvm_for_each_vcpu(r, v, kvm) {
-		if (v->vcpu_id == id) {
-			ret = v;
-			break;
-		}
-	}
+	ret = kvm_get_vcpu_by_id(kvm, id);
 	mutex_unlock(&kvm->lock);
 	return ret;
 }
...
@@ -104,6 +104,9 @@
 #define HWCAP_S390_TE		1024
 #define HWCAP_S390_VXRS		2048
 
+/* Internal bits, not exposed via elf */
+#define HWCAP_INT_SIE		1UL
+
 /*
  * These are used to set parameters in the core dumps.
  */
@@ -169,6 +172,10 @@ extern unsigned int vdso_enabled;
 extern unsigned long elf_hwcap;
 #define ELF_HWCAP (elf_hwcap)
 
+/* Internal hardware capabilities, not exposed via elf */
+extern unsigned long int_hwcap;
+
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
    intent than poking at uname or /proc/cpuinfo.
...
@@ -25,7 +25,9 @@
 #include <asm/fpu/api.h>
 #include <asm/isc.h>
 
-#define KVM_MAX_VCPUS 64
+#define KVM_S390_BSCA_CPU_SLOTS 64
+#define KVM_S390_ESCA_CPU_SLOTS 248
+#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
 #define KVM_USER_MEM_SLOTS 32
 
 /*
@@ -40,9 +42,34 @@
 #define SIGP_CTRL_C		0x80
 #define SIGP_CTRL_SCN_MASK	0x3f
 
-struct sca_entry {
+union bsca_sigp_ctrl {
+	__u8 value;
+	struct {
+		__u8 c : 1;
+		__u8 r : 1;
+		__u8 scn : 6;
+	};
+} __packed;
+
+union esca_sigp_ctrl {
+	__u16 value;
+	struct {
+		__u8 c : 1;
+		__u8 reserved: 7;
+		__u8 scn;
+	};
+} __packed;
+
+struct esca_entry {
+	union esca_sigp_ctrl sigp_ctrl;
+	__u16   reserved1[3];
+	__u64   sda;
+	__u64   reserved2[6];
+} __packed;
+
+struct bsca_entry {
 	__u8	reserved0;
-	__u8	sigp_ctrl;
+	union bsca_sigp_ctrl	sigp_ctrl;
 	__u16	reserved[3];
 	__u64	sda;
 	__u64	reserved2[2];
@@ -57,14 +84,22 @@ union ipte_control {
 	};
 };
 
-struct sca_block {
+struct bsca_block {
 	union ipte_control ipte_control;
 	__u64	reserved[5];
 	__u64	mcn;
 	__u64	reserved2;
-	struct sca_entry cpu[64];
+	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
 } __attribute__((packed));
 
+struct esca_block {
+	union ipte_control ipte_control;
+	__u64   reserved1[7];
+	__u64   mcn[4];
+	__u64   reserved2[20];
+	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
+} __packed;
+
 #define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
@@ -585,7 +620,9 @@ struct kvm_s390_crypto_cb {
 };
 
 struct kvm_arch{
-	struct sca_block *sca;
+	void *sca;
+	int use_esca;
+	rwlock_t sca_lock;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
 	struct kvm_device *flic;
...
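
A quick size check on the two layouts above explains the allocator changes further down in kvm-s390.c: a bsca_block still fits comfortably in one 4 KiB page (hence the existing get_zeroed_page()/free_page() scheme and the 16-byte sca_offset staggering), while an esca_block is roughly four pages, which is why the switch code uses alloc_pages_exact() and sca_dispose() must pick the matching free routine. The arithmetic below is editorial, derived from the field declarations rather than stated in the patch:

/* Editorial size check, derived from the struct layouts above. */
#include <assert.h>

#define BSCA_SLOTS 64
#define ESCA_SLOTS 248

int main(void)
{
	/* bsca_entry: reserved0(1) + sigp_ctrl(1) + reserved[3](6) + sda(8) + reserved2[2](16) */
	unsigned bsca_entry = 1 + 1 + 6 + 8 + 16;	/* 32 bytes */
	/* esca_entry: sigp_ctrl(2) + reserved1[3](6) + sda(8) + reserved2[6](48) */
	unsigned esca_entry = 2 + 6 + 8 + 48;		/* 64 bytes */
	unsigned bsca_block = 8 + 40 + 8 + 8 + BSCA_SLOTS * bsca_entry;
	unsigned esca_block = 8 + 56 + 32 + 160 + ESCA_SLOTS * esca_entry;

	assert(bsca_block == 2112);	/* < 4096: one page is enough */
	assert(esca_block == 16128);	/* ~4 pages: needs alloc_pages_exact() */
	return 0;
}
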
@@ -29,7 +29,10 @@ struct sclp_ipl_info {
 struct sclp_core_entry {
 	u8 core_id;
-	u8 reserved0[2];
+	u8 reserved0;
+	u8 : 4;
+	u8 sief2 : 1;
+	u8 : 3;
 	u8 : 3;
 	u8 siif : 1;
 	u8 sigpif : 1;
@@ -53,6 +56,9 @@ struct sclp_info {
 	unsigned char has_sigpif : 1;
 	unsigned char has_core_type : 1;
 	unsigned char has_sprp : 1;
+	unsigned char has_hvs : 1;
+	unsigned char has_esca : 1;
+	unsigned char has_sief2 : 1;
 	unsigned int ibc;
 	unsigned int mtid;
 	unsigned int mtid_cp;
...
@@ -61,6 +61,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
 		"edat", "etf3eh", "highgprs", "te", "vx"
 	};
+	static const char * const int_hwcap_str[] = {
+		"sie"
+	};
 	unsigned long n = (unsigned long) v - 1;
 	int i;
 
@@ -75,6 +78,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
 			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
 				seq_printf(m, "%s ", hwcap_str[i]);
+		for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
+			if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
+				seq_printf(m, "%s ", int_hwcap_str[i]);
 		seq_puts(m, "\n");
 		show_cacheinfo(m);
 	}
...
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(console_irq);
 unsigned long elf_hwcap __read_mostly = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
 
+unsigned long int_hwcap = 0;
+
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
 unsigned long __initdata max_physmem_end;
@@ -793,6 +795,13 @@ static int __init setup_hwcaps(void)
 		strcpy(elf_platform, "z13");
 		break;
 	}
+
+	/*
+	 * Virtualization support HWCAP_INT_SIE is bit 0.
+	 */
+	if (sclp.has_sief2)
+		int_hwcap |= HWCAP_INT_SIE;
+
 	return 0;
 }
 arch_initcall(setup_hwcaps);
...
@@ -155,10 +155,8 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 
 static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tcpu;
 	int tid;
-	int i;
 
 	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
 	vcpu->stat.diagnose_9c++;
@@ -167,12 +165,9 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
 	if (tid == vcpu->vcpu_id)
 		return 0;
 
-	kvm_for_each_vcpu(i, tcpu, kvm)
-		if (tcpu->vcpu_id == tid) {
-			kvm_vcpu_yield_to(tcpu);
-			break;
-		}
-
+	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
+	if (tcpu)
+		kvm_vcpu_yield_to(tcpu);
 	return 0;
 }
...
@@ -259,10 +259,14 @@ struct aste {
 
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
-	union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control;
+	if (vcpu->arch.sie_block->eca & 1) {
+		int rc;
 
-	if (vcpu->arch.sie_block->eca & 1)
-		return ic->kh != 0;
+		read_lock(&vcpu->kvm->arch.sca_lock);
+		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
+		read_unlock(&vcpu->kvm->arch.sca_lock);
+		return rc;
+	}
 	return vcpu->kvm->arch.ipte_lock_count != 0;
 }
@@ -274,16 +278,20 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu)
 	vcpu->kvm->arch.ipte_lock_count++;
 	if (vcpu->kvm->arch.ipte_lock_count > 1)
 		goto out;
-	ic = &vcpu->kvm->arch.sca->ipte_control;
+retry:
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
-		while (old.k) {
+		if (old.k) {
+			read_unlock(&vcpu->kvm->arch.sca_lock);
 			cond_resched();
-			old = READ_ONCE(*ic);
+			goto retry;
 		}
 		new = old;
 		new.k = 1;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 out:
 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
 }
@@ -296,12 +304,14 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
 	vcpu->kvm->arch.ipte_lock_count--;
 	if (vcpu->kvm->arch.ipte_lock_count)
 		goto out;
-	ic = &vcpu->kvm->arch.sca->ipte_control;
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
 		new = old;
 		new.k = 0;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 	wake_up(&vcpu->kvm->arch.ipte_wq);
 out:
 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
@@ -311,24 +321,29 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
 {
 	union ipte_control old, new, *ic;
 
-	ic = &vcpu->kvm->arch.sca->ipte_control;
+retry:
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
-		while (old.kg) {
+		if (old.kg) {
+			read_unlock(&vcpu->kvm->arch.sca_lock);
 			cond_resched();
-			old = READ_ONCE(*ic);
+			goto retry;
 		}
 		new = old;
 		new.k = 1;
 		new.kh++;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
 static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 {
 	union ipte_control old, new, *ic;
 
-	ic = &vcpu->kvm->arch.sca->ipte_control;
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
 		new = old;
@@ -336,6 +351,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 		if (!new.kh)
 			new.k = 0;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 	if (!new.kh)
 		wake_up(&vcpu->kvm->arch.ipte_wq);
 }
...
@@ -54,9 +54,6 @@ void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
 static int handle_noop(struct kvm_vcpu *vcpu)
 {
 	switch (vcpu->arch.sie_block->icptcode) {
-	case 0x0:
-		vcpu->stat.exit_null++;
-		break;
 	case 0x10:
 		vcpu->stat.exit_external_request++;
 		break;
@@ -338,8 +335,10 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 {
+	if (kvm_is_ucontrol(vcpu->kvm))
+		return -EOPNOTSUPP;
+
 	switch (vcpu->arch.sie_block->icptcode) {
-	case 0x00:
 	case 0x10:
 	case 0x18:
 		return handle_noop(vcpu);
...
@@ -34,6 +34,106 @@
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
 
+/* handle external calls via sigp interpretation facility */
+static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
+{
+	int c, scn;
+
+	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
+		return 0;
+
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	if (vcpu->kvm->arch.use_esca) {
+		struct esca_block *sca = vcpu->kvm->arch.sca;
+		union esca_sigp_ctrl sigp_ctrl =
+			sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+		c = sigp_ctrl.c;
+		scn = sigp_ctrl.scn;
+	} else {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+		union bsca_sigp_ctrl sigp_ctrl =
+			sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+		c = sigp_ctrl.c;
+		scn = sigp_ctrl.scn;
+	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
+
+	if (src_id)
+		*src_id = scn;
+
+	return c;
+}
+
+static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+{
+	int expect, rc;
+
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	if (vcpu->kvm->arch.use_esca) {
+		struct esca_block *sca = vcpu->kvm->arch.sca;
+		union esca_sigp_ctrl *sigp_ctrl =
+			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+
+		new_val.scn = src_id;
+		new_val.c = 1;
+		old_val.c = 0;
+
+		expect = old_val.value;
+		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+	} else {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+		union bsca_sigp_ctrl *sigp_ctrl =
+			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+
+		new_val.scn = src_id;
+		new_val.c = 1;
+		old_val.c = 0;
+
+		expect = old_val.value;
+		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
+
+	if (rc != expect) {
+		/* another external call is pending */
+		return -EBUSY;
+	}
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	return 0;
+}
+
+static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	int rc, expect;
+
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	if (vcpu->kvm->arch.use_esca) {
+		struct esca_block *sca = vcpu->kvm->arch.sca;
+		union esca_sigp_ctrl *sigp_ctrl =
+			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+		union esca_sigp_ctrl old = *sigp_ctrl;
+
+		expect = old.value;
+		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+	} else {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+		union bsca_sigp_ctrl *sigp_ctrl =
+			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+		union bsca_sigp_ctrl old = *sigp_ctrl;
+
+		expect = old.value;
+		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
+	WARN_ON(rc != expect); /* cannot clear? */
+}
+
 int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -792,13 +892,11 @@ static const deliver_irq_t deliver_irq_funcs[] = {
 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
 	if (!sclp.has_sigpif)
 		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 
-	return (sigp_ctrl & SIGP_CTRL_C) &&
-	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
+	return sca_ext_call_pending(vcpu, NULL);
 }
 
 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
@@ -909,9 +1007,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	memset(&li->irq, 0, sizeof(li->irq));
 	spin_unlock(&li->lock);
 
-	/* clear pending external calls set by sigp interpretation facility */
-	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
-	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
+	sca_clear_ext_call(vcpu);
 }
 
 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
@@ -1003,21 +1099,6 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	return 0;
 }
 
-static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
-{
-	unsigned char new_val, old_val;
-	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
-
-	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
-	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
-	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
-		/* another external call is pending */
-		return -EBUSY;
-	}
-	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
-	return 0;
-}
-
 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1034,7 +1115,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 		return -EINVAL;
 
 	if (sclp.has_sigpif)
-		return __inject_extcall_sigpif(vcpu, src_id);
+		return sca_inject_ext_call(vcpu, src_id);
 
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
@@ -2203,7 +2284,7 @@ static void store_local_irq(struct kvm_s390_local_interrupt *li,
 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 {
-	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+	int scn;
 	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	unsigned long pending_irqs;
@@ -2243,14 +2324,12 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 		}
 	}
 
-	if ((sigp_ctrl & SIGP_CTRL_C) &&
-	    (atomic_read(&vcpu->arch.sie_block->cpuflags) &
-	     CPUSTAT_ECALL_PEND)) {
+	if (sca_ext_call_pending(vcpu, &scn)) {
 		if (n + sizeof(irq) > len)
 			return -ENOBUFS;
 		memset(&irq, 0, sizeof(irq));
 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
-		irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+		irq.u.extcall.code = scn;
 		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
 			return -EFAULT;
 		n += sizeof(irq);
...
@@ -246,7 +246,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	case KVM_CAP_NR_VCPUS:
 	case KVM_CAP_MAX_VCPUS:
-		r = KVM_MAX_VCPUS;
+		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
+				  : KVM_S390_BSCA_CPU_SLOTS;
 		break;
 	case KVM_CAP_NR_MEMSLOTS:
 		r = KVM_USER_MEM_SLOTS;
@@ -283,6 +284,8 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm,
 }
 
 /* Section: vm related */
+static void sca_del_vcpu(struct kvm_vcpu *vcpu);
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -1024,7 +1027,7 @@ static int kvm_s390_apxa_installed(void)
 	u8 config[128];
 	int cc;
 
-	if (test_facility(2) && test_facility(12)) {
+	if (test_facility(12)) {
 		cc = kvm_s390_query_ap_config(config);
 
 		if (cc)
@@ -1075,6 +1078,15 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 	return 0;
 }
 
+static void sca_dispose(struct kvm *kvm)
+{
+	if (kvm->arch.use_esca)
+		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
+	else
+		free_page((unsigned long)(kvm->arch.sca));
+	kvm->arch.sca = NULL;
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	int i, rc;
@@ -1098,14 +1110,17 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	rc = -ENOMEM;
 
-	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
+	kvm->arch.use_esca = 0; /* start with basic SCA */
+	rwlock_init(&kvm->arch.sca_lock);
+	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
 	if (!kvm->arch.sca)
 		goto out_err;
 	spin_lock(&kvm_lock);
 	sca_offset += 16;
-	if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
+	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
 		sca_offset = 0;
-	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
+	kvm->arch.sca = (struct bsca_block *)
+			((char *) kvm->arch.sca + sca_offset);
 	spin_unlock(&kvm_lock);
 
 	sprintf(debug_name, "kvm-%u", current->pid);
@@ -1177,7 +1192,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kfree(kvm->arch.crypto.crycb);
 	free_page((unsigned long)kvm->arch.model.fac);
 	debug_unregister(kvm->arch.dbf);
-	free_page((unsigned long)(kvm->arch.sca));
+	sca_dispose(kvm);
 	KVM_EVENT(3, "creation of vm failed: %d", rc);
 	return rc;
 }
@@ -1188,13 +1203,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
 	kvm_s390_clear_local_irqs(vcpu);
 	kvm_clear_async_pf_completion_queue(vcpu);
-	if (!kvm_is_ucontrol(vcpu->kvm)) {
-		clear_bit(63 - vcpu->vcpu_id,
-			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
-		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
-		    (__u64) vcpu->arch.sie_block)
-			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
-	}
+	if (!kvm_is_ucontrol(vcpu->kvm))
+		sca_del_vcpu(vcpu);
 	smp_mb();
 
 	if (kvm_is_ucontrol(vcpu->kvm))
@@ -1228,7 +1238,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_free_vcpus(kvm);
 	free_page((unsigned long)kvm->arch.model.fac);
-	free_page((unsigned long)(kvm->arch.sca));
+	sca_dispose(kvm);
 	debug_unregister(kvm->arch.dbf);
 	kfree(kvm->arch.crypto.crycb);
 	if (!kvm_is_ucontrol(kvm))
@@ -1249,6 +1259,116 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static void sca_del_vcpu(struct kvm_vcpu *vcpu)
+{
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	if (vcpu->kvm->arch.use_esca) {
+		struct esca_block *sca = vcpu->kvm->arch.sca;
+
+		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
+		sca->cpu[vcpu->vcpu_id].sda = 0;
+	} else {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+
+		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
+		sca->cpu[vcpu->vcpu_id].sda = 0;
+	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
+}
+
+static void sca_add_vcpu(struct kvm_vcpu *vcpu)
+{
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	if (vcpu->kvm->arch.use_esca) {
+		struct esca_block *sca = vcpu->kvm->arch.sca;
+
+		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
+		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
+		vcpu->arch.sie_block->ecb2 |= 0x04U;
+		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
+	} else {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+
+		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
+		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
+	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
+}
+
+/* Basic SCA to Extended SCA data copy routines */
+static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
+{
+	d->sda = s->sda;
+	d->sigp_ctrl.c = s->sigp_ctrl.c;
+	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
+}
+
+static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
+{
+	int i;
+
+	d->ipte_control = s->ipte_control;
+	d->mcn[0] = s->mcn;
+	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
+		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
+}
+
+static int sca_switch_to_extended(struct kvm *kvm)
+{
+	struct bsca_block *old_sca = kvm->arch.sca;
+	struct esca_block *new_sca;
+	struct kvm_vcpu *vcpu;
+	unsigned int vcpu_idx;
+	u32 scaol, scaoh;
+
+	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
+	if (!new_sca)
+		return -ENOMEM;
+
+	scaoh = (u32)((u64)(new_sca) >> 32);
+	scaol = (u32)(u64)(new_sca) & ~0x3fU;
+
+	kvm_s390_vcpu_block_all(kvm);
+	write_lock(&kvm->arch.sca_lock);
+
+	sca_copy_b_to_e(new_sca, old_sca);
+
+	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
+		vcpu->arch.sie_block->scaoh = scaoh;
+		vcpu->arch.sie_block->scaol = scaol;
+		vcpu->arch.sie_block->ecb2 |= 0x04U;
+	}
+	kvm->arch.sca = new_sca;
+	kvm->arch.use_esca = 1;
+
+	write_unlock(&kvm->arch.sca_lock);
+	kvm_s390_vcpu_unblock_all(kvm);
+
+	free_page((unsigned long)old_sca);
+
+	VM_EVENT(kvm, 2, "Switched to ESCA (%p -> %p)", old_sca, kvm->arch.sca);
+	return 0;
+}
+
+static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
+{
+	int rc;
+
+	if (id < KVM_S390_BSCA_CPU_SLOTS)
+		return true;
+	if (!sclp.has_esca)
+		return false;
+
+	mutex_lock(&kvm->lock);
+	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
+	mutex_unlock(&kvm->lock);
+
+	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
+}
+
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -1369,8 +1489,11 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
 	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
-	if (!kvm_is_ucontrol(vcpu->kvm))
+	if (!kvm_is_ucontrol(vcpu->kvm)) {
 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
+		sca_add_vcpu(vcpu);
+	}
 }
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
@@ -1465,7 +1588,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	struct sie_page *sie_page;
 	int rc = -EINVAL;
 
-	if (id >= KVM_MAX_VCPUS)
+	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
 		goto out;
 
 	rc = -ENOMEM;
@@ -1482,20 +1605,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
 
 	vcpu->arch.sie_block->icpua = id;
-	if (!kvm_is_ucontrol(kvm)) {
-		if (!kvm->arch.sca) {
-			WARN_ON_ONCE(1);
-			goto out_free_cpu;
-		}
-		if (!kvm->arch.sca->cpu[id].sda)
-			kvm->arch.sca->cpu[id].sda =
-				(__u64) vcpu->arch.sie_block;
-		vcpu->arch.sie_block->scaoh =
-			(__u32)(((__u64)kvm->arch.sca) >> 32);
-		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
-		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
-	}
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
 	vcpu->arch.local_int.wq = &vcpu->wq;
@@ -1509,10 +1618,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	 */
 	vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
 					       GFP_KERNEL);
-	if (!vcpu->arch.guest_fpregs.fprs) {
-		rc = -ENOMEM;
+	if (!vcpu->arch.guest_fpregs.fprs)
 		goto out_free_sie_block;
-	}
 
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
@@ -2071,8 +2178,6 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 {
-	int rc = -1;
-
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
@@ -2080,40 +2185,35 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 	if (guestdbg_enabled(vcpu))
 		kvm_s390_restore_guest_per_regs(vcpu);
 
-	if (exit_reason >= 0) {
-		rc = 0;
+	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+
+	if (vcpu->arch.sie_block->icptcode > 0) {
+		int rc = kvm_handle_sie_intercept(vcpu);
+
+		if (rc != -EOPNOTSUPP)
+			return rc;
+		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
+		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
+		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
+		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
+		return -EREMOTE;
+	} else if (exit_reason != -EFAULT) {
+		vcpu->stat.exit_null++;
+		return 0;
 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
 		vcpu->run->s390_ucontrol.trans_exc_code =
 						current->thread.gmap_addr;
 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
-		rc = -EREMOTE;
+		return -EREMOTE;
 	} else if (current->thread.gmap_pfault) {
 		trace_kvm_s390_major_guest_pfault(vcpu);
 		current->thread.gmap_pfault = 0;
-		if (kvm_arch_setup_async_pf(vcpu)) {
-			rc = 0;
-		} else {
-			gpa_t gpa = current->thread.gmap_addr;
-			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
-		}
+		if (kvm_arch_setup_async_pf(vcpu))
+			return 0;
+		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
 	}
-
-	if (rc == -1)
-		rc = vcpu_post_run_fault_in_sie(vcpu);
-
-	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
-	if (rc == 0) {
-		if (kvm_is_ucontrol(vcpu->kvm))
-			/* Don't exit for host interrupts. */
-			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
-		else
-			rc = kvm_handle_sie_intercept(vcpu);
-	}
-
-	return rc;
+	return vcpu_post_run_fault_in_sie(vcpu);
 }
 
 static int __vcpu_run(struct kvm_vcpu *vcpu)
@@ -2233,18 +2333,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		rc = 0;
 	}
 
-	if (rc == -EOPNOTSUPP) {
-		/* intercept cannot be handled in-kernel, prepare kvm-run */
-		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
-		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
-		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
-		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
-		rc = 0;
-	}
-
 	if (rc == -EREMOTE) {
-		/* intercept was handled, but userspace support is needed
-		 * kvm_run has been prepared by the handler */
+		/* userspace support is needed, kvm_run has been prepared */
 		rc = 0;
 	}
 
@@ -2767,6 +2857,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 static int __init kvm_s390_init(void)
 {
+	if (!sclp.has_sief2) {
+		pr_info("SIE not available\n");
+		return -ENODEV;
+	}
+
 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 }
...
@@ -340,4 +340,11 @@ void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
 void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
 void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
 
+/* support for Basic/Extended SCA handling */
+static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
+{
+	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */
+
+	return &sca->ipte_control;
+}
 #endif
@@ -43,7 +43,10 @@ struct read_info_sccb {
 	u8	_pad_92[100 - 92];	/* 92-99 */
 	u32	rnsize2;		/* 100-103 */
 	u64	rnmax2;			/* 104-111 */
-	u8	_pad_112[120 - 112];	/* 112-119 */
+	u8	_pad_112[116 - 112];	/* 112-115 */
+	u8	fac116;			/* 116 */
+	u8	_pad_117[119 - 117];	/* 117-118 */
+	u8	fac119;			/* 119 */
 	u16	hcpua;			/* 120-121 */
 	u8	_pad_122[4096 - 122];	/* 122-4095 */
 } __packed __aligned(PAGE_SIZE);
@@ -108,6 +111,8 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
 	sclp.facilities = sccb->facilities;
 	sclp.has_sprp = !!(sccb->fac84 & 0x02);
 	sclp.has_core_type = !!(sccb->fac84 & 0x01);
+	sclp.has_esca = !!(sccb->fac116 & 0x08);
+	sclp.has_hvs = !!(sccb->fac119 & 0x80);
 	if (sccb->fac85 & 0x02)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
 	sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
@@ -131,6 +136,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
 			continue;
 		sclp.has_siif = cpue->siif;
 		sclp.has_sigpif = cpue->sigpif;
+		sclp.has_sief2 = cpue->sief2;
 		break;
 	}
...
@@ -472,6 +472,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 	struct kvm_vcpu *vcpu;
 	int i;
 
+	if (id < 0 || id >= KVM_MAX_VCPUS)
+		return NULL;
+	vcpu = kvm_get_vcpu(kvm, id);
+	if (vcpu && vcpu->vcpu_id == id)
+		return vcpu;
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		if (vcpu->vcpu_id == id)
 			return vcpu;
@@ -1011,7 +1016,6 @@ struct kvm_stats_debugfs_item {
 	const char *name;
 	int offset;
 	enum kvm_stat_kind kind;
-	struct dentry *dentry;
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
...
@@ -2257,7 +2257,7 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
 	int r;
-	struct kvm_vcpu *vcpu, *v;
+	struct kvm_vcpu *vcpu;
 
 	if (id >= KVM_MAX_VCPUS)
 		return -EINVAL;
@@ -2281,12 +2281,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 		r = -EINVAL;
 		goto unlock_vcpu_destroy;
 	}
-
-	kvm_for_each_vcpu(r, v, kvm)
-		if (v->vcpu_id == id) {
-			r = -EEXIST;
-			goto unlock_vcpu_destroy;
-		}
+	if (kvm_get_vcpu_by_id(kvm, id)) {
+		r = -EEXIST;
+		goto unlock_vcpu_destroy;
+	}
 
 	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
@@ -3449,10 +3447,9 @@ static int kvm_init_debug(void)
 		goto out;
 
 	for (p = debugfs_entries; p->name; ++p) {
-		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
-						(void *)(long)p->offset,
-						stat_fops[p->kind]);
-		if (p->dentry == NULL)
+		if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
+					 (void *)(long)p->offset,
+					 stat_fops[p->kind]))
 			goto out_dir;
 	}
@@ -3464,15 +3461,6 @@ static int kvm_init_debug(void)
 	return r;
 }
 
-static void kvm_exit_debug(void)
-{
-	struct kvm_stats_debugfs_item *p;
-
-	for (p = debugfs_entries; p->name; ++p)
-		debugfs_remove(p->dentry);
-	debugfs_remove(kvm_debugfs_dir);
-}
-
 static int kvm_suspend(void)
 {
 	if (kvm_usage_count)
@@ -3630,7 +3618,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
 void kvm_exit(void)
 {
-	kvm_exit_debug();
+	debugfs_remove_recursive(kvm_debugfs_dir);
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
 	kvm_async_pf_deinit();
...