Commit 46014634 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390 features, kvm_get_vcpu_by_id and stat

Several features for s390
1. ESCA support (up to 248 vCPUs)
2. KVM detection: we can now detect if we support KVM (e.g. does KVM
   under KVM work?)

kvm_stat:
1. cleanup the exit path

kvm_get_vcpu_by_id:
1. Use kvm_get_vcpu_by_id where appropriate
2. Apply a heuristic to optimize for the common case that a VCPU's ID
   equals its creation index (ID VCPU == No. VCPU)
parents bb11c6c9 2f8a43d4
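
Note on the "KVM detection" item: the series exports a new "sie" entry in the
features line of /proc/cpuinfo (see the processor.c hunk below). A minimal
userspace probe could look like the following sketch; the helper name and the
crude substring match are illustrative, not part of the series:

/* Hypothetical userspace check for the "sie" feature flag. */
#include <stdio.h>
#include <string.h>

static int cpu_has_sie(void)
{
        char line[4096];
        FILE *f = fopen("/proc/cpuinfo", "r");

        if (!f)
                return 0;
        while (fgets(line, sizeof(line), f)) {
                /* crude match: the features line gains "sie" with this series */
                if (!strncmp(line, "features", 8) && strstr(line, " sie")) {
                        fclose(f);
                        return 1;
                }
        }
        fclose(f);
        return 0;
}

int main(void)
{
        printf("sie: %s\n", cpu_has_sie() ? "yes" : "no");
        return 0;
}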
arch/powerpc/kvm/book3s_hv.c:
@@ -308,16 +308,10 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-        int r;
-        struct kvm_vcpu *v, *ret = NULL;

         mutex_lock(&kvm->lock);
-        kvm_for_each_vcpu(r, v, kvm) {
-                if (v->vcpu_id == id) {
-                        ret = v;
-                        break;
-                }
-        }
+        struct kvm_vcpu *ret;
+
+        mutex_lock(&kvm->lock);
+        ret = kvm_get_vcpu_by_id(kvm, id);
         mutex_unlock(&kvm->lock);
         return ret;
 }
arch/s390/include/asm/elf.h:
@@ -104,6 +104,9 @@
 #define HWCAP_S390_TE           1024
 #define HWCAP_S390_VXRS         2048

+/* Internal bits, not exposed via elf */
+#define HWCAP_INT_SIE           1UL
+
 /*
  * These are used to set parameters in the core dumps.
  */
@@ -169,6 +172,10 @@ extern unsigned int vdso_enabled;
 extern unsigned long elf_hwcap;
 #define ELF_HWCAP (elf_hwcap)

+/* Internal hardware capabilities, not exposed via elf */
+extern unsigned long int_hwcap;
+
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization. This is more specific in
    intent than poking at uname or /proc/cpuinfo.
arch/s390/include/asm/kvm_host.h:
@@ -25,7 +25,9 @@
 #include <asm/fpu/api.h>
 #include <asm/isc.h>

-#define KVM_MAX_VCPUS 64
+#define KVM_S390_BSCA_CPU_SLOTS 64
+#define KVM_S390_ESCA_CPU_SLOTS 248
+#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
 #define KVM_USER_MEM_SLOTS 32

 /*
@@ -40,9 +42,34 @@
 #define SIGP_CTRL_C             0x80
 #define SIGP_CTRL_SCN_MASK      0x3f

-struct sca_entry {
+union bsca_sigp_ctrl {
+        __u8 value;
+        struct {
+                __u8 c : 1;
+                __u8 r : 1;
+                __u8 scn : 6;
+        };
+} __packed;
+
+union esca_sigp_ctrl {
+        __u16 value;
+        struct {
+                __u8 c : 1;
+                __u8 reserved: 7;
+                __u8 scn;
+        };
+} __packed;
+
+struct esca_entry {
+        union esca_sigp_ctrl sigp_ctrl;
+        __u16 reserved1[3];
+        __u64 sda;
+        __u64 reserved2[6];
+} __packed;
+
+struct bsca_entry {
         __u8    reserved0;
-        __u8    sigp_ctrl;
+        union bsca_sigp_ctrl    sigp_ctrl;
         __u16   reserved[3];
         __u64   sda;
         __u64   reserved2[2];
@@ -57,14 +84,22 @@ union ipte_control {
         };
 };

-struct sca_block {
+struct bsca_block {
         union ipte_control ipte_control;
         __u64   reserved[5];
         __u64   mcn;
         __u64   reserved2;
-        struct sca_entry cpu[64];
+        struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
 } __attribute__((packed));

+struct esca_block {
+        union ipte_control ipte_control;
+        __u64   reserved1[7];
+        __u64   mcn[4];
+        __u64   reserved2[20];
+        struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
+} __packed;
+
 #define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
@@ -585,7 +620,9 @@ struct kvm_s390_crypto_cb {
 };

 struct kvm_arch{
-        struct sca_block *sca;
+        void *sca;
+        int use_esca;
+        rwlock_t sca_lock;
         debug_info_t *dbf;
         struct kvm_s390_float_interrupt float_int;
         struct kvm_device *flic;
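
As a sanity check on the layouts above (not part of the patch): summing the
packed fields gives a basic SCA of 64 + 64*32 = 2112 bytes, which fits in one
4K page, and an extended SCA of 256 + 248*64 = 16128 bytes, which fits in four
pages. Assuming the definitions from this hunk, an illustrative C11
compile-time check:

/* Illustrative only; sizes derived from the field lists above. */
_Static_assert(sizeof(struct bsca_entry) == 32, "basic SCA entry is 32 bytes");
_Static_assert(sizeof(struct esca_entry) == 64, "extended SCA entry is 64 bytes");
_Static_assert(sizeof(struct bsca_block) == 2112, "BSCA fits in one 4K page");
_Static_assert(sizeof(struct esca_block) == 16128, "ESCA fits in four 4K pages");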
arch/s390/include/asm/sclp.h:
@@ -29,7 +29,10 @@ struct sclp_ipl_info {
 struct sclp_core_entry {
         u8 core_id;
-        u8 reserved0[2];
+        u8 reserved0;
+        u8 : 4;
+        u8 sief2 : 1;
+        u8 : 3;
         u8 : 3;
         u8 siif : 1;
         u8 sigpif : 1;
@@ -53,6 +56,9 @@ struct sclp_info {
         unsigned char has_sigpif : 1;
         unsigned char has_core_type : 1;
         unsigned char has_sprp : 1;
+        unsigned char has_hvs : 1;
+        unsigned char has_esca : 1;
+        unsigned char has_sief2 : 1;
         unsigned int ibc;
         unsigned int mtid;
         unsigned int mtid_cp;
arch/s390/kernel/processor.c:
@@ -61,6 +61,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
                 "edat", "etf3eh", "highgprs", "te", "vx"
         };
+        static const char * const int_hwcap_str[] = {
+                "sie"
+        };
         unsigned long n = (unsigned long) v - 1;
         int i;

@@ -75,6 +78,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                 for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
                         if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                                 seq_printf(m, "%s ", hwcap_str[i]);
+                for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
+                        if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
+                                seq_printf(m, "%s ", int_hwcap_str[i]);
                 seq_puts(m, "\n");
                 show_cacheinfo(m);
         }
arch/s390/kernel/setup.c:
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(console_irq);
 unsigned long elf_hwcap __read_mostly = 0;
 char elf_platform[ELF_PLATFORM_SIZE];

+unsigned long int_hwcap = 0;
+
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
 unsigned long __initdata max_physmem_end;
@@ -793,6 +795,13 @@ static int __init setup_hwcaps(void)
                 strcpy(elf_platform, "z13");
                 break;
         }
+
+        /*
+         * Virtualization support HWCAP_INT_SIE is bit 0.
+         */
+        if (sclp.has_sief2)
+                int_hwcap |= HWCAP_INT_SIE;
+
         return 0;
 }
 arch_initcall(setup_hwcaps);
arch/s390/kvm/diag.c:
@@ -155,10 +155,8 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
 {
-        struct kvm *kvm = vcpu->kvm;
         struct kvm_vcpu *tcpu;
         int tid;
-        int i;

         tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
         vcpu->stat.diagnose_9c++;
@@ -167,12 +165,9 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
         if (tid == vcpu->vcpu_id)
                 return 0;

-        kvm_for_each_vcpu(i, tcpu, kvm)
-                if (tcpu->vcpu_id == tid) {
-                        kvm_vcpu_yield_to(tcpu);
-                        break;
-                }
+        tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
+        if (tcpu)
+                kvm_vcpu_yield_to(tcpu);

         return 0;
 }
arch/s390/kvm/gaccess.c:
@@ -259,10 +259,14 @@ struct aste {
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
-        union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control;
+        if (vcpu->arch.sie_block->eca & 1) {
+                int rc;

-        if (vcpu->arch.sie_block->eca & 1)
-                return ic->kh != 0;
+                read_lock(&vcpu->kvm->arch.sca_lock);
+                rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
+                read_unlock(&vcpu->kvm->arch.sca_lock);
+                return rc;
+        }
         return vcpu->kvm->arch.ipte_lock_count != 0;
 }

@@ -274,16 +278,20 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu)
         vcpu->kvm->arch.ipte_lock_count++;
         if (vcpu->kvm->arch.ipte_lock_count > 1)
                 goto out;
-        ic = &vcpu->kvm->arch.sca->ipte_control;
+retry:
+        read_lock(&vcpu->kvm->arch.sca_lock);
+        ic = kvm_s390_get_ipte_control(vcpu->kvm);
         do {
                 old = READ_ONCE(*ic);
-                while (old.k) {
+                if (old.k) {
+                        read_unlock(&vcpu->kvm->arch.sca_lock);
                         cond_resched();
-                        old = READ_ONCE(*ic);
+                        goto retry;
                 }
                 new = old;
                 new.k = 1;
         } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+        read_unlock(&vcpu->kvm->arch.sca_lock);
 out:
         mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
 }
@@ -296,12 +304,14 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
         vcpu->kvm->arch.ipte_lock_count--;
         if (vcpu->kvm->arch.ipte_lock_count)
                 goto out;
-        ic = &vcpu->kvm->arch.sca->ipte_control;
+        read_lock(&vcpu->kvm->arch.sca_lock);
+        ic = kvm_s390_get_ipte_control(vcpu->kvm);
         do {
                 old = READ_ONCE(*ic);
                 new = old;
                 new.k = 0;
         } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+        read_unlock(&vcpu->kvm->arch.sca_lock);
         wake_up(&vcpu->kvm->arch.ipte_wq);
 out:
         mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
@@ -311,24 +321,29 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
 {
         union ipte_control old, new, *ic;

-        ic = &vcpu->kvm->arch.sca->ipte_control;
+retry:
+        read_lock(&vcpu->kvm->arch.sca_lock);
+        ic = kvm_s390_get_ipte_control(vcpu->kvm);
         do {
                 old = READ_ONCE(*ic);
-                while (old.kg) {
+                if (old.kg) {
+                        read_unlock(&vcpu->kvm->arch.sca_lock);
                         cond_resched();
-                        old = READ_ONCE(*ic);
+                        goto retry;
                 }
                 new = old;
                 new.k = 1;
                 new.kh++;
         } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+        read_unlock(&vcpu->kvm->arch.sca_lock);
 }

 static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 {
         union ipte_control old, new, *ic;

-        ic = &vcpu->kvm->arch.sca->ipte_control;
+        read_lock(&vcpu->kvm->arch.sca_lock);
+        ic = kvm_s390_get_ipte_control(vcpu->kvm);
         do {
                 old = READ_ONCE(*ic);
                 new = old;
@@ -336,6 +351,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
                 if (!new.kh)
                         new.k = 0;
         } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+        read_unlock(&vcpu->kvm->arch.sca_lock);
         if (!new.kh)
                 wake_up(&vcpu->kvm->arch.ipte_wq);
 }
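
The read_lock/read_unlock pairs above guard against the SCA being replaced
while its ipte_control word is dereferenced: the basic-to-extended SCA switch
(in the collapsed kvm-s390.c diff) takes the same rwlock for writing. A
simplified sketch of that writer side, with illustrative names only and the
vcpu freezing and entry copying elided:

/* Sketch, not the patch's exact code: swapping the SCA pointer under the
 * write lock is what makes the kvm_s390_get_ipte_control() readers above
 * race-free. */
static void sca_switch_sketch(struct kvm *kvm, struct esca_block *new_sca)
{
        write_lock(&kvm->arch.sca_lock);
        /* ... copy ipte_control and per-vcpu entries from the old BSCA ... */
        kvm->arch.sca = new_sca;
        kvm->arch.use_esca = 1;
        write_unlock(&kvm->arch.sca_lock);
}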
arch/s390/kvm/intercept.c:
@@ -54,9 +54,6 @@ void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
 static int handle_noop(struct kvm_vcpu *vcpu)
 {
         switch (vcpu->arch.sie_block->icptcode) {
-        case 0x0:
-                vcpu->stat.exit_null++;
-                break;
         case 0x10:
                 vcpu->stat.exit_external_request++;
                 break;
@@ -338,8 +335,10 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 {
+        if (kvm_is_ucontrol(vcpu->kvm))
+                return -EOPNOTSUPP;
+
         switch (vcpu->arch.sie_block->icptcode) {
-        case 0x00:
         case 0x10:
         case 0x18:
                 return handle_noop(vcpu);
arch/s390/kvm/interrupt.c:
@@ -34,6 +34,106 @@
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00

+/* handle external calls via sigp interpretation facility */
+static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
+{
+        int c, scn;
+
+        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
+                return 0;
+
+        read_lock(&vcpu->kvm->arch.sca_lock);
+        if (vcpu->kvm->arch.use_esca) {
+                struct esca_block *sca = vcpu->kvm->arch.sca;
+                union esca_sigp_ctrl sigp_ctrl =
+                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+                c = sigp_ctrl.c;
+                scn = sigp_ctrl.scn;
+        } else {
+                struct bsca_block *sca = vcpu->kvm->arch.sca;
+                union bsca_sigp_ctrl sigp_ctrl =
+                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+                c = sigp_ctrl.c;
+                scn = sigp_ctrl.scn;
+        }
+        read_unlock(&vcpu->kvm->arch.sca_lock);
+
+        if (src_id)
+                *src_id = scn;
+
+        return c;
+}
+
+static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+{
+        int expect, rc;
+
+        read_lock(&vcpu->kvm->arch.sca_lock);
+        if (vcpu->kvm->arch.use_esca) {
+                struct esca_block *sca = vcpu->kvm->arch.sca;
+                union esca_sigp_ctrl *sigp_ctrl =
+                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+
+                new_val.scn = src_id;
+                new_val.c = 1;
+                old_val.c = 0;
+
+                expect = old_val.value;
+                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+        } else {
+                struct bsca_block *sca = vcpu->kvm->arch.sca;
+                union bsca_sigp_ctrl *sigp_ctrl =
+                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+                union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+
+                new_val.scn = src_id;
+                new_val.c = 1;
+                old_val.c = 0;
+
+                expect = old_val.value;
+                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+        }
+        read_unlock(&vcpu->kvm->arch.sca_lock);
+
+        if (rc != expect) {
+                /* another external call is pending */
+                return -EBUSY;
+        }
+        atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+        return 0;
+}
+
+static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
+{
+        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+        int rc, expect;
+
+        atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+        read_lock(&vcpu->kvm->arch.sca_lock);
+        if (vcpu->kvm->arch.use_esca) {
+                struct esca_block *sca = vcpu->kvm->arch.sca;
+                union esca_sigp_ctrl *sigp_ctrl =
+                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+                union esca_sigp_ctrl old = *sigp_ctrl;
+
+                expect = old.value;
+                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+        } else {
+                struct bsca_block *sca = vcpu->kvm->arch.sca;
+                union bsca_sigp_ctrl *sigp_ctrl =
+                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+                union bsca_sigp_ctrl old = *sigp_ctrl;
+
+                expect = old.value;
+                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+        }
+        read_unlock(&vcpu->kvm->arch.sca_lock);
+        WARN_ON(rc != expect); /* cannot clear? */
+}
+
 int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -792,13 +892,11 @@ static const deliver_irq_t deliver_irq_funcs[] = {
 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-        uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

         if (!sclp.has_sigpif)
                 return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

-        return (sigp_ctrl & SIGP_CTRL_C) &&
-               (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
+        return sca_ext_call_pending(vcpu, NULL);
 }

 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
@@ -909,9 +1007,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
         memset(&li->irq, 0, sizeof(li->irq));
         spin_unlock(&li->lock);

-        /* clear pending external calls set by sigp interpretation facility */
-        atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
-        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
+        sca_clear_ext_call(vcpu);
 }

 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
@@ -1003,21 +1099,6 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
         return 0;
 }

-static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
-{
-        unsigned char new_val, old_val;
-        uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
-
-        new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
-        old_val = *sigp_ctrl & ~SIGP_CTRL_C;
-        if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
-                /* another external call is pending */
-                return -EBUSY;
-        }
-        atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
-        return 0;
-}
-
 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1034,7 +1115,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
                 return -EINVAL;

         if (sclp.has_sigpif)
-                return __inject_extcall_sigpif(vcpu, src_id);
+                return sca_inject_ext_call(vcpu, src_id);

         if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                 return -EBUSY;
@@ -2203,7 +2284,7 @@ static void store_local_irq(struct kvm_s390_local_interrupt *li,
 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 {
-        uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+        int scn;
         unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
         unsigned long pending_irqs;
@@ -2243,14 +2324,12 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
                 }
         }

-        if ((sigp_ctrl & SIGP_CTRL_C) &&
-            (atomic_read(&vcpu->arch.sie_block->cpuflags) &
-             CPUSTAT_ECALL_PEND)) {
+        if (sca_ext_call_pending(vcpu, &scn)) {
                 if (n + sizeof(irq) > len)
                         return -ENOBUFS;
                 memset(&irq, 0, sizeof(irq));
                 irq.type = KVM_S390_INT_EXTERNAL_CALL;
-                irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+                irq.u.extcall.code = scn;
                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
                         return -EFAULT;
                 n += sizeof(irq);
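
The cmpxchg in sca_inject_ext_call() only succeeds when the expected old
value has the "call pending" bit cleared, so a second injection fails with
-EBUSY until the first call is delivered. A minimal, runnable userspace
illustration of that claim-by-cmpxchg idiom (names and the BSCA-style 8-bit
control byte are illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define CTRL_C 0x80u    /* "external call pending" bit, as in the BSCA byte */

static uint8_t sigp_ctrl;       /* stands in for the vcpu's SCA entry */

static int inject(uint8_t scn)
{
        uint8_t old_val = sigp_ctrl & (uint8_t)~CTRL_C; /* expect: no call pending */
        uint8_t new_val = CTRL_C | (scn & 0x3fu);

        if (!__atomic_compare_exchange_n(&sigp_ctrl, &old_val, new_val, 0,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                return -1;      /* another external call is pending */
        return 0;
}

int main(void)
{
        printf("first inject: %d\n", inject(3));        /* succeeds: 0 */
        printf("second inject: %d\n", inject(5));       /* busy: -1 */
        return 0;
}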
(diff for arch/s390/kvm/kvm-s390.c collapsed)
arch/s390/kvm/kvm-s390.h:
@@ -340,4 +340,11 @@ void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
 void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
 void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

+/* support for Basic/Extended SCA handling */
+static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
+{
+        struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */
+
+        return &sca->ipte_control;
+}
 #endif
drivers/s390/char/sclp_early.c:
@@ -43,7 +43,10 @@ struct read_info_sccb {
         u8      _pad_92[100 - 92];      /* 92-99 */
         u32     rnsize2;                /* 100-103 */
         u64     rnmax2;                 /* 104-111 */
-        u8      _pad_112[120 - 112];    /* 112-119 */
+        u8      _pad_112[116 - 112];    /* 112-115 */
+        u8      fac116;                 /* 116 */
+        u8      _pad_117[119 - 117];    /* 117-118 */
+        u8      fac119;                 /* 119 */
         u16     hcpua;                  /* 120-121 */
         u8      _pad_122[4096 - 122];   /* 122-4095 */
 } __packed __aligned(PAGE_SIZE);
@@ -108,6 +111,8 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
         sclp.facilities = sccb->facilities;
         sclp.has_sprp = !!(sccb->fac84 & 0x02);
         sclp.has_core_type = !!(sccb->fac84 & 0x01);
+        sclp.has_esca = !!(sccb->fac116 & 0x08);
+        sclp.has_hvs = !!(sccb->fac119 & 0x80);
         if (sccb->fac85 & 0x02)
                 S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
         sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
@@ -131,6 +136,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
                         continue;
                 sclp.has_siif = cpue->siif;
                 sclp.has_sigpif = cpue->sigpif;
+                sclp.has_sief2 = cpue->sief2;
                 break;
         }
include/linux/kvm_host.h:
@@ -472,6 +472,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
         struct kvm_vcpu *vcpu;
         int i;

+        if (id < 0 || id >= KVM_MAX_VCPUS)
+                return NULL;
+        vcpu = kvm_get_vcpu(kvm, id);
+        if (vcpu && vcpu->vcpu_id == id)
+                return vcpu;
         kvm_for_each_vcpu(i, vcpu, kvm)
                 if (vcpu->vcpu_id == id)
                         return vcpu;
@@ -1011,7 +1016,6 @@ struct kvm_stats_debugfs_item {
         const char *name;
         int offset;
         enum kvm_stat_kind kind;
-        struct dentry *dentry;
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
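
The hunk above is the heuristic from the commit message: userspace normally
creates vcpus with ids 0..n-1 in order, so the id doubles as the vcpus[]
index and kvm_get_vcpu(kvm, id) resolves the lookup in O(1); only
out-of-order id assignment falls through to the linear scan. A hypothetical
debugging aid (not in the patch) that checks this invariant:

/* Hypothetical: for a VM whose vcpu ids were assigned in creation order,
 * the by-id lookup and the by-index lookup must agree. */
static void check_fast_lookup(struct kvm *kvm, int nr_vcpus)
{
        int id;

        for (id = 0; id < nr_vcpus; id++)
                WARN_ON(kvm_get_vcpu_by_id(kvm, id) != kvm_get_vcpu(kvm, id));
}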
virt/kvm/kvm_main.c:
@@ -2257,7 +2257,7 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 {
         int r;
-        struct kvm_vcpu *vcpu, *v;
+        struct kvm_vcpu *vcpu;

         if (id >= KVM_MAX_VCPUS)
                 return -EINVAL;
@@ -2281,9 +2281,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
                 r = -EINVAL;
                 goto unlock_vcpu_destroy;
         }
-
-        kvm_for_each_vcpu(r, v, kvm)
-                if (v->vcpu_id == id) {
-                        r = -EEXIST;
-                        goto unlock_vcpu_destroy;
-                }
+        if (kvm_get_vcpu_by_id(kvm, id)) {
+                r = -EEXIST;
+                goto unlock_vcpu_destroy;
+        }
@@ -3449,10 +3447,9 @@ static int kvm_init_debug(void)
                 goto out;

         for (p = debugfs_entries; p->name; ++p) {
-                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
-                                                (void *)(long)p->offset,
-                                                stat_fops[p->kind]);
-                if (p->dentry == NULL)
+                if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
+                                         (void *)(long)p->offset,
+                                         stat_fops[p->kind]))
                         goto out_dir;
         }

@@ -3464,15 +3461,6 @@ static int kvm_init_debug(void)
         return r;
 }

-static void kvm_exit_debug(void)
-{
-        struct kvm_stats_debugfs_item *p;
-
-        for (p = debugfs_entries; p->name; ++p)
-                debugfs_remove(p->dentry);
-        debugfs_remove(kvm_debugfs_dir);
-}
-
 static int kvm_suspend(void)
 {
         if (kvm_usage_count)
@@ -3630,7 +3618,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
 void kvm_exit(void)
 {
-        kvm_exit_debug();
+        debugfs_remove_recursive(kvm_debugfs_dir);
         misc_deregister(&kvm_dev);
         kmem_cache_destroy(kvm_vcpu_cache);
         kvm_async_pf_deinit();