Commit 8f060f53 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-5.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

KVM: s390: Features for 5.1

- Clarify KVM-related kernel messages
- Interrupt cleanup
- Introduction of the Guest Information Block (GIB)
- Preparation for processor subfunctions in cpu model
parents a2420107 11ba5961
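
For orientation, here is a minimal hedged sketch (not part of the patch) of how an in-kernel consumer, such as a device passthrough backend, might use the GISC registration interface this series introduces. The example_* names are hypothetical; kvm_s390_gisc_register()/kvm_s390_gisc_unregister() and their return semantics are taken from the interrupt.c hunks below.

#include <linux/kvm_host.h>

static int example_enable_guest_airq(struct kvm *kvm, u32 guest_isc)
{
	/* Ask KVM to alert us for this guest ISC; returns the host NISC. */
	int nisc = kvm_s390_gisc_register(kvm, guest_isc);

	if (nisc < 0)
		return nisc;	/* -ENODEV: no GISA; -ERANGE: ISC > MAX_ISC */
	/* program the adapter to raise interruptions on 'nisc' here */
	return 0;
}

static void example_disable_guest_airq(struct kvm *kvm, u32 guest_isc)
{
	WARN_ON(kvm_s390_gisc_unregister(kvm, guest_isc));
}
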
@@ -331,5 +331,6 @@ extern void css_schedule_reprobe(void);
 /* Function from drivers/s390/cio/chsc.c */
 int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
 int chsc_sstpi(void *page, void *result, size_t size);
+int chsc_sgib(u32 origin);
 #endif
@@ -62,6 +62,7 @@ enum interruption_class {
 	IRQIO_MSI,
 	IRQIO_VIR,
 	IRQIO_VAI,
+	IRQIO_GAL,
 	NMI_NMI,
 	CPU_RST,
 	NR_ARCH_IRQS
...
@@ -21,6 +21,7 @@
 /* Adapter interrupts. */
 #define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */
 #define PCI_ISC 2 /* PCI I/O subchannels */
+#define GAL_ISC 5 /* GIB alert */
 #define AP_ISC 6 /* adjunct processor (crypto) devices */
 /* Functions for registration of I/O interruption subclasses */
...
@@ -591,7 +591,6 @@ struct kvm_s390_float_interrupt {
 	struct kvm_s390_mchk_info mchk;
 	struct kvm_s390_ext_info srv_signal;
 	int next_rr_cpu;
-	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
 	struct mutex ais_lock;
 	u8 simm;
 	u8 nimm;
@@ -712,6 +711,7 @@ struct s390_io_adapter {
 struct kvm_s390_cpu_model {
 	/* facility mask supported by kvm & hosting machine */
 	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+	struct kvm_s390_vm_cpu_subfunc subfuncs;
 	/* facility list requested by guest (in dma page) */
 	__u64 *fac_list;
 	u64 cpuid;
@@ -782,9 +782,21 @@ struct kvm_s390_gisa {
 		u8 reserved03[11];
 		u32 airq_count;
 	} g1;
+	struct {
+		u64 word[4];
+	} u64;
 	};
 };
+
+struct kvm_s390_gib {
+	u32 alert_list_origin;
+	u32 reserved01;
+	u8:5;
+	u8 nisc:3;
+	u8 reserved03[3];
+	u32 reserved04[5];
+};
 /*
  * sie_page2 has to be allocated as DMA because fac_list, crycb and
  * gisa need 31bit addresses in the sie control block.
@@ -793,7 +805,8 @@ struct sie_page2 {
 	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
 	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
 	struct kvm_s390_gisa gisa;			/* 0x0900 */
-	u8 reserved920[0x1000 - 0x920];			/* 0x0920 */
+	struct kvm *kvm;				/* 0x0920 */
+	u8 reserved928[0x1000 - 0x928];			/* 0x0928 */
 };
 struct kvm_s390_vsie {
@@ -804,6 +817,20 @@ struct kvm_s390_vsie {
 	struct page *pages[KVM_MAX_VCPUS];
 };
+
+struct kvm_s390_gisa_iam {
+	u8 mask;
+	spinlock_t ref_lock;
+	u32 ref_count[MAX_ISC + 1];
+};
+
+struct kvm_s390_gisa_interrupt {
+	struct kvm_s390_gisa *origin;
+	struct kvm_s390_gisa_iam alert;
+	struct hrtimer timer;
+	u64 expires;
+	DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
+};
 struct kvm_arch{
 	void *sca;
 	int use_esca;
@@ -837,7 +864,8 @@ struct kvm_arch{
 	atomic64_t cmma_dirty_pages;
 	/* subset of available cpu features enabled by user space */
 	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
-	struct kvm_s390_gisa *gisa;
+	DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
+	struct kvm_s390_gisa_interrupt gisa_int;
 };
 #define KVM_HVA_ERR_BAD (-1UL)
@@ -871,6 +899,9 @@ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;
+extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
+extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
+
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_check_processor_compat(void *rtn) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
...
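
The cmpxchg-based helpers in the interrupt.c hunks below update the first 8 bytes of the GISA as one unit. A minimal illustrative sketch of the layout they assume (big-endian s390, field offsets per the kvm_s390_gisa definition above; the sketch_* helpers are not from the patch):

#include <linux/types.h>

/* word[0], loaded as a big-endian u64, packs three fields:       */
/*   bits 63..32  next_alert  (own address when not on the list)  */
/*   bits 31..24  ipm         (interruption pending mask)         */
/*   bits  7..0   iam         (interruption alert mask)           */
static inline u32 sketch_next_alert(u64 word) { return word >> 32; }
static inline u8  sketch_ipm(u64 word)        { return word >> 24; }
static inline u8  sketch_iam(u64 word)        { return word; }
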
@@ -88,6 +88,7 @@ static const struct irq_class irqclass_sub_desc[] = {
 	{.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
 	{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
 	{.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
+	{.irq = IRQIO_GAL, .name = "GAL", .desc = "[I/O] GIB Alert"},
 	{.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
 	{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
 };
...
@@ -7,6 +7,9 @@
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  */
+
+#define KMSG_COMPONENT "kvm-s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <linux/hrtimer.h>
@@ -23,6 +26,7 @@
 #include <asm/gmap.h>
 #include <asm/switch_to.h>
 #include <asm/nmi.h>
+#include <asm/airq.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -31,6 +35,8 @@
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
 
+static struct kvm_s390_gib *gib;
+
 /* handle external calls via sigp interpretation facility */
 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
@@ -217,22 +223,100 @@ static inline u8 int_word_to_isc(u32 int_word)
  */
 #define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
-static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+/**
+ * gisa_set_iam - change the GISA interruption alert mask
+ *
+ * @gisa: gisa to operate on
+ * @iam: new IAM value to use
+ *
+ * Change the IAM atomically with the next alert address and the IPM
+ * of the GISA if the GISA is not part of the GIB alert list. All three
+ * fields are located in the first long word of the GISA.
+ *
+ * Returns: 0 on success
+ *          -EBUSY in case the gisa is part of the alert list
+ */
+static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
+{
+	u64 word, _word;
+
+	do {
+		word = READ_ONCE(gisa->u64.word[0]);
+		if ((u64)gisa != word >> 32)
+			return -EBUSY;
+		_word = (word & ~0xffUL) | iam;
+	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+
+	return 0;
+}
+
+/**
+ * gisa_clear_ipm - clear the GISA interruption pending mask
+ *
+ * @gisa: gisa to operate on
+ *
+ * Clear the IPM atomically with the next alert address and the IAM
+ * of the GISA unconditionally. All three fields are located in the
+ * first long word of the GISA.
+ */
+static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
+{
+	u64 word, _word;
+
+	do {
+		word = READ_ONCE(gisa->u64.word[0]);
+		_word = word & ~(0xffUL << 24);
+	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
+}
+
+/**
+ * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
+ *
+ * @gi: gisa interrupt struct to work on
+ *
+ * Atomically restores the interruption alert mask if none of the
+ * relevant ISCs are pending and return the IPM.
+ *
+ * Returns: the relevant pending ISCs
+ */
+static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
+{
+	u8 pending_mask, alert_mask;
+	u64 word, _word;
+
+	do {
+		word = READ_ONCE(gi->origin->u64.word[0]);
+		alert_mask = READ_ONCE(gi->alert.mask);
+		pending_mask = (u8)(word >> 24) & alert_mask;
+		if (pending_mask)
+			return pending_mask;
+		_word = (word & ~0xffUL) | alert_mask;
+	} while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);
+
+	return 0;
+}
+
+static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
+{
+	return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
+}
+
+static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
 {
 	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }
 
-static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
+static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
 {
 	return READ_ONCE(gisa->ipm);
 }
 
-static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
 {
 	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }
 
-static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
 {
 	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }
@@ -245,8 +329,13 @@ static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
 static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
 {
-	return pending_irqs_no_gisa(vcpu) |
-		kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
+	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
+	unsigned long pending_mask;
+
+	pending_mask = pending_irqs_no_gisa(vcpu);
+	if (gi->origin)
+		pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
+	return pending_mask;
 }
 static inline int isc_to_irq_type(unsigned long isc)
@@ -318,13 +407,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
-	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
-	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
 }
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@@ -345,7 +434,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 {
 	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
 		return;
-	else if (psw_ioint_disabled(vcpu))
+	if (psw_ioint_disabled(vcpu))
 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
 	else
 		vcpu->arch.sie_block->lctl |= LCTL_CR6;
@@ -353,7 +442,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
+	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
 		return;
 	if (psw_extint_disabled(vcpu))
 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
@@ -363,7 +452,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
 static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
 {
-	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
+	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
 		return;
 	if (psw_mchk_disabled(vcpu))
 		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
@@ -956,6 +1045,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
 {
 	struct list_head *isc_list;
 	struct kvm_s390_float_interrupt *fi;
+	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
 	struct kvm_s390_interrupt_info *inti = NULL;
 	struct kvm_s390_io_info io;
 	u32 isc;
@@ -998,8 +1088,7 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
 		goto out;
 	}
 
-	if (vcpu->kvm->arch.gisa &&
-	    kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
+	if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
 		/*
 		 * in case an adapter interrupt was not delivered
 		 * in SIE context KVM will handle the delivery
@@ -1089,6 +1178,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
 	u64 sltime;
 
 	vcpu->stat.exit_wait_state++;
@@ -1102,6 +1192,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP; /* disabled wait */
 	}
 
+	if (gi->origin &&
+	    (gisa_get_ipm_or_restore_iam(gi) &
+	     vcpu->arch.sie_block->gcr[6] >> 24))
+		return 0;
+
 	if (!ckc_interrupts_enabled(vcpu) &&
 	    !cpu_timer_interrupts_enabled(vcpu)) {
 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
@@ -1533,18 +1628,19 @@ static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
 static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
 {
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
 	unsigned long active_mask;
 	int isc;
 
 	if (schid)
 		goto out;
-	if (!kvm->arch.gisa)
+	if (!gi->origin)
 		goto out;
 
-	active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
+	active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
 	while (active_mask) {
 		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
-		if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
+		if (gisa_tac_ipm_gisc(gi->origin, isc))
 			return isc;
 		clear_bit_inv(isc, &active_mask);
 	}
@@ -1567,6 +1663,7 @@ static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 						     u64 isc_mask, u32 schid)
 {
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
 	struct kvm_s390_interrupt_info *inti, *tmp_inti;
 	int isc;
 
@@ -1584,7 +1681,7 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 		/* both types of interrupts present */
 		if (int_word_to_isc(inti->io.io_int_word) <= isc) {
 			/* classical IO int with higher priority */
-			kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+			gisa_set_ipm_gisc(gi->origin, isc);
 			goto out;
 		}
 gisa_out:
@@ -1596,7 +1693,7 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 		kvm_s390_reinject_io_int(kvm, inti);
 		inti = tmp_inti;
 	} else
-		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+		gisa_set_ipm_gisc(gi->origin, isc);
 out:
 	return inti;
 }
@@ -1685,6 +1782,7 @@ static int __inject_float_mchk(struct kvm *kvm,
 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 {
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
 	struct kvm_s390_float_interrupt *fi;
 	struct list_head *list;
 	int isc;
@@ -1692,9 +1790,9 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	kvm->stat.inject_io++;
 	isc = int_word_to_isc(inti->io.io_int_word);
 
-	if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
+	if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
 		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
-		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+		gisa_set_ipm_gisc(gi->origin, isc);
 		kfree(inti);
 		return 0;
 	}
@@ -1726,7 +1824,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
  */
 static void __floating_irq_kick(struct kvm *kvm, u64 type)
 {
-	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
 	struct kvm_vcpu *dst_vcpu;
 	int sigcpu, online_vcpus, nr_tries = 0;
 
@@ -1735,11 +1832,11 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 		return;
 
 	/* find idle VCPUs first, then round robin */
-	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
+	sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
 	if (sigcpu == online_vcpus) {
 		do {
-			sigcpu = fi->next_rr_cpu;
-			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
+			sigcpu = kvm->arch.float_int.next_rr_cpu++;
+			kvm->arch.float_int.next_rr_cpu %= online_vcpus;
 			/* avoid endless loops if all vcpus are stopped */
 			if (nr_tries++ >= online_vcpus)
 				return;
@@ -1753,7 +1850,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
+		if (!(type & KVM_S390_INT_IO_AI_MASK &&
+		      kvm->arch.gisa_int.origin))
 			kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
 		break;
 	default:
@@ -2003,6 +2101,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
 {
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
 	struct kvm_s390_interrupt_info *inti;
 	struct kvm_s390_float_interrupt *fi;
 	struct kvm_s390_irq *buf;
@@ -2026,15 +2125,14 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
 	max_irqs = len / sizeof(struct kvm_s390_irq);
 
-	if (kvm->arch.gisa &&
-	    kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
+	if (gi->origin && gisa_get_ipm(gi->origin)) {
 		for (i = 0; i <= MAX_ISC; i++) {
 			if (n == max_irqs) {
 				/* signal userspace to try again */
 				ret = -ENOMEM;
 				goto out_nolock;
 			}
-			if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
+			if (gisa_tac_ipm_gisc(gi->origin, i)) {
 				irq = (struct kvm_s390_irq *) &buf[n];
 				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
 				irq->u.io.io_int_word = isc_to_int_word(i);
@@ -2831,7 +2929,7 @@ static void store_local_irq(struct kvm_s390_local_interrupt *li,
 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 {
 	int scn;
-	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	unsigned long pending_irqs;
 	struct kvm_s390_irq irq;
@@ -2884,27 +2982,278 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 	return n;
 }
 
-void kvm_s390_gisa_clear(struct kvm *kvm)
-{
-	if (kvm->arch.gisa) {
-		memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
-		kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
-		VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
-	}
-}
-
-void kvm_s390_gisa_init(struct kvm *kvm)
-{
-	if (css_general_characteristics.aiv) {
-		kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
-		VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
-		kvm_s390_gisa_clear(kvm);
-	}
-}
+static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
+{
+	int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+	struct kvm_vcpu *vcpu;
+
+	for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
+		vcpu = kvm_get_vcpu(kvm, vcpu_id);
+		if (psw_ioint_disabled(vcpu))
+			continue;
+		deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
+		if (deliverable_mask) {
+			/* lately kicked but not yet running */
+			if (test_and_set_bit(vcpu_id, gi->kicked_mask))
+				return;
+			kvm_s390_vcpu_wakeup(vcpu);
+			return;
+		}
+	}
+}
+
+static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
+{
+	struct kvm_s390_gisa_interrupt *gi =
+		container_of(timer, struct kvm_s390_gisa_interrupt, timer);
+	struct kvm *kvm =
+		container_of(gi->origin, struct sie_page2, gisa)->kvm;
+	u8 pending_mask;
+
+	pending_mask = gisa_get_ipm_or_restore_iam(gi);
+	if (pending_mask) {
+		__airqs_kick_single_vcpu(kvm, pending_mask);
+		hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
+		return HRTIMER_RESTART;
+	};
+
+	return HRTIMER_NORESTART;
+}
+
+#define NULL_GISA_ADDR 0x00000000UL
+#define NONE_GISA_ADDR 0x00000001UL
+#define GISA_ADDR_MASK 0xfffff000UL
+
+static void process_gib_alert_list(void)
+{
+	struct kvm_s390_gisa_interrupt *gi;
+	struct kvm_s390_gisa *gisa;
+	struct kvm *kvm;
+	u32 final, origin = 0UL;
+
+	do {
+		/*
+		 * If the NONE_GISA_ADDR is still stored in the alert list
+		 * origin, we will leave the outer loop. No further GISA has
+		 * been added to the alert list by millicode while processing
+		 * the current alert list.
+		 */
+		final = (origin & NONE_GISA_ADDR);
+
+		/*
+		 * Cut off the alert list and store the NONE_GISA_ADDR in the
+		 * alert list origin to avoid further GAL interruptions.
+		 * A new alert list can be build up by millicode in parallel
+		 * for guests not in the yet cut-off alert list. When in the
+		 * final loop, store the NULL_GISA_ADDR instead. This will re-
+		 * enable GAL interruptions on the host again.
+		 */
+		origin = xchg(&gib->alert_list_origin,
+			      (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
+
+		/*
+		 * Loop through the just cut-off alert list and start the
+		 * gisa timers to kick idle vcpus to consume the pending
+		 * interruptions asap.
+		 */
+		while (origin & GISA_ADDR_MASK) {
+			gisa = (struct kvm_s390_gisa *)(u64)origin;
+			origin = gisa->next_alert;
+			gisa->next_alert = (u32)(u64)gisa;
+			kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
+			gi = &kvm->arch.gisa_int;
+			if (hrtimer_active(&gi->timer))
+				hrtimer_cancel(&gi->timer);
+			hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
+		}
+	} while (!final);
+}
+
+void kvm_s390_gisa_clear(struct kvm *kvm)
+{
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+	if (!gi->origin)
+		return;
+	gisa_clear_ipm(gi->origin);
+	VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
+}
+
+void kvm_s390_gisa_init(struct kvm *kvm)
+{
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+	if (!css_general_characteristics.aiv)
+		return;
+	gi->origin = &kvm->arch.sie_page2->gisa;
+	gi->alert.mask = 0;
+	spin_lock_init(&gi->alert.ref_lock);
+	gi->expires = 50 * 1000; /* 50 usec */
+	hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	gi->timer.function = gisa_vcpu_kicker;
+	memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
+	gi->origin->next_alert = (u32)(u64)gi->origin;
+	VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
+}
 
 void kvm_s390_gisa_destroy(struct kvm *kvm)
 {
-	if (!kvm->arch.gisa)
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+	if (!gi->origin)
 		return;
-	kvm->arch.gisa = NULL;
+	if (gi->alert.mask)
+		KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
+			  kvm, gi->alert.mask);
+	while (gisa_in_alert_list(gi->origin))
+		cpu_relax();
+	hrtimer_cancel(&gi->timer);
+	gi->origin = NULL;
+}
+
+/**
+ * kvm_s390_gisc_register - register a guest ISC
+ *
+ * @kvm:  the kernel vm to work with
+ * @gisc: the guest interruption sub class to register
+ *
+ * The function extends the vm specific alert mask to use.
+ * The effective IAM mask in the GISA is updated as well
+ * in case the GISA is not part of the GIB alert list.
+ * It will be updated latest when the IAM gets restored
+ * by gisa_get_ipm_or_restore_iam().
+ *
+ * Returns: the nonspecific ISC (NISC) the gib alert mechanism
+ *          has registered with the channel subsystem.
+ *          -ENODEV in case the vm uses no GISA
+ *          -ERANGE in case the guest ISC is invalid
+ */
+int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
+{
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+
+	if (!gi->origin)
+		return -ENODEV;
+	if (gisc > MAX_ISC)
+		return -ERANGE;
+
+	spin_lock(&gi->alert.ref_lock);
+	gi->alert.ref_count[gisc]++;
+	if (gi->alert.ref_count[gisc] == 1) {
+		gi->alert.mask |= 0x80 >> gisc;
+		gisa_set_iam(gi->origin, gi->alert.mask);
+	}
+	spin_unlock(&gi->alert.ref_lock);
+
+	return gib->nisc;
+}
+EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
+
+/**
+ * kvm_s390_gisc_unregister - unregister a guest ISC
+ *
+ * @kvm:  the kernel vm to work with
+ * @gisc: the guest interruption sub class to register
+ *
+ * The function reduces the vm specific alert mask to use.
+ * The effective IAM mask in the GISA is updated as well
+ * in case the GISA is not part of the GIB alert list.
+ * It will be updated latest when the IAM gets restored
+ * by gisa_get_ipm_or_restore_iam().
+ *
+ * Returns: the nonspecific ISC (NISC) the gib alert mechanism
+ *          has registered with the channel subsystem.
+ *          -ENODEV in case the vm uses no GISA
+ *          -ERANGE in case the guest ISC is invalid
+ *          -EINVAL in case the guest ISC is not registered
+ */
+int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
+{
+	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+	int rc = 0;
+
+	if (!gi->origin)
+		return -ENODEV;
+	if (gisc > MAX_ISC)
+		return -ERANGE;
+
+	spin_lock(&gi->alert.ref_lock);
+	if (gi->alert.ref_count[gisc] == 0) {
+		rc = -EINVAL;
+		goto out;
+	}
+	gi->alert.ref_count[gisc]--;
+	if (gi->alert.ref_count[gisc] == 0) {
+		gi->alert.mask &= ~(0x80 >> gisc);
+		gisa_set_iam(gi->origin, gi->alert.mask);
+	}
+out:
+	spin_unlock(&gi->alert.ref_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
+
+static void gib_alert_irq_handler(struct airq_struct *airq)
+{
+	inc_irq_stat(IRQIO_GAL);
+	process_gib_alert_list();
+}
+
+static struct airq_struct gib_alert_irq = {
+	.handler = gib_alert_irq_handler,
+	.lsi_ptr = &gib_alert_irq.lsi_mask,
+};
+
+void kvm_s390_gib_destroy(void)
+{
+	if (!gib)
+		return;
+	chsc_sgib(0);
+	unregister_adapter_interrupt(&gib_alert_irq);
+	free_page((unsigned long)gib);
+	gib = NULL;
+}
+
+int kvm_s390_gib_init(u8 nisc)
+{
+	int rc = 0;
+
+	if (!css_general_characteristics.aiv) {
+		KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
+		goto out;
+	}
+
+	gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!gib) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	gib_alert_irq.isc = nisc;
+	if (register_adapter_interrupt(&gib_alert_irq)) {
+		pr_err("Registering the GIB alert interruption handler failed\n");
+		rc = -EIO;
+		goto out_free_gib;
+	}
+
+	gib->nisc = nisc;
+	if (chsc_sgib((u32)(u64)gib)) {
+		pr_err("Associating the GIB with the AIV facility failed\n");
+		free_page((unsigned long)gib);
+		gib = NULL;
+		rc = -EIO;
+		goto out_unreg_gal;
+	}
+
+	KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
+	goto out;
+
+out_unreg_gal:
+	unregister_adapter_interrupt(&gib_alert_irq);
+out_free_gib:
+	free_page((unsigned long)gib);
+	gib = NULL;
+out:
+	return rc;
+}
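
Worth noting in the hunk above: gisa_vcpu_kicker() and process_gib_alert_list() recover the owning VM from a bare GISA pointer. A condensed sketch of that mechanism, which works because every GISA lives inside its VM's sie_page2 and sie_page2 now carries a struct kvm back-pointer (see the kvm_host.h hunk; the sketch_ name is illustrative):

#include <asm/kvm_host.h>	/* struct sie_page2, struct kvm_s390_gisa */

/* GISA -> sie_page2 -> kvm, as done in gisa_vcpu_kicker() above. */
static struct kvm *sketch_gisa_to_kvm(struct kvm_s390_gisa *gisa)
{
	return container_of(gisa, struct sie_page2, gisa)->kvm;
}
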
@@ -432,11 +432,18 @@ int kvm_arch_init(void *opaque)
 	/* Register floating interrupt controller interface. */
 	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
 	if (rc) {
-		pr_err("Failed to register FLIC rc=%d\n", rc);
+		pr_err("A FLIC registration call failed with rc=%d\n", rc);
 		goto out_debug_unreg;
 	}
+
+	rc = kvm_s390_gib_init(GAL_ISC);
+	if (rc)
+		goto out_gib_destroy;
+
 	return 0;
 
+out_gib_destroy:
+	kvm_s390_gib_destroy();
 out_debug_unreg:
 	debug_unregister(kvm_s390_dbf);
 	return rc;
@@ -444,6 +451,7 @@ int kvm_arch_init(void *opaque)
 void kvm_arch_exit(void)
 {
+	kvm_s390_gib_destroy();
 	debug_unregister(kvm_s390_dbf);
 }
@@ -1258,11 +1266,65 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
 static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
 					  struct kvm_device_attr *attr)
 {
-	/*
-	 * Once supported by kernel + hw, we have to store the subfunctions
-	 * in kvm->arch and remember that user space configured them.
-	 */
-	return -ENXIO;
+	mutex_lock(&kvm->lock);
+	if (kvm->created_vcpus) {
+		mutex_unlock(&kvm->lock);
+		return -EBUSY;
+	}
+
+	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
+			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
+		mutex_unlock(&kvm->lock);
+		return -EFAULT;
+	}
+	mutex_unlock(&kvm->lock);
+
+	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
+	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
+	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
+	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
+	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
+	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+
+	return 0;
 }
 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
@@ -1381,12 +1443,56 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
 					  struct kvm_device_attr *attr)
 {
-	/*
-	 * Once we can actually configure subfunctions (kernel + hw support),
-	 * we have to check if they were already set by user space, if so copy
-	 * them from kvm->arch.
-	 */
-	return -ENXIO;
+	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
+			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
+		return -EFAULT;
+
+	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
+	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
+	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
+	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
+	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
+	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
+		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+
+	return 0;
 }
 static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
@@ -1395,8 +1501,55 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
 			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
 		return -EFAULT;
+
+	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
+		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
+		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
+	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
+	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
+	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
+	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
+	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
+	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
+	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
+	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
+		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
+		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+
 	return 0;
 }
 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	int ret = -ENXIO;
@@ -1514,10 +1667,9 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
 	case KVM_S390_VM_CPU_MACHINE_FEAT:
 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
+	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
 		ret = 0;
 		break;
-	/* configuring subfunctions is not supported yet */
-	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
 	default:
 		ret = -ENXIO;
 		break;
@@ -2209,6 +2361,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (!kvm->arch.sie_page2)
 		goto out_err;
+	kvm->arch.sie_page2->kvm = kvm;
 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
 
 	for (i = 0; i < kvm_s390_fac_size(); i++) {
@@ -2218,6 +2371,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
 					      kvm_s390_fac_base[i];
 	}
+	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
 
 	/* we are always in czam mode - even on pre z14 machines */
 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
@@ -2812,7 +2966,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.sie_block->icpua = id;
 	spin_lock_init(&vcpu->arch.local_int.lock);
-	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
+	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
 	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
 		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
 	seqcount_init(&vcpu->arch.cputm_seqcount);
@@ -3458,6 +3612,8 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 		kvm_s390_patch_guest_per_regs(vcpu);
 	}
 
+	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
+
 	vcpu->arch.sie_block->icptcode = 0;
 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
@@ -4293,12 +4449,12 @@ static int __init kvm_s390_init(void)
 	int i;
 
 	if (!sclp.has_sief2) {
-		pr_info("SIE not available\n");
+		pr_info("SIE is not available\n");
 		return -ENODEV;
 	}
 
 	if (nested && hpage) {
-		pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently");
+		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
 		return -EINVAL;
 	}
...
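
The kvm-s390.c hunks above make the processor subfunctions configurable from user space, but only before the first vCPU is created (later attempts return -EBUSY). A hedged userspace sketch of that flow, using the standard s390 VM device-attribute ioctl; error handling is omitted and vm_fd is assumed to be an open KVM VM file descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Must run before any KVM_CREATE_VCPU on this VM (see -EBUSY above). */
static int sketch_set_subfuncs(int vm_fd, struct kvm_s390_vm_cpu_subfunc *sf)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
		.addr  = (__u64)(unsigned long)sf,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
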
@@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
 {
-	return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
+	return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -381,6 +381,8 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
 void kvm_s390_gisa_init(struct kvm *kvm);
 void kvm_s390_gisa_clear(struct kvm *kvm);
 void kvm_s390_gisa_destroy(struct kvm *kvm);
+int kvm_s390_gib_init(u8 nisc);
+void kvm_s390_gib_destroy(void);
 
 /* implemented in guestdbg.c */
 void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
...
@@ -1382,3 +1382,40 @@ int chsc_pnso_brinfo(struct subchannel_id schid,
 	return chsc_error_from_response(brinfo_area->response.code);
 }
 EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
+
+int chsc_sgib(u32 origin)
+{
+	struct {
+		struct chsc_header request;
+		u16 op;
+		u8  reserved01[2];
+		u8  reserved02:4;
+		u8  fmt:4;
+		u8  reserved03[7];
+		/* operation data area begin */
+		u8  reserved04[4];
+		u32 gib_origin;
+		u8  reserved05[10];
+		u8  aix;
+		u8  reserved06[4029];
+		struct chsc_header response;
+		u8  reserved07[4];
+	} *sgib_area;
+	int ret;
+
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sgib_area = chsc_page;
+	sgib_area->request.length = 0x0fe0;
+	sgib_area->request.code = 0x0021;
+	sgib_area->op = 0x1;
+	sgib_area->gib_origin = origin;
+
+	ret = chsc(sgib_area);
+	if (ret == 0)
+		ret = chsc_error_from_response(sgib_area->response.code);
+	spin_unlock_irq(&chsc_page_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(chsc_sgib);
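
chsc_sgib() is used in pairs: pass the GIB origin to associate it with the AIV facility, and pass a zero origin to disassociate, mirroring kvm_s390_gib_init()/kvm_s390_gib_destroy() above. A hedged sketch (the sketch_* wrappers are illustrative, not from the patch):

/* The GIB must be a 31-bit addressable page, as allocated with GFP_DMA. */
static int sketch_gib_attach(struct kvm_s390_gib *gib)
{
	return chsc_sgib((u32)(u64)gib);	/* associate GIB with AIV */
}

static void sketch_gib_detach(void)
{
	chsc_sgib(0);				/* disassociate */
}
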
@@ -164,6 +164,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp);
 int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
 	      u64 summary_indicator_addr, u64 subchannel_indicator_addr);
+int chsc_sgib(u32 origin);
 int chsc_error_from_response(int response);
 int chsc_siosl(struct subchannel_id schid);
...