Commit 57b5981c authored by Paolo Bonzini


Merge tag 'kvm-s390-20140429' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

1. Guest handling fixes
The handling of MVPG, PFMF and Test Block is fixed to better follow
the architecture. None of these fixes is critical for any current
Linux guests, but let's play safe.
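
The common building block of the PFMF and TEST BLOCK fixes is the new
kvm_s390_check_low_addr_protection() helper; condensed from the priv.c hunks
in this diff (no code beyond what the diff itself adds):

    if (kvm_s390_check_low_addr_protection(vcpu, addr))
            return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);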

2. Optimization for single CPU guests
We can enable the IBS facility if only one VCPU is running (!STOPPED
state). We also enable this optimization for guests with more than one VCPU
as soon as all but one VCPU is in the stopped state. This will help guests that
have tools like cpuplugd (from s390-utils) that do dynamic offline/
online of CPUs.
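
Condensed from the kvm_s390_vcpu_start()/kvm_s390_vcpu_stop() hunks below
(a sketch of the decision only, not additional code):

    /* on start: count the other non-stopped VCPUs first */
    if (started_vcpus == 0)
            __enable_ibs_on_vcpu(vcpu);             /* only runnable VCPU -> speed it up */
    else if (started_vcpus == 1)
            __disable_ibs_on_all_vcpus(vcpu->kvm);  /* a second VCPU is starting */

    /* on stop: if exactly one other VCPU stays runnable, speed that one up */
    if (started_vcpus == 1)
            __enable_ibs_on_vcpu(started_vcpu);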

3. NOTES
There is one non-s390 change in include/linux/kvm_host.h that
introduces 2 defines for VCPU requests:
define KVM_REQ_ENABLE_IBS        23
define KVM_REQ_DISABLE_IBS       24
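
These requests follow the usual kvm_make_request()/kvm_check_request()
pattern; a minimal sketch of how the hunks below wire up the enable side
(the disable side is symmetric):

    /* requester: ask the target VCPU to enable IBS and kick it out of SIE */
    kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
    exit_sie_sync(vcpu);

    /* target VCPU, in kvm_s390_handle_requests() before re-entering SIE */
    if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
            if (!ibs_enabled(vcpu))
                    atomic_set_mask(CPUSTAT_IBS, &vcpu->arch.sie_block->cpuflags);
            goto retry;
    }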
parents e4c9a5a1 8ad35755
arch/s390/include/asm/kvm_host.h
@@ -72,6 +72,7 @@ struct sca_block {
 #define CPUSTAT_ZARCH      0x00000800
 #define CPUSTAT_MCDS       0x00000100
 #define CPUSTAT_SM         0x00000080
+#define CPUSTAT_IBS        0x00000040
 #define CPUSTAT_G          0x00000008
 #define CPUSTAT_GED        0x00000004
 #define CPUSTAT_J          0x00000002
@@ -411,6 +412,7 @@ struct kvm_arch{
         int use_cmma;
         struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
         wait_queue_head_t ipte_wq;
+        spinlock_t start_stop_lock;
 };

 #define KVM_HVA_ERR_BAD (-1UL)
arch/s390/kvm/diag.c
@@ -176,7 +176,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
                 return -EOPNOTSUPP;
         }

-        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+        kvm_s390_vcpu_stop(vcpu);
         vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
         vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
         vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
arch/s390/kvm/gaccess.c
@@ -643,3 +643,31 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
         }
         return rc;
 }
+
+/**
+ * kvm_s390_check_low_addr_protection - check for low-address protection
+ * @ga: Guest address
+ *
+ * Checks whether an address is subject to low-address protection and sets
+ * up vcpu->arch.pgm accordingly if necessary.
+ *
+ * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
+ */
+int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
+{
+        struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
+        psw_t *psw = &vcpu->arch.sie_block->gpsw;
+        struct trans_exc_code_bits *tec_bits;
+
+        if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
+                return 0;
+
+        memset(pgm, 0, sizeof(*pgm));
+        tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+        tec_bits->fsi = FSI_STORE;
+        tec_bits->as = psw_bits(*psw).as;
+        tec_bits->addr = ga >> PAGE_SHIFT;
+        pgm->code = PGM_PROTECTION;
+        return pgm->code;
+}
arch/s390/kvm/gaccess.h
@@ -325,5 +325,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
 }

 int ipte_lock_held(struct kvm_vcpu *vcpu);
+int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);

 #endif /* __KVM_S390_GACCESS_H */
arch/s390/kvm/intercept.c
@@ -1,7 +1,7 @@
 /*
  * in-kernel handling for sie intercepts
  *
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2014
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -65,8 +65,7 @@ static int handle_stop(struct kvm_vcpu *vcpu)
         trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

         if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
-                atomic_set_mask(CPUSTAT_STOPPED,
-                                &vcpu->arch.sie_block->cpuflags);
+                kvm_s390_vcpu_stop(vcpu);
                 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                 rc = -EOPNOTSUPP;
@@ -234,6 +233,58 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
         return rc2;
 }

+/**
+ * Handle MOVE PAGE partial execution interception.
+ *
+ * This interception can only happen for guests with DAT disabled and
+ * addresses that are currently not mapped in the host. Thus we try to
+ * set up the mappings for the corresponding user pages here (or throw
+ * addressing exceptions in case of illegal guest addresses).
+ */
+static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
+{
+        unsigned long hostaddr, srcaddr, dstaddr;
+        psw_t *psw = &vcpu->arch.sie_block->gpsw;
+        struct mm_struct *mm = current->mm;
+        int reg1, reg2, rc;
+
+        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+        srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
+        dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
+
+        /* Make sure that the source is paged-in */
+        hostaddr = gmap_fault(srcaddr, vcpu->arch.gmap);
+        if (IS_ERR_VALUE(hostaddr))
+                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        down_read(&mm->mmap_sem);
+        rc = get_user_pages(current, mm, hostaddr, 1, 0, 0, NULL, NULL);
+        up_read(&mm->mmap_sem);
+        if (rc < 0)
+                return rc;
+
+        /* Make sure that the destination is paged-in */
+        hostaddr = gmap_fault(dstaddr, vcpu->arch.gmap);
+        if (IS_ERR_VALUE(hostaddr))
+                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        down_read(&mm->mmap_sem);
+        rc = get_user_pages(current, mm, hostaddr, 1, 1, 0, NULL, NULL);
+        up_read(&mm->mmap_sem);
+        if (rc < 0)
+                return rc;
+
+        psw->addr = __rewind_psw(*psw, 4);
+
+        return 0;
+}
+
+static int handle_partial_execution(struct kvm_vcpu *vcpu)
+{
+        if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
+                return handle_mvpg_pei(vcpu);
+
+        return -EOPNOTSUPP;
+}
+
 static const intercept_handler_t intercept_funcs[] = {
         [0x00 >> 2] = handle_noop,
         [0x04 >> 2] = handle_instruction,
@@ -245,6 +296,7 @@ static const intercept_handler_t intercept_funcs[] = {
         [0x1C >> 2] = kvm_s390_handle_wait,
         [0x20 >> 2] = handle_validity,
         [0x28 >> 2] = handle_stop,
+        [0x38 >> 2] = handle_partial_execution,
 };

 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
arch/s390/kvm/interrupt.c
@@ -413,7 +413,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
                                     &vcpu->arch.sie_block->gpsw,
                                     sizeof(psw_t));
-                atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+                kvm_s390_vcpu_start(vcpu);
                 break;
         case KVM_S390_PROGRAM_INT:
                 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
arch/s390/kvm/kvm-s390.c
@@ -458,6 +458,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         kvm->arch.css_support = 0;
         kvm->arch.use_irqchip = 0;

+        spin_lock_init(&kvm->arch.start_stop_lock);
+
         return 0;
 out_nogmap:
         debug_unregister(kvm->arch.dbf);
@@ -592,7 +594,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
         vcpu->arch.sie_block->pp = 0;
         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
         kvm_clear_async_pf_completion_queue(vcpu);
-        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+        kvm_s390_vcpu_stop(vcpu);
         kvm_s390_clear_local_irqs(vcpu);
 }
@@ -996,8 +998,15 @@ bool kvm_s390_cmma_enabled(struct kvm *kvm)
         return true;
 }

+static bool ibs_enabled(struct kvm_vcpu *vcpu)
+{
+        return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
+}
+
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
+retry:
+        s390_vcpu_unblock(vcpu);
         /*
          * We use MMU_RELOAD just to re-arm the ipte notifier for the
          * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -1005,15 +1014,34 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
          * already finished. We might race against a second unmapper that
          * wants to set the blocking bit. Lets just retry the request loop.
          */
-        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
+        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                 int rc;
                 rc = gmap_ipte_notify(vcpu->arch.gmap,
                                       vcpu->arch.sie_block->prefix,
                                       PAGE_SIZE * 2);
                 if (rc)
                         return rc;
-                s390_vcpu_unblock(vcpu);
+                goto retry;
+        }
+
+        if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
+                if (!ibs_enabled(vcpu)) {
+                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
+                        atomic_set_mask(CPUSTAT_IBS,
+                                        &vcpu->arch.sie_block->cpuflags);
+                }
+                goto retry;
         }
+
+        if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
+                if (ibs_enabled(vcpu)) {
+                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
+                        atomic_clear_mask(CPUSTAT_IBS,
+                                          &vcpu->arch.sie_block->cpuflags);
+                }
+                goto retry;
+        }
+
         return 0;
 }
@@ -1235,7 +1263,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

-        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+        kvm_s390_vcpu_start(vcpu);

         switch (kvm_run->exit_reason) {
         case KVM_EXIT_S390_SIEIC:
@@ -1362,6 +1390,109 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         return kvm_s390_store_status_unloaded(vcpu, addr);
 }

+static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
+{
+        return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
+}
+
+static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
+{
+        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
+        kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
+        exit_sie_sync(vcpu);
+}
+
+static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
+{
+        unsigned int i;
+        struct kvm_vcpu *vcpu;
+
+        kvm_for_each_vcpu(i, vcpu, kvm) {
+                __disable_ibs_on_vcpu(vcpu);
+        }
+}
+
+static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
+{
+        kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
+        kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
+        exit_sie_sync(vcpu);
+}
+
+void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
+{
+        int i, online_vcpus, started_vcpus = 0;
+
+        if (!is_vcpu_stopped(vcpu))
+                return;
+
+        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
+        /* Only one cpu at a time may enter/leave the STOPPED state. */
+        spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+
+        for (i = 0; i < online_vcpus; i++) {
+                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
+                        started_vcpus++;
+        }
+
+        if (started_vcpus == 0) {
+                /* we're the only active VCPU -> speed it up */
+                __enable_ibs_on_vcpu(vcpu);
+        } else if (started_vcpus == 1) {
+                /*
+                 * As we are starting a second VCPU, we have to disable
+                 * the IBS facility on all VCPUs to remove potentially
+                 * outstanding ENABLE requests.
+                 */
+                __disable_ibs_on_all_vcpus(vcpu->kvm);
+        }
+
+        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+        /*
+         * Another VCPU might have used IBS while we were offline.
+         * Let's play safe and flush the VCPU at startup.
+         */
+        vcpu->arch.sie_block->ihcpu = 0xffff;
+        spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+        return;
+}
+
+void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
+{
+        int i, online_vcpus, started_vcpus = 0;
+        struct kvm_vcpu *started_vcpu = NULL;
+
+        if (is_vcpu_stopped(vcpu))
+                return;
+
+        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
+        /* Only one cpu at a time may enter/leave the STOPPED state. */
+        spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+
+        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+        __disable_ibs_on_vcpu(vcpu);
+
+        for (i = 0; i < online_vcpus; i++) {
+                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
+                        started_vcpus++;
+                        started_vcpu = vcpu->kvm->vcpus[i];
+                }
+        }
+
+        if (started_vcpus == 1) {
+                /*
+                 * As we only have one VCPU left, we want to enable the
+                 * IBS facility for that VCPU to speed it up.
+                 */
+                __enable_ibs_on_vcpu(started_vcpu);
+        }
+
+        spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+        return;
+}
+
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                      struct kvm_enable_cap *cap)
 {
arch/s390/kvm/kvm-s390.h
@@ -157,6 +157,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 /* implemented in kvm-s390.c */
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
 void s390_vcpu_block(struct kvm_vcpu *vcpu);
 void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
arch/s390/kvm/priv.c
@@ -206,6 +206,9 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
         kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
         addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+        addr = kvm_s390_logical_to_effective(vcpu, addr);
+        if (kvm_s390_check_low_addr_protection(vcpu, addr))
+                return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
         addr = kvm_s390_real_to_abs(vcpu, addr);

         if (kvm_is_error_gpa(vcpu->kvm, addr))
@@ -650,6 +653,11 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

         start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
+                if (kvm_s390_check_low_addr_protection(vcpu, start))
+                        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+        }
+
         switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
         case 0x00000000:
                 end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
@@ -665,10 +673,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
         }

         while (start < end) {
-                unsigned long useraddr;
+                unsigned long useraddr, abs_addr;

-                useraddr = gmap_translate(start, vcpu->arch.gmap);
-                if (IS_ERR((void *)useraddr))
+                /* Translate guest address to host address */
+                if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
+                        abs_addr = kvm_s390_real_to_abs(vcpu, start);
+                else
+                        abs_addr = start;
+                useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
+                if (kvm_is_error_hva(useraddr))
                         return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
arch/s390/kvm/trace.h
@@ -67,6 +67,27 @@ TRACE_EVENT(kvm_s390_destroy_vcpu,
             TP_printk("destroy cpu %d", __entry->id)
         );

+/*
+ * Trace point for start and stop of vcpus.
+ */
+TRACE_EVENT(kvm_s390_vcpu_start_stop,
+            TP_PROTO(unsigned int id, int state),
+            TP_ARGS(id, state),
+
+            TP_STRUCT__entry(
+                    __field(unsigned int, id)
+                    __field(int, state)
+                    ),
+
+            TP_fast_assign(
+                    __entry->id = id;
+                    __entry->state = state;
+                    ),
+
+            TP_printk("%s cpu %d", __entry->state ? "starting" : "stopping",
+                      __entry->id)
+        );
+
 /*
  * Trace points for injection of interrupts, either per machine or
  * per vcpu.
@@ -223,6 +244,28 @@ TRACE_EVENT(kvm_s390_enable_css,
                       __entry->kvm)
         );

+/*
+ * Trace point for enabling and disabling interlocking-and-broadcasting
+ * suppression.
+ */
+TRACE_EVENT(kvm_s390_enable_disable_ibs,
+            TP_PROTO(unsigned int id, int state),
+            TP_ARGS(id, state),
+
+            TP_STRUCT__entry(
+                    __field(unsigned int, id)
+                    __field(int, state)
+                    ),
+
+            TP_fast_assign(
+                    __entry->id = id;
+                    __entry->state = state;
+                    ),
+
+            TP_printk("%s ibs on cpu %d",
+                      __entry->state ? "enabling" : "disabling", __entry->id)
+        );
+
 #endif /* _TRACE_KVMS390_H */
include/linux/kvm_host.h
@@ -134,6 +134,8 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_EPR_EXIT          20
 #define KVM_REQ_SCAN_IOAPIC       21
 #define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
+#define KVM_REQ_ENABLE_IBS        23
+#define KVM_REQ_DISABLE_IBS       24

 #define KVM_USERSPACE_IRQ_SOURCE_ID        0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID   1