Commit a4cc3889 authored by Linus Torvalds

Merge branch 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM guest: prevent tracing recursion with kvmclock
  Revert "KVM: PPC: Add support for explicit HIOR setting"
  KVM: VMX: Check for automatic switch msr table overflow
  KVM: VMX: Add support for guest/host-only profiling
  KVM: VMX: add support for switching of PERF_GLOBAL_CTRL
  KVM: s390: announce SYNC_MMU
  KVM: s390: Fix tprot locking
  KVM: s390: handle SIGP sense running intercepts
  KVM: s390: Fix RUNNING flag misinterpretation
parents bb893d15 95ef1e52
@@ -148,12 +148,6 @@ struct kvm_regs {
 #define KVM_SREGS_E_UPDATE_DEC	(1 << 2)
 #define KVM_SREGS_E_UPDATE_DBSR	(1 << 3)
 
-/*
- * Book3S special bits to indicate contents in the struct by maintaining
- * backwards compatibility with older structs. If adding a new field,
- * please make sure to add a flag for that new field */
-#define KVM_SREGS_S_HIOR	(1 << 0)
-
 /*
  * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
  * previous KVM_GET_REGS.
@@ -179,8 +173,6 @@ struct kvm_sregs {
 			__u64 ibat[8];
 			__u64 dbat[8];
 		} ppc32;
-		__u64 flags; /* KVM_SREGS_S_ */
-		__u64 hior;
 	} s;
 	struct {
 		union {
......
@@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s {
 #endif
 	int context_id[SID_CONTEXTS];
 
-	bool hior_sregs;	/* HIOR is set by SREGS, not PVR */
-
 	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
......
@@ -151,7 +151,6 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 #ifdef CONFIG_PPC_BOOK3S_64
 	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
 		kvmppc_mmu_book3s_64_init(vcpu);
-		if (!to_book3s(vcpu)->hior_sregs)
-			to_book3s(vcpu)->hior = 0xfff00000;
+		to_book3s(vcpu)->hior = 0xfff00000;
 		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
 		vcpu->arch.cpu_type = KVM_CPU_3S_64;
@@ -159,7 +158,6 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 #endif
 	{
 		kvmppc_mmu_book3s_32_init(vcpu);
-		if (!to_book3s(vcpu)->hior_sregs)
-			to_book3s(vcpu)->hior = 0;
+		to_book3s(vcpu)->hior = 0;
 		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
 		vcpu->arch.cpu_type = KVM_CPU_3S_32;
@@ -797,9 +795,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	if (sregs->u.s.flags & KVM_SREGS_S_HIOR)
-		sregs->u.s.hior = to_book3s(vcpu)->hior;
-
 	return 0;
 }
@@ -836,11 +831,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	/* Flush the MMU after messing with the segments */
 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
 
-	if (sregs->u.s.flags & KVM_SREGS_S_HIOR) {
-		to_book3s(vcpu)->hior_sregs = true;
-		to_book3s(vcpu)->hior = sregs->u.s.hior;
-	}
-
 	return 0;
 }
......
@@ -208,7 +208,6 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_PPC_BOOKE_SREGS:
 #else
 	case KVM_CAP_PPC_SEGSTATE:
-	case KVM_CAP_PPC_HIOR:
 	case KVM_CAP_PPC_PAPR:
 #endif
 	case KVM_CAP_PPC_UNSET_IRQ:
......
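Capabilities such as KVM_CAP_PPC_PAPR above (or KVM_CAP_SYNC_MMU, which the s390 change further down starts announcing) are probed from userspace with the KVM_CHECK_EXTENSION ioctl on /dev/kvm; a positive return value means the capability is available. The following minimal, standalone sketch is illustrative only and is not part of this merge.

/* probe a couple of the capabilities touched by this merge */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* > 0 means the capability is present on this host */
	printf("KVM_CAP_PPC_PAPR: %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_PAPR));
	printf("KVM_CAP_SYNC_MMU: %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU));
	return 0;
}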
@@ -47,7 +47,7 @@ struct sca_block {
 #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
-#define CPUSTAT_HOST       0x80000000
+#define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
 #define CPUSTAT_STOP_INT   0x04000000
@@ -139,6 +139,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_stfl;
 	u32 instruction_tprot;
 	u32 instruction_sigp_sense;
+	u32 instruction_sigp_sense_running;
 	u32 instruction_sigp_external_call;
 	u32 instruction_sigp_emergency;
 	u32 instruction_sigp_stop;
......
@@ -70,7 +70,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 	}
 
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
......
@@ -132,7 +132,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	int rc = 0;
 
 	vcpu->stat.exit_stop_request++;
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
 		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
@@ -149,6 +148,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+		atomic_set_mask(CPUSTAT_STOPPED,
+				&vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
 		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
 		rc = -EOPNOTSUPP;
......
@@ -252,6 +252,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 			  offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
 		if (rc == -EFAULT)
 			exception = 1;
+		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 		break;
 
 	case KVM_S390_PROGRAM_INT:
......
@@ -65,6 +65,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
+	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -127,6 +128,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_S390_PSW:
 	case KVM_CAP_S390_GMAP:
+	case KVM_CAP_SYNC_MMU:
 		r = 1;
 		break;
 	default:
@@ -270,10 +272,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
 	gmap_enable(vcpu->arch.gmap);
+	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->arch.guest_acrs);
@@ -301,7 +305,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
+	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
+						    CPUSTAT_SM |
+						    CPUSTAT_STOPPED);
 	vcpu->arch.sie_block->ecb   = 6;
 	vcpu->arch.sie_block->eca   = 0xC1002001U;
 	vcpu->arch.sie_block->fac   = (int) (long) facilities;
@@ -428,7 +434,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
 	int rc = 0;
 
-	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
 		rc = -EBUSY;
 	else {
 		vcpu->run->psw_mask = psw.mask;
@@ -501,7 +507,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
 	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
......
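The s390 hunks above stop treating CPUSTAT_RUNNING, which is only set while the vcpu thread is loaded on a host cpu (kvm_arch_vcpu_load/put), as a stand-in for "not stopped", and introduce a dedicated CPUSTAT_STOPPED flag for the architected stopped state. A standalone, userspace sketch of the distinction is below; it is not kernel code, the RUNNING bit value and the helper names are invented for the demo, and only CPUSTAT_STOPPED's value is taken from the hunk above.

#include <stdio.h>

#define CPUSTAT_STOPPED 0x80000000u
#define CPUSTAT_RUNNING 0x00000800u	/* value chosen for the demo only */

static unsigned int cpuflags = CPUSTAT_STOPPED;	/* vcpu starts stopped */

static void set_mask(unsigned int mask)   { cpuflags |= mask; }
static void clear_mask(unsigned int mask) { cpuflags &= ~mask; }

/* old check: "stopped" was read as "not currently running in SIE",
 * which is also true whenever the host has the vcpu scheduled out */
static int stopped_old(void) { return !(cpuflags & CPUSTAT_RUNNING); }

/* new check: look at the architected stopped state directly */
static int stopped_new(void) { return (cpuflags & CPUSTAT_STOPPED) ? 1 : 0; }

int main(void)
{
	clear_mask(CPUSTAT_STOPPED);	/* guest cpu is started (KVM_RUN) */
	set_mask(CPUSTAT_RUNNING);	/* vcpu_load: thread is on a host cpu */
	clear_mask(CPUSTAT_RUNNING);	/* vcpu_put: preempted, but still started */

	printf("old check says stopped=%d, new check says stopped=%d\n",
	       stopped_old(), stopped_new());
	return 0;
}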
@@ -336,6 +336,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
 	u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
 	struct vm_area_struct *vma;
+	unsigned long user_address;
 
 	vcpu->stat.instruction_tprot++;
@@ -349,9 +350,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 
+	/* we must resolve the address without holding the mmap semaphore.
+	 * This is ok since the userspace hypervisor is not supposed to change
+	 * the mapping while the guest queries the memory. Otherwise the guest
+	 * might crash or get wrong info anyway. */
+	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
+
 	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm,
-		       (unsigned long) __guestaddr_to_user(vcpu, address1));
+	vma = find_vma(current->mm, user_address);
 	if (!vma) {
 		up_read(&current->mm->mmap_sem);
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
......
@@ -31,9 +31,11 @@
 #define SIGP_SET_PREFIX        0x0d
 #define SIGP_STORE_STATUS_ADDR 0x0e
 #define SIGP_SET_ARCH          0x12
+#define SIGP_SENSE_RUNNING     0x15
 
 /* cpu status bits */
 #define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
+#define SIGP_STAT_NOT_RUNNING       0x00000400UL
 #define SIGP_STAT_INCORRECT_STATE   0x00000200UL
 #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
 #define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
@@ -57,8 +59,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	spin_lock(&fi->lock);
 	if (fi->local_int[cpu_addr] == NULL)
 		rc = 3; /* not operational */
-	else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		 & CPUSTAT_RUNNING) {
+	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
+		   & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		rc = 1; /* status stored */
 	} else {
@@ -251,7 +253,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	spin_lock_bh(&li->lock);
 	/* cpu must be in stopped state */
-	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		rc = 1; /* incorrect state */
 		*reg &= SIGP_STAT_INCORRECT_STATE;
 		kfree(inti);
@@ -275,6 +277,38 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 	return rc;
 }
 
+static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
+				unsigned long *reg)
+{
+	int rc;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+
+	if (cpu_addr >= KVM_MAX_VCPUS)
+		return 3; /* not operational */
+
+	spin_lock(&fi->lock);
+	if (fi->local_int[cpu_addr] == NULL)
+		rc = 3; /* not operational */
+	else {
+		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+		    & CPUSTAT_RUNNING) {
+			/* running */
+			rc = 1;
+		} else {
+			/* not running */
+			*reg &= 0xffffffff00000000UL;
+			*reg |= SIGP_STAT_NOT_RUNNING;
+			rc = 0;
+		}
+	}
+	spin_unlock(&fi->lock);
+
+	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
+		   rc);
+
+	return rc;
+}
+
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -331,6 +365,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
 				       &vcpu->arch.guest_gprs[r1]);
 		break;
+	case SIGP_SENSE_RUNNING:
+		vcpu->stat.instruction_sigp_sense_running++;
+		rc = __sigp_sense_running(vcpu, cpu_addr,
+					  &vcpu->arch.guest_gprs[r1]);
+		break;
 	case SIGP_RESTART:
 		vcpu->stat.instruction_sigp_restart++;
 		/* user space must know about restart */
......
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
 	struct pvclock_vcpu_time_info *src;
 	cycle_t ret;
 
-	src = &get_cpu_var(hv_clock);
+	preempt_disable_notrace();
+	src = &__get_cpu_var(hv_clock);
 	ret = pvclock_clocksource_read(src);
-	put_cpu_var(hv_clock);
+	preempt_enable_notrace();
 	return ret;
 }
......
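The kvmclock hunk above avoids an ftrace recursion: the tracer takes its timestamps (indirectly) from kvm_clock_read(), and get_cpu_var() disables preemption through a helper that is itself traceable, so every timestamp could re-enter the tracer; the *_notrace preempt helpers break that loop. Below is a standalone, deliberately simplified sketch of that failure shape. It is not kernel code and every name in it is invented for the demo.

#include <stdio.h>

static unsigned long long fake_clock;
static int depth;

static unsigned long long traced_clock_read(void);

/* stand-in for the tracer's per-call hook: it records a timestamp */
static void trace_hook(const char *what)
{
	if (depth > 3) {	/* cap the demo; the kernel would just keep recursing */
		printf("%*s... and so on: unbounded recursion\n", depth * 2, "");
		return;
	}
	depth++;
	printf("%*strace %s @ %llu\n", depth * 2, "", what, traced_clock_read());
	depth--;
}

/* a clock read whose helpers are themselves traced (the old code path) */
static unsigned long long traced_clock_read(void)
{
	trace_hook("preempt_disable");	/* traced helper re-enters the tracer */
	return ++fake_clock;
}

/* the fixed shape: nothing on this path calls back into the tracer */
static unsigned long long notrace_clock_read(void)
{
	return ++fake_clock;
}

int main(void)
{
	trace_hook("some_traced_function");	/* kicks off the runaway chain */
	printf("notrace read: %llu\n", notrace_clock_read());
	return 0;
}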
@@ -39,6 +39,7 @@
 #include <asm/mce.h>
 #include <asm/i387.h>
 #include <asm/xcr.h>
+#include <asm/perf_event.h>
 
 #include "trace.h"
 
@@ -118,7 +119,7 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
-#define NR_AUTOLOAD_MSRS 1
+#define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
 struct vmcs {
@@ -622,6 +623,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
 static bool cpu_has_load_ia32_efer;
+static bool cpu_has_load_perf_global_ctrl;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,16 +1193,35 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr_special(unsigned long entry,
+		unsigned long exit)
+{
+	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
+	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
-	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+	switch (msr) {
+	case MSR_EFER:
+		if (cpu_has_load_ia32_efer) {
+			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+					VM_EXIT_LOAD_IA32_EFER);
 			return;
 		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (cpu_has_load_perf_global_ctrl) {
+			clear_atomic_switch_msr_special(
+					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+			return;
+		}
+		break;
+	}
 
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
@@ -1215,25 +1236,55 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
+static void add_atomic_switch_msr_special(unsigned long entry,
+		unsigned long exit, unsigned long guest_val_vmcs,
+		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+{
+	vmcs_write64(guest_val_vmcs, guest_val);
+	vmcs_write64(host_val_vmcs, host_val);
+	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
+	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 				  u64 guest_val, u64 host_val)
 {
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
-	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-		vmcs_write64(GUEST_IA32_EFER, guest_val);
-		vmcs_write64(HOST_IA32_EFER, host_val);
-		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+	switch (msr) {
+	case MSR_EFER:
+		if (cpu_has_load_ia32_efer) {
+			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+					VM_EXIT_LOAD_IA32_EFER,
+					GUEST_IA32_EFER,
+					HOST_IA32_EFER,
+					guest_val, host_val);
+			return;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (cpu_has_load_perf_global_ctrl) {
+			add_atomic_switch_msr_special(
+					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+					GUEST_IA32_PERF_GLOBAL_CTRL,
+					HOST_IA32_PERF_GLOBAL_CTRL,
+					guest_val, host_val);
 			return;
 		}
+		break;
+	}
 
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
 
-	if (i == m->nr) {
+	if (i == NR_AUTOLOAD_MSRS) {
+		printk_once(KERN_WARNING"Not enough mst switch entries. "
+				"Can't add msr %x\n", msr);
+		return;
+	} else if (i == m->nr) {
 		++m->nr;
 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
@@ -2455,6 +2506,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
 				   VM_EXIT_LOAD_IA32_EFER);
 
+	cpu_has_load_perf_global_ctrl =
+		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+				   VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+
+	/*
+	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
+	 * but due to arrata below it can't be used. Workaround is to use
+	 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+	 *
+	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
+	 *
+	 * AAK155             (model 26)
+	 * AAP115             (model 30)
+	 * AAT100             (model 37)
+	 * BC86,AAY89,BD102   (model 44)
+	 * BA97               (model 46)
+	 *
+	 */
+	if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
+		switch (boot_cpu_data.x86_model) {
+		case 26:
+		case 30:
+		case 37:
+		case 44:
+		case 46:
+			cpu_has_load_perf_global_ctrl = false;
+			printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+					"does not work properly. Using workaround\n");
+			break;
+		default:
+			break;
+		}
+	}
+
 	return 0;
 }
@@ -5968,6 +6055,24 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
+static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+{
+	int i, nr_msrs;
+	struct perf_guest_switch_msr *msrs;
+
+	msrs = perf_guest_get_msrs(&nr_msrs);
+
+	if (!msrs)
+		return;
+
+	for (i = 0; i < nr_msrs; i++)
+		if (msrs[i].host == msrs[i].guest)
+			clear_atomic_switch_msr(vmx, msrs[i].msr);
+		else
+			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+					msrs[i].host);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #define Q "q"
@@ -6017,6 +6122,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
+	atomic_switch_perf_msrs(vmx);
+
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */
......
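The NR_AUTOLOAD_MSRS and add_atomic_switch_msr changes above enlarge the VMX MSR autoload table and, instead of silently writing past its end, warn once and drop the request when every slot is taken. The standalone sketch below mirrors that bookkeeping with plain arrays instead of VMCS fields; it is illustrative only and all names in it are invented.

#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

struct msr_entry {
	unsigned index;
	unsigned long long value;
};

static struct msr_entry guest[NR_AUTOLOAD_MSRS];
static unsigned nr;	/* slots currently in use */

static void add_autoload_msr(unsigned msr, unsigned long long val)
{
	unsigned i;

	for (i = 0; i < nr; ++i)	/* reuse an existing slot if present */
		if (guest[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS) {	/* table full: warn, don't overflow */
		fprintf(stderr, "not enough autoload slots, can't add msr %#x\n", msr);
		return;
	} else if (i == nr) {
		++nr;			/* claim a new slot */
	}

	guest[i].index = msr;
	guest[i].value = val;
}

int main(void)
{
	unsigned m;

	for (m = 0; m < 10; ++m)	/* the last two adds trigger the warning */
		add_autoload_msr(0xc0000080u + m, m);
	printf("%u of %d slots used\n", nr, NR_AUTOLOAD_MSRS);
	return 0;
}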
@@ -555,7 +555,6 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_PPC_SMT 64
 #define KVM_CAP_PPC_RMA	65
 #define KVM_CAP_MAX_VCPUS 66       /* returns max vcpus per vm */
-#define KVM_CAP_PPC_HIOR 67
 #define KVM_CAP_PPC_PAPR 68
 #define KVM_CAP_S390_GMAP 71
......