Commit c2687cf9 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 - ARM/MIPS: Fixes for ioctls when copy_from_user returns nonzero
 - x86: Small fix for Skylake TSC scaling
 - x86: Improved fix for last week's missed hardware breakpoint bug

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: x86: Update tsc multiplier on change.
  mips/kvm: fix ioctl error handling
  arm/arm64: KVM: Fix ioctl error handling
  KVM: x86: fix root cause for missed hardware breakpoints
parents 4237b2e6 2680d6da
@@ -161,7 +161,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
         u64 val;
 
         val = kvm_arm_timer_get_reg(vcpu, reg->id);
-        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 
 static unsigned long num_core_regs(void)
...
@@ -194,7 +194,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
         u64 val;
 
         val = kvm_arm_timer_get_reg(vcpu, reg->id);
-        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 
 /**
...
@@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
         } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
                 void __user *uaddr = (void __user *)(long)reg->addr;
 
-                return copy_to_user(uaddr, vs, 16);
+                return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
         } else {
                 return -EINVAL;
         }
@@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
         } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
                 void __user *uaddr = (void __user *)(long)reg->addr;
 
-                return copy_from_user(vs, uaddr, 16);
+                return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
         } else {
                 return -EINVAL;
         }
...
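Context for the four hunks above: copy_to_user() and copy_from_user() return the number of bytes that could *not* be copied, not an error code, so returning that value straight from a KVM_GET_ONE_REG/KVM_SET_ONE_REG handler hands userspace a positive byte count where -EFAULT is expected. Below is a minimal, standalone userspace sketch of the corrected pattern; mock_copy_to_user() and reg_get() are hypothetical stand-ins for the kernel helpers, not kernel code.

/*
 * Illustration only: copy_to_user()-style helpers report how many bytes
 * were NOT copied, so any nonzero result must be turned into -EFAULT
 * instead of being returned to userspace as-is.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for copy_to_user(): returns bytes left uncopied. */
static unsigned long mock_copy_to_user(void *dst, const void *src,
                                       size_t n, size_t faulted)
{
        size_t copied = faulted < n ? n - faulted : 0;

        memcpy(dst, src, copied);
        return n - copied;
}

/* Corrected handler shape: any shortfall becomes -EFAULT, success is 0. */
static int reg_get(void *uaddr, uint64_t val, size_t faulted)
{
        return mock_copy_to_user(uaddr, &val, sizeof(val), faulted) ?
               -EFAULT : 0;
}

int main(void)
{
        uint64_t dst;

        printf("full copy:    %d\n", reg_get(&dst, 42, 0)); /* 0            */
        printf("partial copy: %d\n", reg_get(&dst, 42, 3)); /* -14 = -EFAULT */
        return 0;
}

The same "? -EFAULT : 0" conversion appears verbatim in each of the four hunks.
...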
@@ -596,6 +596,8 @@ struct vcpu_vmx {
         /* Support for PML */
 #define PML_ENTITY_NUM         512
         struct page *pml_pg;
+
+        u64 current_tsc_ratio;
 };
 
 enum segment_cache_field {
@@ -2127,14 +2129,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
 
-                /* Setup TSC multiplier */
-                if (cpu_has_vmx_tsc_scaling())
-                        vmcs_write64(TSC_MULTIPLIER,
-                                     vcpu->arch.tsc_scaling_ratio);
-
                 vmx->loaded_vmcs->cpu = cpu;
         }
 
+        /* Setup TSC multiplier */
+        if (kvm_has_tsc_control &&
+            vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
+                vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+                vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+        }
+
         vmx_vcpu_pi_load(vcpu, cpu);
 }
 
...
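The vmx.c hunks above move the TSC_MULTIPLIER update out of the "vCPU migrated to a new physical CPU" branch: the old code programmed the multiplier only on migration, so a change to vcpu->arch.tsc_scaling_ratio made while the vCPU stayed on the same core was never written to the VMCS. The fix caches the last value written per vCPU (current_tsc_ratio) and rewrites the field on every load whenever it differs. Below is a hedged, self-contained sketch of that cache-and-write-on-change idea; struct mock_vcpu, mock_vmcs_write64() and load_tsc_multiplier() are invented for illustration, the real logic lives in vmx_vcpu_load() as shown above.

/*
 * Sketch only: write the "hardware" field on every load, but only when the
 * requested ratio differs from the last value written.
 */
#include <stdint.h>
#include <stdio.h>

struct mock_vcpu {
        uint64_t tsc_scaling_ratio;     /* ratio requested by arch code     */
        uint64_t current_tsc_ratio;     /* last value written to "hardware" */
};

static void mock_vmcs_write64(const char *field, uint64_t val)
{
        printf("vmcs_write64(%s, %#llx)\n", field, (unsigned long long)val);
}

/* Runs on every vCPU load; touches the VMCS only when the ratio is stale. */
static void load_tsc_multiplier(struct mock_vcpu *v)
{
        if (v->current_tsc_ratio != v->tsc_scaling_ratio) {
                v->current_tsc_ratio = v->tsc_scaling_ratio;
                mock_vmcs_write64("TSC_MULTIPLIER", v->current_tsc_ratio);
        }
}

int main(void)
{
        struct mock_vcpu v = { .tsc_scaling_ratio = 0x10000 };

        load_tsc_multiplier(&v);        /* first load: one write           */
        load_tsc_multiplier(&v);        /* same ratio: no redundant write  */
        v.tsc_scaling_ratio = 0x20000;  /* ratio changed by userspace      */
        load_tsc_multiplier(&v);        /* changed: written again          */
        return 0;
}

Because the comparison no longer depends on a physical-CPU migration, a ratio change made while the vCPU stays on the same core is still propagated on the next load.
...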
@@ -2752,7 +2752,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         }
 
         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
-        vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -6619,12 +6618,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
          * KVM_DEBUGREG_WONT_EXIT again.
          */
         if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
-                int i;
-
                 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
                 kvm_x86_ops->sync_dirty_debug_regs(vcpu);
-                for (i = 0; i < KVM_NR_DB_REGS; i++)
-                        vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+                kvm_update_dr0123(vcpu);
+                kvm_update_dr6(vcpu);
+                kvm_update_dr7(vcpu);
+                vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
         }
 
         /*
...