Commit 4340fa55 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Radim Krčmář:
 "ARM:
   - two fixes for 4.6 vgic [Christoffer] (cc stable)

   - six fixes for 4.7 vgic [Marc]

  x86:
   - six fixes from syzkaller reports [Paolo] (two of them cc stable)

   - allow OS X to boot [Dmitry]

   - don't trust compilers [Nadav]"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: fix OOPS after invalid KVM_SET_DEBUGREGS
  KVM: x86: avoid vmalloc(0) in the KVM_SET_CPUID
  KVM: irqfd: fix NULL pointer dereference in kvm_irq_map_gsi
  KVM: fail KVM_SET_VCPU_EVENTS with invalid exception number
  KVM: x86: avoid vmalloc(0) in the KVM_SET_CPUID
  kvm: x86: avoid warning on repeated KVM_SET_TSS_ADDR
  KVM: Handle MSR_IA32_PERF_CTL
  KVM: x86: avoid write-tearing of TDP
  KVM: arm/arm64: vgic-new: Remove harmful BUG_ON
  arm64: KVM: vgic-v3: Relax synchronization when SRE==1
  arm64: KVM: vgic-v3: Prevent the guest from messing with ICC_SRE_EL1
  arm64: KVM: Make ICC_SRE_EL1 access return the configured SRE value
  KVM: arm/arm64: vgic-v3: Always resample level interrupts
  KVM: arm/arm64: vgic-v2: Always resample level interrupts
  KVM: arm/arm64: vgic-v3: Clear all dirty LRs
  KVM: arm/arm64: vgic-v2: Clear all dirty LRs
parents 719af93a d14bdb55
@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	 * Make sure stores to the GIC via the memory mapped interface
 	 * are now visible to the system register interface.
 	 */
-	dsb(st);
+	if (!cpu_if->vgic_sre)
+		dsb(st);
 
 	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
 			continue;
 
-		if (cpu_if->vgic_elrsr & (1 << i)) {
+		if (cpu_if->vgic_elrsr & (1 << i))
 			cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
-			continue;
-		}
+		else
+			cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 
-		cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 		__gic_v3_set_lr(0, i);
 	}
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	val = read_gicreg(ICC_SRE_EL2);
 	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-	isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-	write_gicreg(1, ICC_SRE_EL1);
+
+	if (!cpu_if->vgic_sre) {
+		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+		isb();
+		write_gicreg(1, ICC_SRE_EL1);
+	}
 }
 
 void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * been actually programmed with the value we want before
 	 * starting to mess with the rest of the GIC.
 	 */
-	write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
-	isb();
+	if (!cpu_if->vgic_sre) {
+		write_gicreg(0, ICC_SRE_EL1);
+		isb();
+	}
 
 	val = read_gicreg(ICH_VTR_EL2);
 	max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * (re)distributors. This ensure the guest will read the
 	 * correct values from the memory-mapped interface.
 	 */
-	isb();
-	dsb(sy);
+	if (!cpu_if->vgic_sre) {
+		isb();
+		dsb(sy);
+	}
 
 	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
 	/*
 	 * Prevent the guest from touching the GIC system registers if
 	 * SRE isn't enabled for GICv3 emulation.
 	 */
-	if (!cpu_if->vgic_sre) {
-		write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
-			     ICC_SRE_EL2);
-	}
+	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+		     ICC_SRE_EL2);
 }
 
 void __hyp_text __vgic_v3_init_lrs(void)
...
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+			   struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+	return true;
+}
+
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  access_gic_sgi },
 	/* ICC_SRE_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
-	  trap_raz_wi },
+	  access_gic_sre },
 	/* CONTEXTIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
...
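For illustration: with access_gic_sre wired up, a guest running with vgic_sre == 0 (GICv2 emulation on a GICv3 host) reads back the value KVM configured instead of the hardwired zero that trap_raz_wi returned, and its writes are silently discarded. A guest-side sketch of that behaviour (illustrative, not part of the patch; S3_0_C12_C12_5 is ICC_SRE_EL1 written with the raw Op0/Op1/CRn/CRm/Op2 encoding from the table above, so it assembles even without named GIC sysreg support):

/* aarch64 EL1 guest probe, illustrative only */
static inline unsigned long icc_sre_el1_read(void)
{
	unsigned long v;

	asm volatile("mrs %0, S3_0_C12_C12_5" : "=r" (v));
	return v;	/* now reflects KVM's configured vgic_sre */
}

static inline void icc_sre_el1_write(unsigned long v)
{
	/* Trapped by KVM and ignored (write-ignore). */
	asm volatile("msr S3_0_C12_C12_5, %0" : : "r" (v));
	asm volatile("isb");
}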
@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 			     struct kvm_cpuid_entry __user *entries)
 {
 	int r, i;
-	struct kvm_cpuid_entry *cpuid_entries;
+	struct kvm_cpuid_entry *cpuid_entries = NULL;
 
 	r = -E2BIG;
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 		goto out;
 	r = -ENOMEM;
-	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-	if (!cpuid_entries)
-		goto out;
-	r = -EFAULT;
-	if (copy_from_user(cpuid_entries, entries,
-			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-		goto out_free;
+	if (cpuid->nent) {
+		cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+					cpuid->nent);
+		if (!cpuid_entries)
+			goto out;
+		r = -EFAULT;
+		if (copy_from_user(cpuid_entries, entries,
+				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+			goto out;
+	}
 	for (i = 0; i < cpuid->nent; i++) {
 		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
 		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->cpuid_update(vcpu);
 	r = kvm_update_cpuid(vcpu);
 
-out_free:
-	vfree(cpuid_entries);
 out:
+	vfree(cpuid_entries);
 	return r;
 }
...
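The bug here was simply that nothing stopped userspace from passing nent == 0, sending a zero size into vmalloc(). A minimal userspace sketch of the trigger (a hypothetical test program, error handling elided; assumes a host with /dev/kvm):

#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_cpuid cpuid = { .nent = 0 };	/* zero entries */

	/* Before the fix this reached vmalloc(0); now the allocation
	 * and copy_from_user() are skipped entirely. */
	return ioctl(vcpu, KVM_SET_CPUID, &cpuid);
}

Note the matching cleanup change: with cpuid_entries initialized to NULL and vfree() moved under the single out: label, the error paths stay correct because vfree(NULL) is a no-op.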
@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-	*sptep = spte;
+	WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
 	 */
 	smp_wmb();
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 	ssptep = (union split_spte *)sptep;
 	sspte = (union split_spte)spte;
 
-	ssptep->spte_low = sspte.spte_low;
+	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 
 	/*
 	 * If we map the spte from present to nonpresent, we should clear
...
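This is the "don't trust compilers" fix from the pull summary: a plain store to a shadow page table entry (SPTE, the TDP entry) may legally be split, duplicated, or re-issued by the compiler, which is fatal when the hardware page walker and lockless readers see the entry concurrently. A user-space sketch of the idiom (the macros mirror the kernel's simplified volatile-cast definitions; the threading scenario is illustrative and is technically a C11 data race, which is exactly the freedom WRITE_ONCE takes away from the compiler; build with cc -O2 -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified forms of the kernel's accessors: the volatile cast
 * forces exactly one full-width access, so the compiler may not
 * tear, fuse, or re-materialize the load/store. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

static uint64_t spte;	/* stands in for a shadow page table entry */

static void *updater(void *arg)
{
	for (uint64_t v = 0; v < 1000000; v++)
		WRITE_ONCE(spte, v | (v << 32));	/* halves always match */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, updater, NULL);
	for (int i = 0; i < 1000000; i++) {
		uint64_t v = READ_ONCE(spte);	/* lockless reader */
		if ((v >> 32) != (v & 0xffffffff))
			puts("torn value observed");
	}
	pthread_join(t, NULL);
	return 0;
}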
@@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
 	case MSR_AMD64_BU_CFG2:
+	case MSR_IA32_PERF_CTL:
 		msr_info->data = 0;
 		break;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 			      | KVM_VCPUEVENT_VALID_SMM))
 		return -EINVAL;
 
+	if (events->exception.injected &&
+	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	if (dbgregs->flags)
 		return -EINVAL;
 
+	if (dbgregs->dr6 & ~0xffffffffull)
+		return -EINVAL;
+	if (dbgregs->dr7 & ~0xffffffffull)
+		return -EINVAL;
+
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = dbgregs->dr6;
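Both checks close syzkaller-found holes where userspace could load out-of-range values straight into vCPU state: an exception vector above 31 makes no sense for a hardware exception and could index out of bounds in the injection paths, and bits 63:32 of DR6/DR7 must be zero when the values are eventually loaded into the hardware debug registers, so bogus high bits oopsed the host on the next vCPU entry. A hedged userspace sketch of what now gets -EINVAL (hypothetical test, error handling elided; vcpu_fd is assumed to be an open KVM vCPU descriptor):

#include <string.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void probe(int vcpu_fd)
{
	struct kvm_vcpu_events ev;
	struct kvm_debugregs db;

	memset(&ev, 0, sizeof(ev));
	ev.exception.injected = 1;
	ev.exception.nr = 200;			/* > 31: now rejected */
	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);

	memset(&db, 0, sizeof(db));
	db.dr6 = 1ULL << 63;			/* reserved bit: now rejected */
	ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &db);
}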
@@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 	slot = id_to_memslot(slots, id);
 	if (size) {
-		if (WARN_ON(slot->npages))
+		if (slot->npages)
 			return -EEXIST;
 
 		/*
...
@@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
 			continue;
 
-		if (cpu_if->vgic_elrsr & (1UL << i)) {
+		if (cpu_if->vgic_elrsr & (1UL << i))
 			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
-			continue;
-		}
+		else
+			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 
-		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 		writel_relaxed(0, base + GICH_LR0 + (i * 4));
 	}
 }
...
@@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	 * other thread sync back the IRQ.
 	 */
 	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-	       irq->vcpu->cpu != -1) { /* VCPU thread is running */
-		BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
+	       irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);
-	}
 
 	irq->active = new_active_state;
 	if (new_active_state)
...
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 			}
 		}
 
-		/* Clear soft pending state when level IRQs have been acked */
-		if (irq->config == VGIC_CONFIG_LEVEL &&
-		    !(val & GICH_LR_PENDING_BIT)) {
-			irq->soft_pending = false;
-			irq->pending = irq->line_level;
+		/*
+		 * Clear soft pending state when level irqs have been acked.
+		 * Always regenerate the pending state.
+		 */
+		if (irq->config == VGIC_CONFIG_LEVEL) {
+			if (!(val & GICH_LR_PENDING_BIT))
+				irq->soft_pending = false;
+
+			irq->pending = irq->line_level || irq->soft_pending;
 		}
 
 		spin_unlock(&irq->irq_lock);
...
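This and the vgic-v3 hunk below are the same semantic change: pending must be recomputed from both sources on every fold, not only when the LR's pending bit has been cleared, otherwise a level interrupt whose line is still asserted by the device can be lost. The decision logic, pulled out as a standalone sketch with a few spot checks (names are illustrative, not the kernel's):

#include <assert.h>
#include <stdbool.h>

/* Recompute a level-triggered IRQ's pending state when folding an LR.
 * lr_pending:   the LR still had its pending bit set
 * line_level:   current state of the input line
 * soft_pending: latched pending state from an earlier injection */
static bool fold_level_pending(bool lr_pending, bool line_level,
			       bool *soft_pending)
{
	if (!lr_pending)
		*soft_pending = false;		/* the guest acked it */

	/* Always regenerate: the line may still be asserted. */
	return line_level || *soft_pending;
}

int main(void)
{
	bool sp;

	/* Acked, but the device still drives the line: stays pending.
	 * The old code only recomputed pending inside the !lr_pending
	 * branch, so this resample could be missed. */
	sp = true;
	assert(fold_level_pending(false, true, &sp) == true && !sp);

	/* Acked and the line has dropped: no longer pending. */
	sp = true;
	assert(fold_level_pending(false, false, &sp) == false);

	/* Not yet acked: soft pending state survives. */
	sp = true;
	assert(fold_level_pending(true, false, &sp) == true && sp);
	return 0;
}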
@@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 			}
 		}
 
-		/* Clear soft pending state when level irqs have been acked */
-		if (irq->config == VGIC_CONFIG_LEVEL &&
-		    !(val & ICH_LR_PENDING_BIT)) {
-			irq->soft_pending = false;
-			irq->pending = irq->line_level;
+		/*
+		 * Clear soft pending state when level irqs have been acked.
+		 * Always regenerate the pending state.
+		 */
+		if (irq->config == VGIC_CONFIG_LEVEL) {
+			if (!(val & ICH_LR_PENDING_BIT))
+				irq->soft_pending = false;
+
+			irq->pending = irq->line_level || irq->soft_pending;
 		}
 
 		spin_unlock(&irq->irq_lock);
...
@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
 	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
 					lockdep_is_held(&kvm->irq_lock));
-	if (gsi < irq_rt->nr_rt_entries) {
+	if (irq_rt && gsi < irq_rt->nr_rt_entries) {
 		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
 			entries[n] = *e;
 			++n;
...
@@ -2935,7 +2935,7 @@ static long kvm_vm_ioctl(struct file *filp,
 	case KVM_SET_GSI_ROUTING: {
 		struct kvm_irq_routing routing;
 		struct kvm_irq_routing __user *urouting;
-		struct kvm_irq_routing_entry *entries;
+		struct kvm_irq_routing_entry *entries = NULL;
 
 		r = -EFAULT;
 		if (copy_from_user(&routing, argp, sizeof(routing)))
@@ -2945,15 +2945,17 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out;
 		if (routing.flags)
 			goto out;
-		r = -ENOMEM;
-		entries = vmalloc(routing.nr * sizeof(*entries));
-		if (!entries)
-			goto out;
-		r = -EFAULT;
-		urouting = argp;
-		if (copy_from_user(entries, urouting->entries,
-				   routing.nr * sizeof(*entries)))
-			goto out_free_irq_routing;
+		if (routing.nr) {
+			r = -ENOMEM;
+			entries = vmalloc(routing.nr * sizeof(*entries));
+			if (!entries)
+				goto out;
+			r = -EFAULT;
+			urouting = argp;
+			if (copy_from_user(entries, urouting->entries,
+					   routing.nr * sizeof(*entries)))
+				goto out_free_irq_routing;
+		}
 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
 					routing.flags);
 out_free_irq_routing:
...
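This is the same zero-length-allocation pattern as the KVM_SET_CPUID fix above, and it pairs with the irqchip.c NULL check: an empty routing table is now legal (kvm_set_irq_routing gets entries == NULL, nr == 0), and kvm_irq_map_gsi no longer dereferences a routing table that was never installed, which could previously be reached through an irqfd before any routing was set. A minimal userspace sketch of the nr == 0 case (hypothetical test, error handling elided):

#include <fcntl.h>
#include <string.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	struct kvm_irq_routing routing;

	memset(&routing, 0, sizeof(routing));	/* nr == 0, flags == 0 */

	/* Before the fix this hit vmalloc(0); now it installs an
	 * empty routing table, which the irq_rt NULL/bounds checks
	 * in kvm_irq_map_gsi handle safely. */
	return ioctl(vm, KVM_SET_GSI_ROUTING, &routing);
}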