Commit 541d8f4d authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Miscellaneous bugfixes.

  The ARM and s390 fixes are for new regressions from the merge window;
  the others are the usual stable material"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  compiler-gcc: disable -ftracer for __noclone functions
  kvm: x86: make lapic hrtimer pinned
  s390/mm/kvm: fix mis-merge in gmap handling
  kvm: set page dirty only if page has been writable
  KVM: x86: reduce default value of halt_poll_ns parameter
  KVM: Hyper-V: do not do hypercall userspace exits if SynIC is disabled
  KVM: x86: Inject pending interrupt even if pending nmi exist
  arm64: KVM: Register CPU notifiers when the kernel runs at HYP
  arm64: kvm: 4.6-rc1: Fix VTCR_EL2 VS setting
parents 5003bc6c 95272c29
@@ -1061,15 +1061,27 @@ static void cpu_init_hyp_mode(void *dummy)
     kvm_arm_init_debug();
 }
 
+static void cpu_hyp_reinit(void)
+{
+    if (is_kernel_in_hyp_mode()) {
+        /*
+         * cpu_init_stage2() is safe to call even if the PM
+         * event was cancelled before the CPU was reset.
+         */
+        cpu_init_stage2(NULL);
+    } else {
+        if (__hyp_get_vectors() == hyp_default_vectors)
+            cpu_init_hyp_mode(NULL);
+    }
+}
+
 static int hyp_init_cpu_notify(struct notifier_block *self,
                    unsigned long action, void *cpu)
 {
     switch (action) {
     case CPU_STARTING:
     case CPU_STARTING_FROZEN:
-        if (__hyp_get_vectors() == hyp_default_vectors)
-            cpu_init_hyp_mode(NULL);
+        cpu_hyp_reinit();
         break;
     }
 
     return NOTIFY_OK;
@@ -1084,9 +1096,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
                     unsigned long cmd,
                     void *v)
 {
-    if (cmd == CPU_PM_EXIT &&
-        __hyp_get_vectors() == hyp_default_vectors) {
-        cpu_init_hyp_mode(NULL);
+    if (cmd == CPU_PM_EXIT) {
+        cpu_hyp_reinit();
         return NOTIFY_OK;
     }
@@ -1127,6 +1138,22 @@ static int init_subsystems(void)
 {
     int err;
 
+    /*
+     * Register CPU Hotplug notifier
+     */
+    cpu_notifier_register_begin();
+    err = __register_cpu_notifier(&hyp_init_cpu_nb);
+    cpu_notifier_register_done();
+    if (err) {
+        kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
+        return err;
+    }
+
+    /*
+     * Register CPU lower-power notifier
+     */
+    hyp_cpu_pm_init();
+
     /*
      * Init HYP view of VGIC
      */
@@ -1270,19 +1297,6 @@ static int init_hyp_mode(void)
     free_boot_hyp_pgd();
 #endif
 
-    cpu_notifier_register_begin();
-    err = __register_cpu_notifier(&hyp_init_cpu_nb);
-    cpu_notifier_register_done();
-
-    if (err) {
-        kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
-        goto out_err;
-    }
-
-    hyp_cpu_pm_init();
-
     /* set size of VMID supported by CPU */
     kvm_vmid_bits = kvm_get_vmid_bits();
     kvm_info("%d-bit VMID\n", kvm_vmid_bits);
...
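The hunks above are the "arm64: KVM: Register CPU notifiers when the kernel runs at HYP" fix: with VHE the kernel boots at EL2 and init_hyp_mode() is skipped entirely, so notifiers registered only there were never installed, and hotplugged or PM-resumed CPUs came up without their KVM state. Registration now lives in init_subsystems(), which runs on both paths, and cpu_hyp_reinit() picks the right re-init for each mode. A minimal sketch of the 4.6-era hotplug-notifier registration pattern used here (the demo_* names are hypothetical; the notifier API is the real one):

    #include <linux/module.h>
    #include <linux/cpu.h>
    #include <linux/notifier.h>
    #include <linux/smp.h>

    /* Hypothetical per-CPU setup, standing in for cpu_hyp_reinit(). */
    static void demo_cpu_setup(void)
    {
        pr_info("setting up CPU %d\n", smp_processor_id());
    }

    static int demo_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *cpu)
    {
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
            demo_cpu_setup();
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block demo_cpu_nb = {
        .notifier_call = demo_cpu_notify,
    };

    static int __init demo_init(void)
    {
        int err;

        /* begin/done pair keeps registration atomic w.r.t. hotplug */
        cpu_notifier_register_begin();
        err = __register_cpu_notifier(&demo_cpu_nb);
        cpu_notifier_register_done();
        return err;
    }

    static void __exit demo_exit(void)
    {
        cpu_notifier_register_begin();
        __unregister_cpu_notifier(&demo_cpu_nb);
        cpu_notifier_register_done();
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");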
@@ -124,7 +124,9 @@
 #define VTCR_EL2_SL0_LVL1   (1 << 6)
 #define VTCR_EL2_T0SZ_MASK  0x3f
 #define VTCR_EL2_T0SZ_40B   24
-#define VTCR_EL2_VS         19
+#define VTCR_EL2_VS_SHIFT   19
+#define VTCR_EL2_VS_8BIT    (0 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_VS_16BIT   (1 << VTCR_EL2_VS_SHIFT)
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
...
@@ -141,6 +141,9 @@
 #define ID_AA64MMFR1_VMIDBITS_SHIFT 4
 #define ID_AA64MMFR1_HADBS_SHIFT    0
 
+#define ID_AA64MMFR1_VMIDBITS_8     0
+#define ID_AA64MMFR1_VMIDBITS_16    2
+
 /* id_aa64mmfr2 */
 #define ID_AA64MMFR2_UAO_SHIFT      4
...
@@ -36,8 +36,10 @@ void __hyp_text __init_stage2_translation(void)
      * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
      * bit in VTCR_EL2.
      */
-    tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
-    val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+    tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
+    val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
+            VTCR_EL2_VS_16BIT :
+            VTCR_EL2_VS_8BIT;
 
     write_sysreg(val, vtcr_el2);
 }
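The three hunks above fix the VTCR_EL2 VS setting: the old code OR'ed in the literal 19 (the bit *position*) instead of 1 << 19, so the VS bit was never actually set for 16-bit VMIDs. The field decoded is ID_AA64MMFR1_EL1.VMIDBits, bits [7:4], where 0 means 8-bit VMIDs and 2 means 16-bit. A user-space sketch of the same decode, with a made-up register value:

    #include <stdio.h>
    #include <stdint.h>

    #define ID_AA64MMFR1_VMIDBITS_SHIFT 4
    #define ID_AA64MMFR1_VMIDBITS_16    2
    #define VTCR_EL2_VS_SHIFT           19

    int main(void)
    {
        uint64_t id_aa64mmfr1 = 0x20;   /* made-up value: VMIDBits == 2 */
        unsigned int vmidbits =
            (id_aa64mmfr1 >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
        /* the bug: "val |= 19" set bits 0, 1 and 4, not bit 19 */
        uint64_t vtcr_vs = (vmidbits == ID_AA64MMFR1_VMIDBITS_16)
                    ? (1UL << VTCR_EL2_VS_SHIFT) : 0;

        printf("VMID width: %d bits, VTCR_EL2.VS = %d\n",
               vmidbits == ID_AA64MMFR1_VMIDBITS_16 ? 16 : 8,
               vtcr_vs ? 1 : 0);
        return 0;
    }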
@@ -23,7 +23,7 @@
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
- * @limit: maximum size of the gmap address space
+ * @limit: maximum address of the gmap address space
  *
  * Returns a guest address space structure.
  */
@@ -292,7 +292,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
     if ((from | to | len) & (PMD_SIZE - 1))
         return -EINVAL;
     if (len == 0 || from + len < from || to + len < to ||
-        from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+        from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
         return -EINVAL;
 
     flush = 0;
...
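The mis-merge had dropped the "- 1": gmap->asce_end now holds the maximum *address* (inclusive), so a mapping that ends exactly at the limit has to + len == asce_end + 1 and must be accepted. A small user-space demonstration of the off-by-one, with toy limits:

    #include <stdio.h>

    #define ASCE_END 0xffffffffUL   /* toy inclusive maximum address */

    int main(void)
    {
        /* mapping whose last byte sits exactly at ASCE_END */
        unsigned long to = 0xfff00000UL, len = 0x00100000UL;

        /* old check: wrongly rejects a mapping ending at the limit */
        printf("old: %s\n", to + len > ASCE_END ? "rejected" : "ok");
        /* new check: compares the last mapped address instead */
        printf("new: %s\n", to + len - 1 > ASCE_END ? "rejected" : "ok");
        return 0;
    }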
@@ -43,7 +43,7 @@
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 400000
 
 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
...
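KVM_HALT_POLL_NS_DEFAULT seeds the kvm.halt_poll_ns module parameter: before descheduling a halted vCPU, KVM busy-polls for up to this many nanoseconds in case a wakeup arrives immediately, trading host CPU time for wakeup latency. The default drops from 500µs to 400µs. A deliberately simplified sketch of the idea (all names hypothetical; the real kvm_main.c code also grows and shrinks the window adaptively):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define HALT_POLL_NS_DEFAULT 400000ULL  /* the new default */

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /* Stand-ins for KVM internals, for illustration only. */
    static bool wakeup_pending(void) { return false; }
    static void block_until_wakeup(void) { puts("blocked: schedule() here"); }

    static void vcpu_halt(void)
    {
        uint64_t deadline = now_ns() + HALT_POLL_NS_DEFAULT;

        while (now_ns() < deadline) {
            if (wakeup_pending())
                return;             /* poll won: no context switch */
        }
        block_until_wakeup();       /* poll lost: sleep for real */
    }

    int main(void)
    {
        vcpu_halt();
        return 0;
    }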
@@ -1116,6 +1116,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
         break;
     case HVCALL_POST_MESSAGE:
     case HVCALL_SIGNAL_EVENT:
+        /* don't bother userspace if it has no way to handle it */
+        if (!vcpu_to_synic(vcpu)->active) {
+            res = HV_STATUS_INVALID_HYPERCALL_CODE;
+            break;
+        }
         vcpu->run->exit_reason = KVM_EXIT_HYPERV;
         vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
         vcpu->run->hyperv.u.hcall.input = param;
...
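With the guard in place, a guest issuing HvPostMessage/HvSignalEvent while SynIC is disabled gets HV_STATUS_INVALID_HYPERCALL_CODE back in-kernel instead of bouncing the vCPU out to a userspace that never enabled SynIC and cannot handle the exit. For reference, this is roughly how a VMM consumes the exit that the guard now suppresses (a sketch; handle_exit is hypothetical, the kvm_run fields are the real UAPI):

    #include <linux/kvm.h>
    #include <stdio.h>

    /* Hypothetical VMM-side dispatch; run points at the vCPU's
     * mmap'ed kvm_run area returned after ioctl(KVM_RUN). */
    void handle_exit(struct kvm_run *run)
    {
        switch (run->exit_reason) {
        case KVM_EXIT_HYPERV:
            if (run->hyperv.type == KVM_EXIT_HYPERV_HCALL)
                printf("hv hypercall, input=%llu\n",
                       (unsigned long long)run->hyperv.u.hcall.input);
            break;
        }
    }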
@@ -1369,7 +1369,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
         hrtimer_start(&apic->lapic_timer.timer,
                   ktime_add_ns(now, apic->lapic_timer.period),
-                  HRTIMER_MODE_ABS);
+                  HRTIMER_MODE_ABS_PINNED);
 
         apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
                PRIx64 ", "
@@ -1402,7 +1402,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
             expire = ktime_add_ns(now, ns);
             expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
             hrtimer_start(&apic->lapic_timer.timer,
-                      expire, HRTIMER_MODE_ABS);
+                      expire, HRTIMER_MODE_ABS_PINNED);
         } else
             apic_timer_expired(apic);
@@ -1868,7 +1868,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
     apic->vcpu = vcpu;
 
     hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
-             HRTIMER_MODE_ABS);
+             HRTIMER_MODE_ABS_PINNED);
     apic->lapic_timer.timer.function = apic_timer_fn;
 
     /*
@@ -2003,7 +2003,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
     timer = &vcpu->arch.apic->lapic_timer.timer;
     if (hrtimer_cancel(timer))
-        hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+        hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*
...
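HRTIMER_MODE_ABS_PINNED keeps the LAPIC timer on the CPU that armed it; an unpinned hrtimer may end up firing on a different CPU, which then has to kick the vCPU's CPU remotely, adding latency to every timer the guest programs and disturbing otherwise isolated CPUs. The mode flag is the only change. A minimal module-context sketch of the pattern (demo_* names are hypothetical):

    #include <linux/module.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/smp.h>

    static struct hrtimer demo_timer;

    static enum hrtimer_restart demo_fn(struct hrtimer *t)
    {
        pr_info("expired on CPU %d\n", smp_processor_id());
        return HRTIMER_NORESTART;
    }

    static int __init demo_init(void)
    {
        /* _PINNED: expiry stays on the CPU that started the timer */
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        demo_timer.function = demo_fn;
        hrtimer_start(&demo_timer,
                      ktime_add_ns(ktime_get(), NSEC_PER_MSEC),
                      HRTIMER_MODE_ABS_PINNED);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        hrtimer_cancel(&demo_timer);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");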
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
         !is_writable_pte(new_spte))
         ret = true;
 
-    if (!shadow_accessed_mask)
+    if (!shadow_accessed_mask) {
+        /*
+         * We don't set page dirty when dropping non-writable spte.
+         * So do it now if the new spte is becoming non-writable.
+         */
+        if (ret)
+            kvm_set_pfn_dirty(spte_to_pfn(old_spte));
         return ret;
+    }
 
     /*
      * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
     if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
         kvm_set_pfn_accessed(pfn);
-    if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+    if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+                    PT_WRITABLE_MASK))
         kvm_set_pfn_dirty(pfn);
     return 1;
 }
...
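Both hunks deal with hosts whose EPT lacks hardware accessed/dirty bits (shadow_accessed_mask and shadow_dirty_mask are 0 there). In that configuration the only evidence that a guest page may be dirty is that its spte was writable, so the pfn must be marked dirty when a writable spte is dropped or made read-only; conversely, the old unconditional "!shadow_dirty_mask ||" marked even never-writable pages dirty. The second hunk's ternary compresses to this predicate (a stand-alone restatement, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define PT_WRITABLE_MASK    (1ULL << 1) /* x86 PTE: bit 1 = writable */

    /* If hardware tracks dirty state, trust that bit; otherwise fall
     * back to "was writable", the only safe proxy. */
    static int spte_may_be_dirty(uint64_t spte, uint64_t shadow_dirty_mask)
    {
        return !!(spte & (shadow_dirty_mask ? shadow_dirty_mask
                                            : PT_WRITABLE_MASK));
    }

    int main(void)
    {
        /* no hardware dirty bit: writable spte counts as dirty */
        printf("%d\n", spte_may_be_dirty(PT_WRITABLE_MASK, 0));
        /* hardware dirty bit (EPT bit 9) present but clear: not dirty */
        printf("%d\n", spte_may_be_dirty(PT_WRITABLE_MASK, 1ULL << 9));
        return 0;
    }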
@@ -6095,12 +6095,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
     }
 
     /* try to inject new event if pending */
-    if (vcpu->arch.nmi_pending) {
-        if (kvm_x86_ops->nmi_allowed(vcpu)) {
-            --vcpu->arch.nmi_pending;
-            vcpu->arch.nmi_injected = true;
-            kvm_x86_ops->set_nmi(vcpu);
-        }
+    if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+        --vcpu->arch.nmi_pending;
+        vcpu->arch.nmi_injected = true;
+        kvm_x86_ops->set_nmi(vcpu);
     } else if (kvm_cpu_has_injectable_intr(vcpu)) {
         /*
          * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6567,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         if (inject_pending_event(vcpu, req_int_win) != 0)
             req_immediate_exit = true;
         /* enable NMI/IRQ window open exits if needed */
-        else if (vcpu->arch.nmi_pending)
-            kvm_x86_ops->enable_nmi_window(vcpu);
-        else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-            kvm_x86_ops->enable_irq_window(vcpu);
+        else {
+            if (vcpu->arch.nmi_pending)
+                kvm_x86_ops->enable_nmi_window(vcpu);
+            if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+                kvm_x86_ops->enable_irq_window(vcpu);
+        }
 
     if (kvm_lapic_enabled(vcpu)) {
         update_cr8_intercept(vcpu);
...
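Previously the else-if chain meant that as long as an NMI stayed pending but could not be delivered, KVM never opened the interrupt window, so a pending injectable interrupt could be starved indefinitely. The rewrite requests each window independently. The shape of the fix, reduced to a runnable toy (all names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    static void enable_nmi_window(void) { puts("NMI window requested"); }
    static void enable_irq_window(void) { puts("IRQ window requested"); }

    int main(void)
    {
        bool nmi_pending = true, intr_pending = true;

        /* before: "else if" starved the IRQ window behind a pending NMI */
        if (nmi_pending)
            enable_nmi_window();
        else if (intr_pending)
            enable_irq_window();    /* never reached while NMI pending */

        /* after: each window is requested on its own merits */
        if (nmi_pending)
            enable_nmi_window();
        if (intr_pending)
            enable_irq_window();
        return 0;
    }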
@@ -199,7 +199,7 @@
 #define unreachable() __builtin_unreachable()
 
 /* Mark a function definition as prohibited from being cloned. */
-#define __noclone   __attribute__((__noclone__))
+#define __noclone   __attribute__((__noclone__, __optimize__("no-tracer")))
 
 #endif /* GCC_VERSION >= 40500 */
...
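gcc's -ftracer pass (enabled by -fprofile-use) performs tail duplication, which can copy an inline-asm statement, and with it any fixed label the asm defines, into multiple places in the same function; __noclone alone does not prevent that, so the macro now also disables the tracer per function via the optimize attribute. A hedged stand-alone illustration (demo_entry is hypothetical; the attribute combination is the one added above):

    /* Same definition the hunk above installs for GCC >= 4.5. */
    #define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))

    static int __noclone demo_entry(int x)
    {
        /* a fixed-name label: if the asm body were duplicated, the
         * assembler would see the symbol defined twice and fail */
        asm volatile("demo_fixed_label: nop");
        return x;
    }

    int main(void)
    {
        return demo_entry(0);
    }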