Commit 4a6cd3ba authored by Radim Krčmář

Merge tag 'kvm-arm-for-4.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm

KVM/ARM Fixes for v4.6-rc4

Addresses:
 - Wrong indentation in the PMU code from the merge window
 - A long-standing bug occurring when running ntpd on the host, a candidate for stable
 - Properly handle (and warn about) the unsupported configuration of running on
   systems with less than 40 bits of PA space
 - More fixes to the PM and hotplug notifier stuff from the merge window
parents 3d8e15dd 06a71a24
arch/arm/kvm/arm.c
@@ -1112,10 +1112,17 @@ static void __init hyp_cpu_pm_init(void)
 {
 	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
+static void __init hyp_cpu_pm_exit(void)
+{
+	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+}
 #else
 static inline void hyp_cpu_pm_init(void)
 {
 }
+static inline void hyp_cpu_pm_exit(void)
+{
+}
 #endif
 
 static void teardown_common_resources(void)
@@ -1141,9 +1148,7 @@ static int init_subsystems(void)
 	/*
 	 * Register CPU Hotplug notifier
 	 */
-	cpu_notifier_register_begin();
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-	cpu_notifier_register_done();
+	err = register_cpu_notifier(&hyp_init_cpu_nb);
 	if (err) {
 		kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
 		return err;
@@ -1193,6 +1198,8 @@ static void teardown_hyp_mode(void)
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+	unregister_cpu_notifier(&hyp_init_cpu_nb);
+	hyp_cpu_pm_exit();
 }
 
 static int init_vhe_mode(void)
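The arm.c changes pair every notifier registered during init with an unregistration on the teardown path, so a failed or rolled-back hyp init no longer leaves stale CPU-hotplug or PM callbacks behind. As a hedged, user-space illustration of that register/unwind pattern (all names below are invented for the example, not taken from the kernel):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical resources standing in for the CPU-hotplug and PM notifiers. */
    static int register_a(void) { puts("register a"); return 0; }
    static void unregister_a(void) { puts("unregister a"); }
    static int register_b(void) { puts("register b"); return 0; }
    static void unregister_b(void) { puts("unregister b"); }

    static int subsys_init(void)
    {
        int err;

        err = register_a();
        if (err)
            return err;

        err = register_b();
        if (err)
            goto out_unregister_a;  /* unwind what succeeded, in reverse */

        return 0;

    out_unregister_a:
        unregister_a();
        return err;
    }

    static void subsys_exit(void)
    {
        /* Mirror image of subsys_init(): release in reverse order. */
        unregister_b();
        unregister_a();
    }

    int main(void)
    {
        if (subsys_init())
            return EXIT_FAILURE;
        subsys_exit();
        return 0;
    }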
arch/arm64/include/asm/kvm_arm.h
@@ -151,8 +151,7 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-				 VTCR_EL2_RES1)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X		(38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
@@ -163,8 +162,7 @@
  */
 #define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
 				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-				 VTCR_EL2_RES1)
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
 #define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
 #endif
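Dropping VTCR_EL2_T0SZ_40B from the static flag sets is what lets the stage-2 input size become a runtime decision: VTCR_EL2.T0SZ encodes the input address size as 64 minus the number of address bits, and it is now filled in by __init_stage2_translation() once the CPU's PARange is known. A small standalone sketch of the encoding (the t0sz() helper is illustrative only):

    #include <stdio.h>

    /*
     * VTCR_EL2.T0SZ encodes the stage-2 input address size as
     * 64 - (number of address bits), so once the usable range
     * depends on the CPU's PARange it has to be set at runtime
     * rather than baked into VTCR_EL2_FLAGS.
     */
    static unsigned int t0sz(unsigned int ipa_bits)
    {
        return 64 - ipa_bits;
    }

    int main(void)
    {
        printf("40-bit IPA -> T0SZ = %u\n", t0sz(40)); /* 24 */
        printf("36-bit IPA -> T0SZ = %u\n", t0sz(36)); /* 28 */
        return 0;
    }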
arch/arm64/include/asm/kvm_asm.h
@@ -54,7 +54,7 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-extern void __init_stage2_translation(void);
+extern u32 __init_stage2_translation(void);
 
 #endif
arch/arm64/include/asm/kvm_host.h
@@ -369,11 +369,12 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 
-/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
-
 static inline void __cpu_init_stage2(void)
 {
-	kvm_call_hyp(__init_stage2_translation);
+	u32 parange = kvm_call_hyp(__init_stage2_translation);
+
+	WARN_ONCE(parange < 40,
+		  "PARange is %d bits, unsupported configuration!", parange);
 }
 
 #endif /* __ARM64_KVM_HOST_H__ */
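__cpu_init_stage2() is invoked for every CPU that is brought online, which is presumably why the check uses WARN_ONCE() rather than WARN(): the splat is emitted the first time the condition holds and suppressed afterwards. A rough user-space analogue of that once-only behaviour (warn_once() below is a stand-in, not the kernel macro):

    #include <stdbool.h>
    #include <stdio.h>

    /* Rough user-space analogue of the kernel's WARN_ONCE(). */
    #define warn_once(cond, ...)                        \
        do {                                            \
            static bool warned;                         \
            if ((cond) && !warned) {                    \
                warned = true;                          \
                fprintf(stderr, __VA_ARGS__);           \
            }                                           \
        } while (0)

    int main(void)
    {
        for (int cpu = 0; cpu < 4; cpu++) {
            unsigned int parange = 36; /* pretend the HW reports 36 bits */

            /* Fires once, even though every CPU runs the same check. */
            warn_once(parange < 40,
                      "PARange is %u bits, unsupported configuration!\n",
                      parange);
        }
        return 0;
    }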
arch/arm64/kvm/hyp/s2-setup.c
@@ -20,9 +20,10 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 
-void __hyp_text __init_stage2_translation(void)
+u32 __hyp_text __init_stage2_translation(void)
 {
 	u64 val = VTCR_EL2_FLAGS;
+	u64 parange;
 	u64 tmp;
 
 	/*
@@ -30,7 +31,39 @@ void __hyp_text __init_stage2_translation(void)
 	 * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
 	 * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
 	 */
-	val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+	parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+	val |= parange << 16;
+
+	/* Compute the actual PARange... */
+	switch (parange) {
+	case 0:
+		parange = 32;
+		break;
+	case 1:
+		parange = 36;
+		break;
+	case 2:
+		parange = 40;
+		break;
+	case 3:
+		parange = 42;
+		break;
+	case 4:
+		parange = 44;
+		break;
+	case 5:
+	default:
+		parange = 48;
+		break;
+	}
+
+	/*
+	 * ... and clamp it to 40 bits, unless we have some braindead
+	 * HW that implements less than that. In all cases, we'll
+	 * return that value for the rest of the kernel to decide what
+	 * to do.
+	 */
+	val |= 64 - (parange > 40 ? 40 : parange);
 
 	/*
 	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
@@ -42,4 +75,6 @@ void __hyp_text __init_stage2_translation(void)
 		   VTCR_EL2_VS_8BIT;
 
 	write_sysreg(val, vtcr_el2);
+
+	return parange;
 }
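The switch above mirrors the architected encoding of ID_AA64MMFR0_EL1.PARange: field values 0 through 5 map to 32, 36, 40, 42, 44 and 48 bits of physical address space, and unknown values are treated as 48. The same mapping restated as a table-driven helper, for reference (decode_parange() is illustrative, not kernel code):

    #include <stdio.h>

    /*
     * Architected ID_AA64MMFR0_EL1.PARange encodings (0-5), as decoded
     * by the switch in __init_stage2_translation(); values the table
     * does not know are treated as 48 bits, matching the default case.
     */
    static unsigned int decode_parange(unsigned int field)
    {
        static const unsigned int pa_bits[] = { 32, 36, 40, 42, 44, 48 };

        if (field >= sizeof(pa_bits) / sizeof(pa_bits[0]))
            return 48;
        return pa_bits[field];
    }

    int main(void)
    {
        for (unsigned int f = 0; f <= 6; f++)
            printf("PARange=%u -> %u bits\n", f, decode_parange(f));
        return 0;
    }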
virt/kvm/arm/arch_timer.c
@@ -91,6 +91,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
 	vcpu->arch.timer_cpu.armed = false;
 
+	WARN_ON(!kvm_timer_should_fire(vcpu));
+
 	/*
 	 * If the vcpu is blocked we want to wake it up so that it will see
 	 * the timer has expired when entering the guest.
@@ -98,10 +100,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	kvm_vcpu_kick(vcpu);
 }
 
+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
+{
+	cycle_t cval, now;
+
+	cval = vcpu->arch.timer_cpu.cntv_cval;
+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+	if (now < cval) {
+		u64 ns;
+
+		ns = cyclecounter_cyc2ns(timecounter->cc,
+					 cval - now,
+					 timecounter->mask,
+					 &timecounter->frac);
+		return ns;
+	}
+
+	return 0;
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
 	struct arch_timer_cpu *timer;
+	struct kvm_vcpu *vcpu;
+	u64 ns;
+
 	timer = container_of(hrt, struct arch_timer_cpu, timer);
+	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
+
+	/*
+	 * Check that the timer has really expired from the guest's
+	 * PoV (NTP on the host may have forced it to expire
+	 * early). If we should have slept longer, restart it.
+	 */
+	ns = kvm_timer_compute_delta(vcpu);
+	if (unlikely(ns)) {
+		hrtimer_forward_now(hrt, ns_to_ktime(ns));
+		return HRTIMER_RESTART;
+	}
+
 	queue_work(wqueue, &timer->expired);
 	return HRTIMER_NORESTART;
 }
@@ -176,8 +214,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	u64 ns;
-	cycle_t cval, now;
 
 	BUG_ON(timer_is_armed(timer));
 
@@ -197,14 +233,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 		return;
 
 	/* The timer has not yet expired, schedule a background timer */
-	cval = timer->cntv_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
-	ns = cyclecounter_cyc2ns(timecounter->cc,
-				 cval - now,
-				 timecounter->mask,
-				 &timecounter->frac);
-	timer_arm(timer, ns);
+	timer_arm(timer, kvm_timer_compute_delta(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
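This is the ntpd fix called out in the merge description: the background hrtimer runs on a host clock whose rate NTP may adjust, while the guest's deadline is a raw counter value, so the soft timer can fire before CVAL has actually been reached. The new callback re-checks the remaining delta and restarts the hrtimer instead of injecting the interrupt early. A toy model of that check (types and values simplified; compute_delta() mimics kvm_timer_compute_delta()):

    #include <stdio.h>

    /*
     * Toy model of the early-expiry check: the deadline (cval) lives in
     * raw counter cycles, while the soft timer runs on a host clock that
     * NTP may adjust, so the callback may run before cval is reached.
     */
    typedef unsigned long long cycle_t;

    static cycle_t now_cycles;              /* fake raw counter */
    static const cycle_t cval = 100;        /* guest deadline */

    /* Mimics kvm_timer_compute_delta(): remaining time, or 0 if expired. */
    static cycle_t compute_delta(void)
    {
        return now_cycles < cval ? cval - now_cycles : 0;
    }

    static void timer_fired(void)
    {
        cycle_t remaining = compute_delta();

        if (remaining) {
            /* Woke up early: push the expiry forward and try again. */
            printf("early by %llu cycles, restarting timer\n", remaining);
            return;
        }
        printf("really expired, inject the interrupt\n");
    }

    int main(void)
    {
        now_cycles = 90;  timer_fired();    /* premature wakeup */
        now_cycles = 105; timer_fired();    /* genuine expiry */
        return 0;
    }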
virt/kvm/arm/pmu.c
@@ -193,11 +193,12 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
-	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
 		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 		reg &= kvm_pmu_valid_counter_mask(vcpu);
+	}
 
 	return reg;
 }
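This PMU hunk is the indentation fix from the merge description: without braces, only the first statement after the if was conditional, and the reg &= lines executed unconditionally. Because reg starts at zero, masking it further happened to produce the right result, but the indentation misrepresented the control flow; the braces make the guarded block real. A minimal, self-contained illustration of the pitfall:

    #include <stdio.h>

    int main(void)
    {
        int enabled = 0;
        unsigned int reg = 0;

        /* The pre-fix shape: only the first statement is guarded. */
        if (enabled)
            reg = 0xff;
        reg &= 0x0f;  /* runs unconditionally, despite the indentation */

        printf("reg = %#x\n", reg);
        return 0;
    }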