Commit e121ee6b authored by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge commits we're sharing with kvm-ppc tree.
parents d0055df0 c0577201
@@ -141,6 +141,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -632,7 +633,7 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
 			   unsigned int yield_count);
 long kvmppc_h_random(struct kvm_vcpu *vcpu);
 void kvmhv_commence_exit(int trap);
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
 void kvmppc_subcore_enter_guest(void);
 void kvmppc_subcore_exit_guest(void);
 long kvmppc_realmode_hmi_handler(void);
...
@@ -209,7 +209,7 @@ extern int get_mce_event(struct machine_check_event *mce, bool release);
 extern void release_mce_event(void);
 extern void machine_check_queue_event(void);
 extern void machine_check_print_event_info(struct machine_check_event *evt,
-				       bool user_mode);
+				       bool user_mode, bool in_guest);
 #ifdef CONFIG_PPC_BOOK3S_64
 void flush_and_reload_slb(void);
 #endif /* CONFIG_PPC_BOOK3S_64 */
...
@@ -301,13 +301,13 @@ static void machine_check_process_queued_event(struct irq_work *work)
 	while (__this_cpu_read(mce_queue_count) > 0) {
 		index = __this_cpu_read(mce_queue_count) - 1;
 		evt = this_cpu_ptr(&mce_event_queue[index]);
-		machine_check_print_event_info(evt, false);
+		machine_check_print_event_info(evt, false, false);
 		__this_cpu_dec(mce_queue_count);
 	}
 }
 
 void machine_check_print_event_info(struct machine_check_event *evt,
-				    bool user_mode)
+				    bool user_mode, bool in_guest)
 {
 	const char *level, *sevstr, *subtype;
 	static const char *mc_ue_types[] = {
@@ -387,7 +387,9 @@ void machine_check_print_event_info(struct machine_check_event *evt,
 		evt->disposition == MCE_DISPOSITION_RECOVERED ?
 		"Recovered" : "Not recovered");
-	if (user_mode) {
+	if (in_guest) {
+		printk("%s Guest NIP: %016llx\n", level, evt->srr0);
+	} else if (user_mode) {
 		printk("%s NIP: [%016llx] PID: %d Comm: %s\n", level,
 			evt->srr0, current->pid, current->comm);
 	} else {
...
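Taken together, the mce.c hunks thread a new `in_guest` flag through the MCE reporting path. A standalone model of the resulting three-way branch (illustrative only, not the kernel function; printk levels and most event fields are omitted, and the guest case prints only the guest NIP because `current->pid`/`current->comm` would describe the host task, not the guest context):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct machine_check_event. */
struct mc_event_model {
	uint64_t srr0;	/* faulting address (guest or host NIP) */
};

/* Models the new in_guest / user_mode / kernel branch. */
static void print_event_nip(const struct mc_event_model *evt,
			    bool user_mode, bool in_guest)
{
	if (in_guest)
		printf("Guest NIP: %016llx\n", (unsigned long long)evt->srr0);
	else if (user_mode)
		printf("NIP: [%016llx] PID/Comm: <current task>\n",
		       (unsigned long long)evt->srr0);
	else
		printf("NIP: [%016llx] <kernel symbol>\n",
		       (unsigned long long)evt->srr0);
}

int main(void)
{
	struct mc_event_model evt = { .srr0 = 0xc000000000123456ULL };

	print_event_nip(&evt, false, true);	/* guest MCE, as in book3s_hv.c */
	print_event_nip(&evt, false, false);	/* host MCE queue drain */
	return 0;
}
```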
@@ -192,6 +192,13 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+{
+	/* might as well deliver this straight away */
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+}
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
+
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
 	/* might as well deliver this straight away */
...
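The new helper mirrors kvmppc_core_queue_program() just below it: despite the `queue_` name, delivery is immediate via kvmppc_inject_interrupt(), which redirects the guest to the vector and folds `flags` into the guest's SRR1 image. A minimal standalone model of that shape (the vcpu struct and the inject semantics here are toy stand-ins, not the kernel's):

```c
#include <stdio.h>

/* Book3S machine check interrupt vector (matches the kernel header). */
#define BOOK3S_INTERRUPT_MACHINE_CHECK 0x200

/* Toy vcpu: records what would be delivered to the guest. */
struct vcpu_model {
	unsigned long pending_vec;
	unsigned long pending_srr1_flags;
};

/* Stand-in for kvmppc_inject_interrupt(): immediate delivery. */
static void inject_interrupt(struct vcpu_model *vcpu, unsigned long vec,
			     unsigned long flags)
{
	vcpu->pending_vec = vec;		/* guest resumes at 0x200 */
	vcpu->pending_srr1_flags = flags;	/* cause bits land in SRR1 */
}

/* Shape of the new helper: no deferred queueing step. */
static void queue_machine_check(struct vcpu_model *vcpu, unsigned long flags)
{
	inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}

int main(void)
{
	struct vcpu_model vcpu = { 0, 0 };

	queue_machine_check(&vcpu, 0x00200000UL);
	printf("vector=0x%lx srr1_flags=0x%lx\n",
	       vcpu.pending_vec, vcpu.pending_srr1_flags);
	return 0;
}
```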
@@ -1215,6 +1215,22 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
+		/* Print the MCE event to host console. */
+		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
+
+		/*
+		 * If the guest can do FWNMI, exit to userspace so it can
+		 * deliver a FWNMI to the guest.
+		 * Otherwise we synthesize a machine check for the guest
+		 * so that it knows that the machine check occurred.
+		 */
+		if (!vcpu->kvm->arch.fwnmi_enabled) {
+			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+			kvmppc_core_queue_machine_check(vcpu, flags);
+			r = RESUME_GUEST;
+			break;
+		}
+
 		/* Exit to guest with KVM_EXIT_NMI as exit reason */
 		run->exit_reason = KVM_EXIT_NMI;
 		run->hw.hardware_exit_reason = vcpu->arch.trap;
@@ -1227,8 +1243,6 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
 		r = RESUME_HOST;
-		/* Print the MCE event to host console. */
-		machine_check_print_event_info(&vcpu->arch.mce_evt, false);
 		break;
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
@@ -1392,7 +1406,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		/* Pass the machine check to the L1 guest */
 		r = RESUME_HOST;
 		/* Print the MCE event to host console. */
-		machine_check_print_event_info(&vcpu->arch.mce_evt, false);
+		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
 		break;
 	/*
 	 * We get these next two if the guest accesses a page which it thinks
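With these hunks the FWNMI policy moves out of the real-mode assembly (see the book3s_hv_rmhandlers.S section below) into C: FWNMI-capable guests get a KVM_EXIT_NMI so userspace can deliver the error as an RTAS event, while older guests get a synthesized 0x200 machine check carrying the SRR1 cause bits selected by the 0x083c0000 mask. A standalone model of that decision (names are illustrative; the mask value is taken straight from the hunk above):

```c
#include <stdbool.h>
#include <stdio.h>

/* Mask from the hunk above: the SRR1 bits reporting the MC cause. */
#define MC_SRR1_REASON_MASK 0x083c0000ULL

enum mc_action { SYNTHESIZE_GUEST_MC, EXIT_TO_USERSPACE_NMI };

/* Models the new BOOK3S_INTERRUPT_MACHINE_CHECK case: the choice
 * depends only on whether the guest negotiated FWNMI. */
static enum mc_action handle_guest_mc(bool fwnmi_enabled,
				      unsigned long long guest_msr,
				      unsigned long long *srr1_flags)
{
	if (!fwnmi_enabled) {
		*srr1_flags = guest_msr & MC_SRR1_REASON_MASK;
		return SYNTHESIZE_GUEST_MC;	/* inject 0x200, RESUME_GUEST */
	}
	return EXIT_TO_USERSPACE_NMI;		/* KVM_EXIT_NMI, RESUME_HOST */
}

int main(void)
{
	unsigned long long msr = 0x9000000000201033ULL;	/* example MSR image */
	unsigned long long flags = 0;

	if (handle_guest_mc(false, msr, &flags) == SYNTHESIZE_GUEST_MC)
		printf("legacy guest: inject 0x200, SRR1 flags 0x%llx\n", flags);
	if (handle_guest_mc(true, msr, &flags) == EXIT_TO_USERSPACE_NMI)
		printf("FWNMI guest: KVM_EXIT_NMI to userspace\n");
	return 0;
}
```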
@@ -3455,6 +3469,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	unsigned long host_dscr = mfspr(SPRN_DSCR);
 	unsigned long host_tidr = mfspr(SPRN_TIDR);
 	unsigned long host_iamr = mfspr(SPRN_IAMR);
+	unsigned long host_amr = mfspr(SPRN_AMR);
 	s64 dec;
 	u64 tb;
 	int trap, save_pmu;
@@ -3571,13 +3586,15 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	mtspr(SPRN_PSPB, 0);
 	mtspr(SPRN_WORT, 0);
-	mtspr(SPRN_AMR, 0);
 	mtspr(SPRN_UAMOR, 0);
 	mtspr(SPRN_DSCR, host_dscr);
 	mtspr(SPRN_TIDR, host_tidr);
 	mtspr(SPRN_IAMR, host_iamr);
 	mtspr(SPRN_PSPB, 0);
 
+	if (host_amr != vcpu->arch.amr)
+		mtspr(SPRN_AMR, host_amr);
+
 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
 	store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
...
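The last two hunks in this file are a separate fix in the POWER9 entry path: the exit code used to zero AMR outright, clobbering whatever value the host had, and never saved the host value in the first place. Now the host AMR is snapshotted on entry and written back on exit, skipping the (comparatively slow) SPR write when the value is unchanged. A toy model of the save/conditional-restore pattern, with mfspr/mtspr reduced to a plain variable:

```c
#include <stdio.h>

/* Stand-in for the AMR SPR; mfspr()/mtspr() become plain accesses.
 * Real SPR writes are slow, hence the conditional restore below. */
static unsigned long long amr;
static unsigned int amr_writes;

static unsigned long long mfspr_amr(void) { return amr; }
static void mtspr_amr(unsigned long long v) { amr = v; amr_writes++; }

/* Models the fixed sequence in kvmhv_p9_guest_entry(): snapshot on
 * entry, restore on exit only if the guest's value differs. */
static void run_guest(unsigned long long guest_amr)
{
	unsigned long long host_amr = mfspr_amr();	/* new: save host AMR */

	mtspr_amr(guest_amr);		/* guest runs with its own AMR */

	if (host_amr != guest_amr)	/* new: restore instead of zeroing */
		mtspr_amr(host_amr);
}

int main(void)
{
	amr = 0xf000000000000000ULL;	/* host is actively using AMR */
	run_guest(0);
	printf("AMR after exit: 0x%llx (SPR writes: %u)\n", amr, amr_writes);
	return 0;
}
```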
@@ -66,10 +66,8 @@ static void reload_slb(struct kvm_vcpu *vcpu)
 /*
  * On POWER7, see if we can handle a machine check that occurred inside
  * the guest in real mode, without switching to the host partition.
- *
- * Returns: 0 => exit guest, 1 => deliver machine check to guest
  */
-static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
+static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
 	unsigned long srr1 = vcpu->arch.shregs.msr;
 	struct machine_check_event mce_evt;
@@ -111,52 +109,24 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 	}
 
 	/*
-	 * See if we have already handled the condition in the linux host.
-	 * We assume that if the condition is recovered then linux host
-	 * will have generated an error log event that we will pick
-	 * up and log later.
-	 * Don't release mce event now. We will queue up the event so that
-	 * we can log the MCE event info on host console.
+	 * Now get the event and stash it in the vcpu struct so it can
+	 * be handled by the primary thread in virtual mode. We can't
+	 * call machine_check_queue_event() here if we are running on
+	 * an offline secondary thread.
 	 */
-	if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
-		goto out;
-
-	if (mce_evt.version == MCE_V1 &&
-	    (mce_evt.severity == MCE_SEV_NO_ERROR ||
-	     mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
-		handled = 1;
-
-out:
-	/*
-	 * For guest that supports FWNMI capability, hook the MCE event into
-	 * vcpu structure. We are going to exit the guest with KVM_EXIT_NMI
-	 * exit reason. On our way to exit we will pull this event from vcpu
-	 * structure and print it from thread 0 of the core/subcore.
-	 *
-	 * For guest that does not support FWNMI capability (old QEMU):
-	 * We are now going enter guest either through machine check
-	 * interrupt (for unhandled errors) or will continue from
-	 * current HSRR0 (for handled errors) in guest. Hence
-	 * queue up the event so that we can log it from host console later.
-	 */
-	if (vcpu->kvm->arch.fwnmi_enabled) {
-		/*
-		 * Hook up the mce event on to vcpu structure.
-		 * First clear the old event.
-		 */
-		memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt));
-		if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
-			vcpu->arch.mce_evt = mce_evt;
-		}
-	} else
-		machine_check_queue_event();
+	if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
+		if (handled && mce_evt.version == MCE_V1)
+			mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
+	} else {
+		memset(&mce_evt, 0, sizeof(mce_evt));
+	}
 
-	return handled;
+	vcpu->arch.mce_evt = mce_evt;
 }
 
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
 {
-	return kvmppc_realmode_mc_power7(vcpu);
+	kvmppc_realmode_mc_power7(vcpu);
 }
 
 /* Check if dynamic split is in force and return subcore size accordingly. */
...
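The real-mode handler therefore reduces to: attempt recovery, then always stash the event in the vcpu (upgrading the disposition to "recovered" when recovery succeeded), leaving the decision about what to tell the guest to virtual mode. A standalone model of that flow (fields and the MCE_EVENT_RELEASE semantics are simplified to a boolean):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MCE_V1 1

enum mce_disposition { MCE_DISPOSITION_NOT_RECOVERED, MCE_DISPOSITION_RECOVERED };

struct mce_event_model {
	int version;			/* MCE_V1 in the kernel */
	enum mce_disposition disposition;
};

/* Models the tail of kvmppc_realmode_mc_power7(): no return verdict,
 * just an event stashed for the primary thread to handle in virtual
 * mode (machine_check_queue_event() is unsafe on an offline secondary
 * thread, hence the stash-and-defer shape). */
static void realmode_mc(struct mce_event_model *vcpu_mce_evt,
			bool got_event, struct mce_event_model evt,
			bool handled)
{
	if (got_event) {
		if (handled && evt.version == MCE_V1)
			evt.disposition = MCE_DISPOSITION_RECOVERED;
	} else {
		memset(&evt, 0, sizeof(evt));
	}
	*vcpu_mce_evt = evt;	/* always stashed, even if empty */
}

int main(void)
{
	struct mce_event_model stash;
	struct mce_event_model evt = { MCE_V1, MCE_DISPOSITION_NOT_RECOVERED };

	realmode_mc(&stash, true, evt, true);
	printf("disposition: %s\n",
	       stash.disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");
	return 0;
}
```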
@@ -2826,49 +2826,15 @@ kvm_cede_exit:
 #endif /* CONFIG_KVM_XICS */
 3:	b	guest_exit_cont
 
-/* Try to handle a machine check in real mode */
+/* Try to do machine check recovery in real mode */
 machine_check_realmode:
 	mr	r3, r9		/* get vcpu pointer */
 	bl	kvmppc_realmode_machine_check
 	nop
+	/* all machine checks go to virtual mode for further handling */
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
-	/*
-	 * For the guest that is FWNMI capable, deliver all the MCE errors
-	 * (handled/unhandled) by exiting the guest with KVM_EXIT_NMI exit
-	 * reason. This new approach injects machine check errors in guest
-	 * address space to guest with additional information in the form
-	 * of RTAS event, thus enabling guest kernel to suitably handle
-	 * such errors.
-	 *
-	 * For the guest that is not FWNMI capable (old QEMU) fallback
-	 * to old behaviour for backward compatibility:
-	 * Deliver unhandled/fatal (e.g. UE) MCE errors to guest either
-	 * through machine check interrupt (set HSRR0 to 0x200).
-	 * For handled errors (no-fatal), just go back to guest execution
-	 * with current HSRR0.
-	 * if we receive machine check with MSR(RI=0) then deliver it to
-	 * guest as machine check causing guest to crash.
-	 */
-	ld	r11, VCPU_MSR(r9)
-	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
-	bne	guest_exit_cont		/* if so, exit to host */
-	/* Check if guest is capable of handling NMI exit */
-	ld	r10, VCPU_KVM(r9)
-	lbz	r10, KVM_FWNMI(r10)
-	cmpdi	r10, 1		/* FWNMI capable? */
-	beq	guest_exit_cont	/* if so, exit with KVM_EXIT_NMI. */
-	/* if not, fall through for backward compatibility. */
-	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
-	beq	1f		/* Deliver a machine check to guest */
-	ld	r10, VCPU_PC(r9)
-	cmpdi	r3, 0		/* Did we handle MCE ? */
-	bne	2f		/* Continue guest execution. */
-	/* If not, deliver a machine check. SRR0/1 are already set */
-1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	bl	kvmppc_msr_interrupt
-2:	b	fast_interrupt_c_return
+	b	guest_exit_cont
 
 /*
  * Call C code to handle a HMI in real mode.
...
@@ -586,7 +586,7 @@ int opal_machine_check(struct pt_regs *regs)
 			evt.version);
 		return 0;
 	}
-	machine_check_print_event_info(&evt, user_mode(regs));
+	machine_check_print_event_info(&evt, user_mode(regs), false);
 	if (opal_recover_mce(regs, &evt))
 		return 1;
...