Commit f7adbba1 authored by Alexander Graf, committed by Marcelo Tosatti

KVM: PPC: Keep SRR1 flags around in shadow_msr

SRR1 stores more information than just the MSR value. It also stores
valuable information about the type of interrupt we received, for
example whether the storage interrupt we just got was because of a
missing htab entry or not.

We use that information to speed up the exit path.
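
For illustration, here is a minimal standalone sketch of that kind of check; it mirrors the 0x40000000 ("no HPTE found") test in the exit handler further down, but the struct, macro and function names are made up for the example and are not the kernel's:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the vcpu field the exit handler looks at;
 * not the kernel's struct. */
struct vcpu_state {
	uint64_t shadow_srr1;	/* SRR1 captured when the guest exited */
};

/* SRR1 bit set on an instruction storage interrupt when no HPTE was
 * found (the 0x40000000 test in the handler below). */
#define SRR1_NO_HPTE	0x40000000ULL

/* If the fault was only a missing host HPTE we can map the page and
 * resume the guest directly; otherwise the interrupt gets reflected
 * into the guest. */
static bool isi_is_missing_hpte(const struct vcpu_state *vcpu)
{
	return (vcpu->shadow_srr1 & SRR1_NO_HPTE) != 0;
}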

Now if we get preempted before we can interpret the shadow_msr value,
we end up in vcpu_put, which calls the MSR handler, which in turn sets
all the SRR1 information bits in shadow_msr to 0. Great.

So let's preserve the SRR1-specific bits in shadow_msr whenever we set
the MSR. They don't hurt.
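
A rough standalone sketch of that idea, matching what the patch below does by keeping the exit-time SRR1 in its own field so a later shadow MSR update cannot clobber the flag bits (again, the names and types here are simplified assumptions, not the kernel's):

#include <stdint.h>

/* Simplified stand-in for the relevant vcpu fields. */
struct vcpu_state {
	uint64_t shadow_msr;	/* MSR the guest really runs with */
	uint64_t shadow_srr1;	/* full SRR1 saved on guest exit */
};

/* Exit path: capture SRR1 into its own field instead of folding it
 * into shadow_msr. */
static void save_exit_srr1(struct vcpu_state *vcpu, uint64_t srr1)
{
	vcpu->shadow_srr1 = srr1;
}

/* A later MSR update (for example one triggered on vcpu_put) can no
 * longer wipe the interrupt-type bits the exit handler still needs. */
static void update_shadow_msr(struct vcpu_state *vcpu, uint64_t new_msr)
{
	vcpu->shadow_msr = new_msr;	/* shadow_srr1 stays untouched */
}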
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 1c0006d8
@@ -198,6 +198,7 @@ struct kvm_vcpu_arch {
 	ulong msr;
 #ifdef CONFIG_PPC64
 	ulong shadow_msr;
+	ulong shadow_srr1;
 	ulong hflags;
 	ulong guest_owned_ext;
 #endif
@@ -433,6 +433,7 @@ int main(void)
 	DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
@@ -524,14 +524,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* Page not found in guest PTE entries */
 		vcpu->arch.dear = vcpu->arch.fault_dear;
 		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
 		vcpu->arch.dear = vcpu->arch.fault_dear;
 		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
 		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -693,7 +693,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 		vcpu->stat.pf_instruc++;
 		/* only care about PTEG not found errors, but leave NX alone */
-		if (vcpu->arch.shadow_msr & 0x40000000) {
+		if (vcpu->arch.shadow_srr1 & 0x40000000) {
 			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -705,7 +705,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 */
 			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
 		} else {
-			vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
+			vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
 			r = RESUME_GUEST;
@@ -753,7 +753,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		enum emulation_result er;
 		ulong flags;
 
-		flags = (vcpu->arch.shadow_msr & 0x1f0000ull);
+		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
 		if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -808,7 +808,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 	default:
 		/* Ugh - bork here! What did we get? */
-		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
+		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+			exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
 		r = RESUME_HOST;
 		BUG();
 		break;
@@ -169,7 +169,7 @@ kvmppc_handler_highmem:
 	stw	r0, VCPU_LAST_INST(r7)
 
 	std	r3, VCPU_PC(r7)
-	std	r4, VCPU_SHADOW_MSR(r7)
+	std	r4, VCPU_SHADOW_SRR1(r7)
 	std	r5, VCPU_FAULT_DEAR(r7)
 	std	r6, VCPU_FAULT_DSISR(r7)
 