Commit d682916a authored by Anton Blanchard, committed by Alexander Graf

KVM: PPC: Book3S HV: Basic little-endian guest support

We create a guest MSR from scratch when delivering exceptions in
a few places.  Instead of extracting LPCR[ILE] and inserting it
into MSR_LE each time, we simply create a new variable intr_msr which
contains the entire MSR to use.  For a little-endian guest, userspace
needs to set the ILE (interrupt little-endian) bit in the LPCR for
each vcpu (or at least one vcpu in each virtual core).
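As a point of reference, here is a minimal, hypothetical userspace sketch of how a VMM could flip LPCR[ILE] for one vcpu through KVM's one-reg interface. It is not part of this patch: set_guest_little_endian() and vcpu_fd are illustrative names, and the KVM_REG_PPC_LPCR register (treated here as a 32-bit one-reg) and the 1 << 25 position of the ILE bit are assumptions taken from the kernel headers of this era.

/*
 * Hypothetical helper: read-modify-write the vcpu's LPCR so that
 * LPCR[ILE] is set, making the guest take interrupts little-endian.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define LPCR_ILE_BIT	(1u << 25)	/* assumed ILE bit position */

static int set_guest_little_endian(int vcpu_fd)
{
	uint32_t lpcr = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR,
		.addr = (uintptr_t)&lpcr,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	lpcr |= LPCR_ILE_BIT;	/* kvmppc_set_lpcr() propagates this into
				   vcpu->arch.intr_msr for the vcore */

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

As the kvmppc_set_lpcr() hunk below shows, the ILE change is propagated only to vcpus of the vcore whose LPCR was written, which is why userspace must do this for each vcpu (or at least one vcpu per virtual core).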

[paulus@samba.org - removed H_SET_MODE implementation from original
version of the patch, and made kvmppc_set_lpcr update vcpu->arch.intr_msr.]
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 8563bf52
@@ -636,6 +636,7 @@ struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
+	unsigned long intr_msr;
 #endif
 };
...
@@ -480,6 +480,7 @@ int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
...
@@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void)
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }
 
 /*
...
@@ -787,6 +787,27 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
 	u64 mask;
 
 	spin_lock(&vc->lock);
+	/*
+	 * If ILE (interrupt little-endian) has changed, update the
+	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
+	 */
+	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
+		struct kvm *kvm = vcpu->kvm;
+		struct kvm_vcpu *vcpu;
+		int i;
+
+		mutex_lock(&kvm->lock);
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			if (vcpu->arch.vcore != vc)
+				continue;
+			if (new_lpcr & LPCR_ILE)
+				vcpu->arch.intr_msr |= MSR_LE;
+			else
+				vcpu->arch.intr_msr &= ~MSR_LE;
+		}
+		mutex_unlock(&kvm->lock);
+	}
+
 	/*
 	 * Userspace can only modify DPFD (default prefetch depth),
 	 * ILE (interrupt little-endian) and TC (translation control).
@@ -1155,6 +1176,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
+	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
...
@@ -812,8 +812,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:	mtspr	SPRN_SRR0, r10
 	mr	r10,r0
 	mtspr	SPRN_SRR1, r11
-	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11,r11,63
+	ld	r11, VCPU_INTR_MSR(r4)
 5:
 
 /*
@@ -1551,8 +1550,7 @@ kvmppc_hdsi:
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1621,8 +1619,7 @@ kvmppc_hisi:
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -1665,8 +1662,7 @@ sc_1_fast_return:
 	mtspr	SPRN_SRR0,r10
 	mtspr	SPRN_SRR1,r11
 	li	r10, BOOK3S_INTERRUPT_SYSCALL
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	mr	r4,r9
 	b	fast_guest_return
@@ -1994,8 +1990,7 @@ machine_check_realmode:
 	beq	mc_cont
 	/* If not, deliver a machine check. SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 /*
...
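Aside on the instruction pair removed above: li can only materialize a 16-bit signed immediate, and MSR_SF sits in bit 63, so the old code built MSR_SF | MSR_ME by loading a small constant and rotating it into place. A hedged worked expansion of that trick, using the 64-bit PowerPC MSR bit values (MSR_ME = 0x1000, MSR_SF = bit 63):

/*
 * Worked expansion of the removed sequence:
 *     li     r11,(MSR_ME << 1) | 1
 *     rotldi r11,r11,63        (rotate left by 63 == rotate right by 1)
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t r11 = (0x1000ULL << 1) | 1;	/* 0x2001, fits a 16-bit signed immediate */

	r11 = (r11 << 63) | (r11 >> 1);		/* rotldi r11,r11,63 */
	assert(r11 == 0x8000000000001000ULL);	/* MSR_SF | MSR_ME */
	return 0;
}

With intr_msr available, a single ld of VCPU_INTR_MSR replaces both instructions and, more importantly, also carries MSR_LE whenever the guest's LPCR[ILE] is set.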