Commit e5ee5422 authored by Aneesh Kumar K.V, committed by Alexander Graf

KVM: PPC: BOOK3S: PR: Enable Little Endian PR guest

This patch makes sure we inherit the LE bit correctly in the different cases
so that we can run a Little Endian distro in PR mode.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 8f20a3ab
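
For context (not part of the commit itself), here is a minimal userspace sketch of how a VMM such as QEMU could drive the KVM_REG_PPC_LPCR handling that this patch adds to PR KVM. The helper name set_guest_interrupt_endian, the vcpu_fd parameter and the local LPCR_ILE_BIT definition are illustrative assumptions, not code from the patch; the PR implementation below only honours the ILE bit of the value.

/*
 * Illustrative sketch only: builds against powerpc kernel headers and
 * assumes vcpu_fd is an already-created KVM vcpu file descriptor.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* LPCR[ILE] (ISA bit 38): deliver guest interrupts in Little Endian mode. */
#define LPCR_ILE_BIT	(1u << (63 - 38))

static int set_guest_interrupt_endian(int vcpu_fd, int little_endian)
{
	/*
	 * KVM_REG_PPC_LPCR is encoded as a 32-bit one-reg; newer kernels
	 * also provide KVM_REG_PPC_LPCR_64 for the full register.
	 */
	uint32_t lpcr = little_endian ? LPCR_ILE_BIT : 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR,
		.addr = (uintptr_t)&lpcr,
	};

	/* PR KVM only looks at the ILE bit (see kvmppc_set_lpcr_pr below). */
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0) {
		perror("KVM_SET_ONE_REG(KVM_REG_PPC_LPCR)");
		return -1;
	}
	return 0;
}

On the kernel side, kvmppc_set_lpcr_pr() folds that bit into vcpu->arch.intr_msr, the value that kvmppc_mmu_book3s_64_reset_msr() now uses instead of a hard-coded MSR_SF.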
@@ -562,6 +562,7 @@ struct kvm_vcpu_arch {
 #ifdef CONFIG_PPC_BOOK3S
 	ulong fault_dar;
 	u32 fault_dsisr;
+	unsigned long intr_msr;
 #endif
 #ifdef CONFIG_BOOKE
@@ -654,7 +655,6 @@ struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
-	unsigned long intr_msr;
 #endif
 };
...
@@ -493,7 +493,6 @@ int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
-	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -528,6 +527,7 @@ int main(void)
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
 	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
 	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
...
@@ -38,7 +38,7 @@
 static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, MSR_SF);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }

 static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
...
@@ -249,7 +249,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 	ulong smsr = vcpu->arch.shared->msr;
 	/* Guest MSR values */
-	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
+	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
 	/* Process MSR values */
 	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 	/* External providers the guest reserved */
@@ -1110,6 +1110,15 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_HIOR:
 		*val = get_reg_val(id, to_book3s(vcpu)->hior);
 		break;
+	case KVM_REG_PPC_LPCR:
+		/*
+		 * We are only interested in the LPCR_ILE bit
+		 */
+		if (vcpu->arch.intr_msr & MSR_LE)
+			*val = get_reg_val(id, LPCR_ILE);
+		else
+			*val = get_reg_val(id, 0);
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -1118,6 +1127,14 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }

+static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+	if (new_lpcr & LPCR_ILE)
+		vcpu->arch.intr_msr |= MSR_LE;
+	else
+		vcpu->arch.intr_msr &= ~MSR_LE;
+}
+
 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 				 union kvmppc_one_reg *val)
 {
@@ -1128,6 +1145,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 		to_book3s(vcpu)->hior = set_reg_val(id, *val);
 		to_book3s(vcpu)->hior_explicit = true;
 		break;
+	case KVM_REG_PPC_LPCR:
+		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -1180,6 +1200,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 	vcpu->arch.pvr = 0x3C0301;
 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		vcpu->arch.pvr = mfspr(SPRN_PVR);
+	vcpu->arch.intr_msr = MSR_SF;
 #else
 	/* default to book3s_32 (750) */
 	vcpu->arch.pvr = 0x84202;
...