Commit 41a8645a authored by Paul Mackerras

KVM: PPC: Book3S PR: Add emulation for slbfee. instruction

Recent kernels, since commit e15a4fea ("powerpc/64s/hash: Add
some SLB debugging tests", 2018-10-03), use the slbfee. instruction,
which PR KVM currently does not have code to emulate.  Consequently,
recent kernels fail to boot under PR KVM.  This patch adds emulation of
slbfee., enabling these kernels to boot successfully.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent d1393711
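
For reference, slbfee. ("SLB Find Entry ESID", record form) searches the SLB for an entry matching the effective address in RB; on a hit it returns the entry's VSID doubleword in RT and sets CR0[EQ], while on a miss the emulation below returns 0 in RT and leaves CR0[EQ] clear. In both cases CR0[LT] and CR0[GT] are cleared and CR0[SO] is copied from XER[SO]. The C sketch below is not part of the commit; it only illustrates the CR0 arithmetic that the kvmppc_core_emulate_op_pr() hunk performs, assuming the usual layout in which CR0 is the top nibble of the 32-bit CR image (CR0_SHIFT == 28, CR0_MASK == 0xF0000000) and using the made-up helper name slbfee_cr0().

#include <stdint.h>
#include <stdbool.h>

#define CR0_SHIFT	28		/* CR0 occupies the top nibble of CR (assumption) */
#define CR0_MASK	0xF0000000u
#define XER_SO		0x80000000u	/* summary-overflow bit in XER */

/*
 * Illustration only: compute the CR image slbfee. leaves behind.
 * LT and GT are cleared, EQ is set on a hit, SO is copied from XER[SO].
 */
static uint32_t slbfee_cr0(uint32_t cr, uint32_t xer, bool found)
{
	cr &= ~CR0_MASK;				/* clear LT, GT, EQ, SO */
	if (found)
		cr |= 2u << CR0_SHIFT;			/* EQ: a matching SLB entry exists */
	cr |= (xer & XER_SO) >> (31 - CR0_SHIFT);	/* copy XER[SO] into CR0[SO] */
	return cr;
}

For example, with found == true and XER[SO] set, the top nibble comes out as EQ|SO, i.e. 0x30000000.
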
@@ -377,6 +377,7 @@ struct kvmppc_mmu {
 	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
 	u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
 	u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
+	int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
 	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
 	void (*slbia)(struct kvm_vcpu *vcpu);
 	/* book3s */

@@ -425,6 +425,7 @@ void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
 	mmu->slbmte = NULL;
 	mmu->slbmfee = NULL;
 	mmu->slbmfev = NULL;
+	mmu->slbfee = NULL;
 	mmu->slbie = NULL;
 	mmu->slbia = NULL;
 }

@@ -435,6 +435,19 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
 }
 
+static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
+				       ulong *ret_slb)
+{
+	struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
+
+	if (slbe) {
+		*ret_slb = slbe->origv;
+		return 0;
+	}
+	*ret_slb = 0;
+	return -ENOENT;
+}
+
 static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
 {
 	struct kvmppc_slb *slbe;

@@ -670,6 +683,7 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
 	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
 	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
 	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
+	mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
 	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
 	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
 	mmu->xlate = kvmppc_mmu_book3s_64_xlate;

@@ -47,6 +47,7 @@
 #define OP_31_XOP_SLBMFEV	851
 #define OP_31_XOP_EIOIO		854
 #define OP_31_XOP_SLBMFEE	915
+#define OP_31_XOP_SLBFEE	979
 
 #define OP_31_XOP_TBEGIN	654
 #define OP_31_XOP_TABORT	910

@@ -416,6 +417,23 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 			vcpu->arch.mmu.slbia(vcpu);
 			break;
+		case OP_31_XOP_SLBFEE:
+			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
+				return EMULATE_FAIL;
+			} else {
+				ulong b, t;
+				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;
+
+				b = kvmppc_get_gpr(vcpu, rb);
+				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
+					cr |= 2 << CR0_SHIFT;
+				kvmppc_set_gpr(vcpu, rt, t);
+				/* copy XER[SO] bit to CR0[SO] */
+				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
+					(31 - CR0_SHIFT);
+				kvmppc_set_cr(vcpu, cr);
+			}
+			break;
 		case OP_31_XOP_SLBMFEE:
 			if (!vcpu->arch.mmu.slbmfee) {
 				emulated = EMULATE_FAIL;
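
Two details of the OP_31_XOP_SLBFEE case above are worth noting. slbfee. is defined only in its record form, so the handler rejects an encoding whose Rc bit (the low bit of the instruction word, tested with inst & 1) is clear, and it also rejects vCPUs whose MMU does not provide the slbfee hook, such as the 32-bit Book3S MMU initialised earlier; both paths fall back to EMULATE_FAIL, just as the neighbouring SLB cases do when their hook is missing. For context, the guest-side SLB debugging code introduced by commit e15a4fea probes for an entry roughly as in the hypothetical sketch below. The function name is made up for illustration, the real kernel emits the opcode via a raw-encoding macro to cope with older assemblers, and the instruction is privileged, so this can only run in supervisor state.

/*
 * Hypothetical guest-side probe: ask the MMU whether an SLB entry
 * translating 'ea' is present.  Returns CR0[EQ] and stores the
 * matching SLBV (or 0 on a miss) in *slbv.
 */
static inline int slb_entry_present(unsigned long ea, unsigned long *slbv)
{
	unsigned long v, cr;

	asm volatile("slbfee. %0,%2\n\t"	/* v = SLBV, CR0[EQ] = hit */
		     "mfcr %1"			/* capture the condition register */
		     : "=&r" (v), "=r" (cr)
		     : "r" (ea)
		     : "cr0");
	*slbv = v;
	return (cr >> 29) & 1;			/* CR0[EQ] lives at bit 0x20000000 */
}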