Commit ceba57df authored by Paul Mackerras

KVM: PPC: Emulation for more integer loads and stores

This adds emulation for the following integer loads and stores,
thus enabling them to be used in a guest for accessing emulated
MMIO locations.

- lhaux
- lwaux
- lwzux
- ldu
- lwa
- stdux
- stwux
- stdu
- ldbrx
- stdbrx

Previously, most of these would cause an emulation failure exit to
userspace, though ldu and lwa got treated incorrectly as ld, and
stdu got treated incorrectly as std.

This also tidies up some of the formatting and updates the comment
listing instructions that still need to be implemented.

With this, all integer loads and stores that are defined in the Power
ISA v2.07 are emulated, except for those that are permitted to trap
when used on cache-inhibited or write-through mappings (and which do
in fact trap on POWER8), that is, lmw/stmw, lswi/stswi, lswx/stswx,
lq/stq, and l[bhwdq]arx/st[bhwdq]cx.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 91242fd1
...@@ -96,6 +96,8 @@ ...@@ -96,6 +96,8 @@
#define OP_31_XOP_LBZX 87 #define OP_31_XOP_LBZX 87
#define OP_31_XOP_STDX 149 #define OP_31_XOP_STDX 149
#define OP_31_XOP_STWX 151 #define OP_31_XOP_STWX 151
#define OP_31_XOP_STDUX 181
#define OP_31_XOP_STWUX 183
#define OP_31_XOP_STBX 215 #define OP_31_XOP_STBX 215
#define OP_31_XOP_LBZUX 119 #define OP_31_XOP_LBZUX 119
#define OP_31_XOP_STBUX 247 #define OP_31_XOP_STBUX 247
...@@ -104,13 +106,16 @@ ...@@ -104,13 +106,16 @@
#define OP_31_XOP_MFSPR 339 #define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LWAX 341 #define OP_31_XOP_LWAX 341
#define OP_31_XOP_LHAX 343 #define OP_31_XOP_LHAX 343
#define OP_31_XOP_LWAUX 373
#define OP_31_XOP_LHAUX 375 #define OP_31_XOP_LHAUX 375
#define OP_31_XOP_STHX 407 #define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439 #define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467 #define OP_31_XOP_MTSPR 467
#define OP_31_XOP_DCBI 470 #define OP_31_XOP_DCBI 470
#define OP_31_XOP_LDBRX 532
#define OP_31_XOP_LWBRX 534 #define OP_31_XOP_LWBRX 534
#define OP_31_XOP_TLBSYNC 566 #define OP_31_XOP_TLBSYNC 566
#define OP_31_XOP_STDBRX 660
#define OP_31_XOP_STWBRX 662 #define OP_31_XOP_STWBRX 662
#define OP_31_XOP_STFSX 663 #define OP_31_XOP_STFSX 663
#define OP_31_XOP_STFSUX 695 #define OP_31_XOP_STFSUX 695
......
...@@ -58,18 +58,14 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) ...@@ -58,18 +58,14 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
} }
#endif /* CONFIG_VSX */ #endif /* CONFIG_VSX */
/* XXX to do: /*
* lhax * XXX to do:
* lhaux * lfiwax, lfiwzx
* lswx * vector loads and stores
* lswi
* stswx
* stswi
* lha
* lhau
* lmw
* stmw
* *
* Instructions that trap when used on cache-inhibited mappings
* are not emulated here: multiple and string instructions,
* lq/stq, and the load-reserve/store-conditional instructions.
*/ */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{ {
...@@ -110,6 +106,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -110,6 +106,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break; break;
case OP_31_XOP_LWZUX:
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_LBZX: case OP_31_XOP_LBZX:
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break; break;
...@@ -121,26 +122,34 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -121,26 +122,34 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_31_XOP_STDX: case OP_31_XOP_STDX:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 8, 1);
8, 1); break;
case OP_31_XOP_STDUX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), 8, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_31_XOP_STWX: case OP_31_XOP_STWX:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 4, 1);
4, 1); break;
case OP_31_XOP_STWUX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_31_XOP_STBX: case OP_31_XOP_STBX:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 1, 1);
1, 1);
break; break;
case OP_31_XOP_STBUX: case OP_31_XOP_STBUX:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 1, 1);
1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
...@@ -148,6 +157,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -148,6 +157,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break; break;
case OP_31_XOP_LHAUX:
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_LHZX: case OP_31_XOP_LHZX:
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break; break;
...@@ -159,14 +173,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -159,14 +173,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_31_XOP_STHX: case OP_31_XOP_STHX:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 2, 1);
2, 1);
break; break;
case OP_31_XOP_STHUX: case OP_31_XOP_STHUX:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 2, 1);
2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
...@@ -186,8 +198,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -186,8 +198,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_31_XOP_STWBRX: case OP_31_XOP_STWBRX:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 4, 0);
4, 0);
break; break;
case OP_31_XOP_LHBRX: case OP_31_XOP_LHBRX:
...@@ -196,8 +207,16 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -196,8 +207,16 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_31_XOP_STHBRX: case OP_31_XOP_STHBRX:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 2, 0);
2, 0); break;
case OP_31_XOP_LDBRX:
emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
break;
case OP_31_XOP_STDBRX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), 8, 0);
break; break;
case OP_31_XOP_LDX: case OP_31_XOP_LDX:
...@@ -213,6 +232,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -213,6 +232,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
break; break;
case OP_31_XOP_LWAUX:
emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
#ifdef CONFIG_PPC_FPU #ifdef CONFIG_PPC_FPU
case OP_31_XOP_LFSX: case OP_31_XOP_LFSX:
if (kvmppc_check_fp_disabled(vcpu)) if (kvmppc_check_fp_disabled(vcpu))
...@@ -267,16 +291,14 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -267,16 +291,14 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (kvmppc_check_fp_disabled(vcpu)) if (kvmppc_check_fp_disabled(vcpu))
return EMULATE_DONE; return EMULATE_DONE;
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
VCPU_FPR(vcpu, rs), VCPU_FPR(vcpu, rs), 8, 1);
8, 1);
break; break;
case OP_31_XOP_STFDUX: case OP_31_XOP_STFDUX:
if (kvmppc_check_fp_disabled(vcpu)) if (kvmppc_check_fp_disabled(vcpu))
return EMULATE_DONE; return EMULATE_DONE;
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
VCPU_FPR(vcpu, rs), VCPU_FPR(vcpu, rs), 8, 1);
8, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
...@@ -284,8 +306,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -284,8 +306,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (kvmppc_check_fp_disabled(vcpu)) if (kvmppc_check_fp_disabled(vcpu))
return EMULATE_DONE; return EMULATE_DONE;
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
VCPU_FPR(vcpu, rs), VCPU_FPR(vcpu, rs), 4, 1);
4, 1);
break; break;
#endif #endif
...@@ -472,10 +493,22 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -472,10 +493,22 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
break; break;
#endif #endif
/* TBD: Add support for other 64 bit load variants like ldu etc. */
case OP_LD: case OP_LD:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); switch (inst & 3) {
case 0: /* ld */
emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
break;
case 1: /* ldu */
emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case 2: /* lwa */
emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
break;
default:
emulated = EMULATE_FAIL;
}
break; break;
case OP_LWZU: case OP_LWZU:
...@@ -498,31 +531,37 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -498,31 +531,37 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
4, 1); 4, 1);
break; break;
/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
case OP_STD: case OP_STD:
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, switch (inst & 3) {
kvmppc_get_gpr(vcpu, rs), case 0: /* std */
8, 1); emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), 8, 1);
break;
case 1: /* stdu */
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), 8, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
default:
emulated = EMULATE_FAIL;
}
break; break;
case OP_STWU: case OP_STWU:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 4, 1);
4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_STB: case OP_STB:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 1, 1);
1, 1);
break; break;
case OP_STBU: case OP_STBU:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 1, 1);
1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
...@@ -546,14 +585,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) ...@@ -546,14 +585,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_STH: case OP_STH:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 2, 1);
2, 1);
break; break;
case OP_STHU: case OP_STHU:
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs), 2, 1);
2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment