Commit 6020c0f6 authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Pass EA to updating emulation ops

When emulating updating load/store instructions (lwzu, stwu, ...) we need to
write the effective address of the load/store into a register.

Currently, we write the physical address in there, which is very wrong. So
instead let's save off where the virtual fault was on MMIO and use that
information as value to put into the register.

While at it, also move the XOP variants of the above instructions to the new
scheme of using the already known vaddr instead of calculating it themselves.
Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 8943633c
...@@ -464,6 +464,7 @@ struct kvm_vcpu_arch { ...@@ -464,6 +464,7 @@ struct kvm_vcpu_arch {
u32 epr; u32 epr;
#endif #endif
gpa_t paddr_accessed; gpa_t paddr_accessed;
gva_t vaddr_accessed;
u8 io_gpr; /* GPR used as IO source/target */ u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_is_bigendian; u8 mmio_is_bigendian;
......
...@@ -447,7 +447,7 @@ static int instruction_is_store(unsigned int instr) ...@@ -447,7 +447,7 @@ static int instruction_is_store(unsigned int instr)
} }
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned long gpa, int is_store) unsigned long gpa, gva_t ea, int is_store)
{ {
int ret; int ret;
u32 last_inst; u32 last_inst;
...@@ -494,6 +494,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -494,6 +494,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/ */
vcpu->arch.paddr_accessed = gpa; vcpu->arch.paddr_accessed = gpa;
vcpu->arch.vaddr_accessed = ea;
return kvmppc_emulate_mmio(run, vcpu); return kvmppc_emulate_mmio(run, vcpu);
} }
...@@ -547,7 +548,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -547,7 +548,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* No memslot means it's an emulated MMIO region */ /* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1)); unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
dsisr & DSISR_ISSTORE); dsisr & DSISR_ISSTORE);
} }
......
...@@ -351,6 +351,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -351,6 +351,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* MMIO */ /* MMIO */
vcpu->stat.mmio_exits++; vcpu->stat.mmio_exits++;
vcpu->arch.paddr_accessed = pte.raddr; vcpu->arch.paddr_accessed = pte.raddr;
vcpu->arch.vaddr_accessed = pte.eaddr;
r = kvmppc_emulate_mmio(run, vcpu); r = kvmppc_emulate_mmio(run, vcpu);
if ( r == RESUME_HOST_NV ) if ( r == RESUME_HOST_NV )
r = RESUME_HOST; r = RESUME_HOST;
......
...@@ -875,6 +875,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -875,6 +875,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Guest has mapped and accessed a page which is not /* Guest has mapped and accessed a page which is not
* actually RAM. */ * actually RAM. */
vcpu->arch.paddr_accessed = gpaddr; vcpu->arch.paddr_accessed = gpaddr;
vcpu->arch.vaddr_accessed = eaddr;
r = kvmppc_emulate_mmio(run, vcpu); r = kvmppc_emulate_mmio(run, vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS); kvmppc_account_exit(vcpu, MMIO_EXITS);
} }
......
...@@ -141,7 +141,6 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) ...@@ -141,7 +141,6 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{ {
u32 inst = kvmppc_get_last_inst(vcpu); u32 inst = kvmppc_get_last_inst(vcpu);
u32 ea;
int ra; int ra;
int rb; int rb;
int rs; int rs;
...@@ -185,12 +184,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -185,12 +184,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
kvmppc_set_gpr(vcpu, ra, ea); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_31_XOP_STWX: case OP_31_XOP_STWX:
...@@ -212,14 +207,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -212,14 +207,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs),
1, 1); 1, 1);
kvmppc_set_gpr(vcpu, rs, ea); kvmppc_set_gpr(vcpu, rs, vcpu->arch.vaddr_accessed);
break; break;
case OP_31_XOP_LHAX: case OP_31_XOP_LHAX:
...@@ -237,12 +228,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -237,12 +228,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, ea); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_31_XOP_MFSPR: case OP_31_XOP_MFSPR:
...@@ -318,14 +305,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -318,14 +305,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs),
2, 1); 2, 1);
kvmppc_set_gpr(vcpu, ra, ea); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_31_XOP_MTSPR: case OP_31_XOP_MTSPR:
...@@ -429,7 +412,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -429,7 +412,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst); ra = get_ra(inst);
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_LBZ: case OP_LBZ:
...@@ -441,7 +424,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -441,7 +424,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst); ra = get_ra(inst);
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_STW: case OP_STW:
...@@ -457,7 +440,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -457,7 +440,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs),
4, 1); 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_STB: case OP_STB:
...@@ -473,7 +456,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -473,7 +456,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs),
1, 1); 1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_LHZ: case OP_LHZ:
...@@ -485,7 +468,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -485,7 +468,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst); ra = get_ra(inst);
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_LHA: case OP_LHA:
...@@ -497,7 +480,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -497,7 +480,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst); ra = get_ra(inst);
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
case OP_STH: case OP_STH:
...@@ -513,7 +496,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -513,7 +496,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs), kvmppc_get_gpr(vcpu, rs),
2, 1); 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break; break;
default: default:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment