Commit 09f98496 authored by Jose Ricardo Ziviani, committed by Paul Mackerras

KVM: PPC: Book3S: Add MMIO emulation for VMX instructions

This patch provides MMIO emulation for the vector indexed X-form
load/store instructions.

Instructions implemented:
lvx: the quadword in storage addressed by the result of EA &
0xffff_ffff_ffff_fff0 is loaded into VRT.

stvx: the contents of VRS are stored into the quadword in storage
addressed by the result of EA & 0xffff_ffff_ffff_fff0.
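
As an illustration only (not part of the patch): both instructions clear
the low four bits of the effective address, so the emulated access is
always aligned to a 16-byte quadword. A minimal sketch of that masking in
plain C; vmx_align_ea() is a hypothetical helper name for this sketch:

	#include <stdint.h>

	/* lvx/stvx ignore EA's low nibble, forcing quadword alignment. */
	static inline uint64_t vmx_align_ea(uint64_t ea)
	{
		return ea & 0xfffffffffffffff0ULL;	/* EA & ~0xF */
	}

The same masking appears in the emulation code below as
"vcpu->arch.vaddr_accessed &= ~0xFULL".
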
Reported-by: Gopesh Kumar Chaudhary <gopchaud@in.ibm.com>
Reported-by: Balamuruhan S <bala24@linux.vnet.ibm.com>
Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent d20fe50a
@@ -690,6 +690,7 @@ struct kvm_vcpu_arch {
 	u8 mmio_vsx_offset;
 	u8 mmio_vsx_copy_type;
 	u8 mmio_vsx_tx_sx_enabled;
+	u8 mmio_vmx_copy_nums;
 	u8 osi_needed;
 	u8 osi_enabled;
 	u8 papr_enabled;
@@ -804,6 +805,7 @@
 #define KVM_MMIO_REG_QPR	0x0040
 #define KVM_MMIO_REG_FQPR	0x0060
 #define KVM_MMIO_REG_VSX	0x0080
+#define KVM_MMIO_REG_VMX	0x00c0
 
 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
...
@@ -81,6 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				unsigned int rt, unsigned int bytes,
 				int is_default_endian, int mmio_sign_extend);
+extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
+		struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
+extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
+		struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				u64 val, unsigned int bytes,
 				int is_default_endian);
...
@@ -156,6 +156,12 @@
 #define OP_31_XOP_LFDX		599
 #define OP_31_XOP_LFDUX		631
 
+/* VMX Vector Load Instructions */
+#define OP_31_XOP_LVX		103
+
+/* VMX Vector Store Instructions */
+#define OP_31_XOP_STVX		231
+
 #define OP_LWZ	32
 #define OP_STFS	52
 #define OP_STFSU 53
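
For context: LVX and STVX are X-form instructions under primary opcode 31,
and 103 and 231 are their extended opcodes (bits 21-30 of the instruction
word). A hedged sketch of that decode, mirroring the kernel's get_op() and
get_xop() helpers; the function names below are local to the sketch:

	#include <stdint.h>

	/* Primary opcode: top 6 bits of the 32-bit instruction word. */
	static inline uint32_t primary_op(uint32_t inst)
	{
		return inst >> 26;
	}

	/* X-form extended opcode: bits 21-30 (IBM numbering), i.e. the
	 * low 11 bits minus the record bit. lvx yields 103, stvx 231. */
	static inline uint32_t extended_op(uint32_t inst)
	{
		return (inst >> 1) & 0x3ff;
	}
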
...
@@ -58,6 +58,18 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
+{
+	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
+		kvmppc_core_queue_vec_unavail(vcpu);
+		return true;
+	}
+
+	return false;
+}
+#endif /* CONFIG_ALTIVEC */
+
 /*
  * XXX to do:
  * lfiwax, lfiwzx
@@ -98,6 +110,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
 	vcpu->arch.mmio_sp64_extend = 0;
 	vcpu->arch.mmio_sign_extend = 0;
+	vcpu->arch.mmio_vmx_copy_nums = 0;
 
 	switch (get_op(inst)) {
 	case 31:
@@ -459,6 +472,29 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 					rs, 4, 1);
 			break;
 #endif /* CONFIG_VSX */
+
+#ifdef CONFIG_ALTIVEC
+		case OP_31_XOP_LVX:
+			if (kvmppc_check_altivec_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.vaddr_accessed &= ~0xFULL;
+			vcpu->arch.paddr_accessed &= ~0xFULL;
+			vcpu->arch.mmio_vmx_copy_nums = 2;
+			emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+					KVM_MMIO_REG_VMX|rt, 1);
+			break;
+
+		case OP_31_XOP_STVX:
+			if (kvmppc_check_altivec_disabled(vcpu))
+				return EMULATE_DONE;
+			vcpu->arch.vaddr_accessed &= ~0xFULL;
+			vcpu->arch.paddr_accessed &= ~0xFULL;
+			vcpu->arch.mmio_vmx_copy_nums = 2;
+			emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+					rs, 1);
+			break;
+#endif /* CONFIG_ALTIVEC */
+
 		default:
 			emulated = EMULATE_FAIL;
 			break;
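
The kvm_run MMIO window (run->mmio.data) carries at most 8 bytes per exit,
so the 16-byte VMX access is emulated as two 8-byte transfers:
mmio_vmx_copy_nums is set to 2 and the handlers added below walk both
doublewords, advancing the physical address after each half. A minimal
sketch of that split under stated assumptions; mmio_xfer() is a made-up
stand-in for the real MMIO plumbing:

	#include <stdint.h>

	/* Hypothetical stand-in for one 8-byte MMIO transfer. */
	static int mmio_xfer(uint64_t paddr, uint64_t *dword, int is_load)
	{
		(void)paddr; (void)dword; (void)is_load;
		return 0;			/* pretend it succeeded */
	}

	/* One 16-byte quadword access done as two 8-byte halves. */
	static int quadword_access(uint64_t ea, uint64_t buf[2], int is_load)
	{
		uint64_t paddr = ea & ~0xFULL;	/* quadword-align, as above */
		int copy_nums = 2;		/* halves left to transfer */
		int ret = 0;

		while (copy_nums && !ret) {
			ret = mmio_xfer(paddr, &buf[2 - copy_nums], is_load);
			paddr += 8;	/* cf. paddr_accessed += run->mmio.len */
			copy_nums--;
		}
		return ret;
	}
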
...
@@ -924,6 +924,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
+		u64 gpr)
+{
+	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+	u32 hi, lo;
+	u32 di;
+
+#ifdef __BIG_ENDIAN
+	hi = gpr >> 32;
+	lo = gpr & 0xffffffff;
+#else
+	lo = gpr >> 32;
+	hi = gpr & 0xffffffff;
+#endif
+
+	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
+	if (di > 1)
+		return;
+
+	if (vcpu->arch.mmio_host_swabbed)
+		di = 1 - di;
+
+	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
+	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+}
+#endif /* CONFIG_ALTIVEC */
+
 #ifdef CONFIG_PPC_FPU
 static inline u64 sp_to_dp(u32 fprs)
 {
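
The doubleword-index arithmetic above is easy to misread, so here is a
hedged, self-contained check of just that bookkeeping (helper name
invented for the sketch): mmio_vmx_copy_nums counts the halves still to
transfer, so di = 2 - mmio_vmx_copy_nums selects VR doubleword 0 on the
first completion and doubleword 1 on the second, and a host-swabbed
access flips the order.

	#include <assert.h>

	/* Mirrors di = 2 - mmio_vmx_copy_nums; di = 1 - di if swabbed. */
	static int vmx_dword_index(int copy_nums, int host_swabbed)
	{
		int di = 2 - copy_nums;	/* 2 left -> 0, 1 left -> 1 */

		if (host_swabbed)
			di = 1 - di;
		return di;
	}

	int main(void)
	{
		assert(vmx_dword_index(2, 0) == 0);	/* first half */
		assert(vmx_dword_index(1, 0) == 1);	/* second half */
		assert(vmx_dword_index(2, 1) == 1);	/* swabbed order */
		assert(vmx_dword_index(1, 1) == 0);
		return 0;
	}
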
@@ -1026,6 +1054,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 			 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
 			kvmppc_set_vsr_dword_dump(vcpu, gpr);
 		break;
+#endif
+#ifdef CONFIG_ALTIVEC
+	case KVM_MMIO_REG_VMX:
+		kvmppc_set_vmx_dword(vcpu, gpr);
+		break;
 #endif
 	default:
 		BUG();
@@ -1302,6 +1335,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
+#ifdef CONFIG_ALTIVEC
+/* handle quadword load access in two halves */
+int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rt, int is_default_endian)
+{
+	enum emulation_result emulated;
+
+	while (vcpu->arch.mmio_vmx_copy_nums) {
+		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+				is_default_endian, 0);
+
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.mmio_vmx_copy_nums--;
+	}
+
+	return emulated;
+}
+
+static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+	vector128 vrs = VCPU_VSX_VR(vcpu, rs);
+	u32 di;
+	u64 w0, w1;
+
+	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
+	if (di > 1)
+		return -1;
+
+	if (vcpu->arch.mmio_host_swabbed)
+		di = 1 - di;
+
+	w0 = vrs.u[di * 2];
+	w1 = vrs.u[di * 2 + 1];
+
+#ifdef __BIG_ENDIAN
+	*val = (w0 << 32) | w1;
+#else
+	*val = (w1 << 32) | w0;
+#endif
+	return 0;
+}
+
+/* handle quadword store in two halves */
+int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		unsigned int rs, int is_default_endian)
+{
+	u64 val = 0;
+	enum emulation_result emulated = EMULATE_DONE;
+
+	vcpu->arch.io_gpr = rs;
+
+	while (vcpu->arch.mmio_vmx_copy_nums) {
+		if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+			return EMULATE_FAIL;
+
+		emulated = kvmppc_handle_store(run, vcpu, val, 8,
+				is_default_endian);
+		if (emulated != EMULATE_DONE)
+			break;
+
+		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.mmio_vmx_copy_nums--;
+	}
+
+	return emulated;
+}
+
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
+		struct kvm_run *run)
+{
+	enum emulation_result emulated = EMULATE_FAIL;
+	int r;
+
+	vcpu->arch.paddr_accessed += run->mmio.len;
+
+	if (!vcpu->mmio_is_write) {
+		emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+				vcpu->arch.io_gpr, 1);
+	} else {
+		emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+				vcpu->arch.io_gpr, 1);
+	}
+
+	switch (emulated) {
+	case EMULATE_DO_MMIO:
+		run->exit_reason = KVM_EXIT_MMIO;
+		r = RESUME_HOST;
+		break;
+	case EMULATE_FAIL:
+		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		r = RESUME_HOST;
+		break;
+	default:
+		r = RESUME_GUEST;
+		break;
+	}
+
+	return r;
+}
+#endif /* CONFIG_ALTIVEC */
+
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
 	int r = 0;
@@ -1420,6 +1558,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 				return r;
 			}
 		}
+#endif
+#ifdef CONFIG_ALTIVEC
+		if (vcpu->arch.mmio_vmx_copy_nums > 0)
+			vcpu->arch.mmio_vmx_copy_nums--;
+
+		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
+			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+			if (r == RESUME_HOST) {
+				vcpu->mmio_needed = 1;
+				return r;
+			}
+		}
 #endif
 	} else if (vcpu->arch.osi_needed) {
 		u64 *gprs = run->osi.gprs;
...
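
Putting the pieces together, a single lvx/stvx to emulated MMIO costs two
round trips to userspace: the first half is queued during instruction
emulation, and the run-loop code above queues the second half once the
first completes. A hedged simulation of just that counter bookkeeping
(all names local to this sketch):

	#include <stdio.h>

	int main(void)
	{
		int mmio_vmx_copy_nums = 2;	/* set by the lvx/stvx case */
		int exits = 1;			/* first half already queued */

		for (;;) {	/* userspace finishes an exit, re-enters KVM_RUN */
			if (mmio_vmx_copy_nums > 0)
				mmio_vmx_copy_nums--;	/* one half completed */
			if (mmio_vmx_copy_nums == 0)
				break;			/* resume the guest */
			exits++;	/* kvmppc_emulate_mmio_vmx_loadstore() */
		}
		printf("MMIO exits per emulated lvx/stvx: %d\n", exits); /* 2 */
		return 0;
	}
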