Commit efff1912 authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Store FP/VSX/VMX state in thread_fp/vr_state structures

This uses struct thread_fp_state and struct thread_vr_state to store
the floating-point, VMX/Altivec and VSX state, rather than flat arrays.
This makes transferring the state to/from the thread_struct simpler
and allows us to unify the get/set_one_reg implementations for the
VSX registers.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 09548fda
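
For context, the two container structures the patch switches to are the same ones the powerpc core uses for a task's FP/VSX and VMX state. A minimal sketch of their layout, assuming the definitions in arch/powerpc/include/asm/processor.h from this era (illustrative, not the authoritative header):

/*
 * Sketch of the thread-state containers adopted by this patch.
 * With CONFIG_VSX, TS_FPRWIDTH is 2, so each fpr[i] holds both
 * 64-bit doublewords of VSX register i; without VSX it is 1.
 * TS_FPROFFSET (0) selects the doubleword holding FP register i.
 */
struct thread_fp_state {
        u64     fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
        u64     fpscr;          /* floating-point status and control */
};

struct thread_vr_state {
        vector128       vr[32] __attribute__((aligned(16)));
        vector128       vscr __attribute__((aligned(16)));
};

Storing vcpu FP/VMX state in these structures lets the transfer to and from the thread_struct become a plain struct assignment, which is what the book3s_pr.c and booke.c hunks below reduce to.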
@@ -410,8 +410,7 @@ struct kvm_vcpu_arch {
         ulong gpr[32];
-        u64 fpr[32];
-        u64 fpscr;
+        struct thread_fp_state fp;
 #ifdef CONFIG_SPE
         ulong evr[32];
@@ -420,12 +419,7 @@ struct kvm_vcpu_arch {
         u64 acc;
 #endif
 #ifdef CONFIG_ALTIVEC
-        vector128 vr[32];
-        vector128 vscr;
-#endif
-#ifdef CONFIG_VSX
-        u64 vsr[64];
+        struct thread_vr_state vr;
 #endif
 #ifdef CONFIG_KVM_BOOKE_HV
@@ -619,6 +613,8 @@ struct kvm_vcpu_arch {
 #endif
 };

+#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+
 /* Values for vcpu->arch.state */
 #define KVMPPC_VCPU_NOTREADY    0
 #define KVMPPC_VCPU_RUNNABLE    1
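
The VCPU_FPR() accessor just added encodes the key layout fact: FP register i is doubleword TS_FPROFFSET of combined FP/VSX register i. Hypothetical inline helpers, equivalent in effect to the macro (illustrative only, not part of the patch):

/* FP register i lives in fp.fpr[i][TS_FPROFFSET]; with CONFIG_VSX
 * the other doubleword, fp.fpr[i][1], is the low half of VSR i. */
static inline u64 vcpu_get_fpr(struct kvm_vcpu *vcpu, int i)
{
        return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}

static inline void vcpu_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
        vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
}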
...
@@ -425,14 +425,11 @@ int main(void)
         DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
         DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
         DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
-        DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
-        DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+        DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
+        DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fp.fpscr));
 #ifdef CONFIG_ALTIVEC
-        DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
-        DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+        DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
+        DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vr.vscr));
 #endif
-#ifdef CONFIG_VSX
-        DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
-#endif
         DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
         DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
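
These asm-offsets.c updates matter because constants such as VCPU_FPRS are how the assembly in the guest entry/exit path addresses C structure members. At build time each DEFINE() becomes a #define in the generated asm-offsets.h; a sketch of the result (offset values invented purely for illustration):

/* Generated asm-offsets.h -- sketch only; real values differ. */
#define VCPU_FPRS       624     /* offsetof(struct kvm_vcpu, arch.fp.fpr) */
#define VCPU_FPSCR      1136    /* offsetof(struct kvm_vcpu, arch.fp.fpscr) */

Since VCPU_VSRS is dropped here, the VSX save/restore assembly further down switches to indexing from VCPU_FPRS instead.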
...
@@ -577,10 +577,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                 break;
         case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                 i = reg->id - KVM_REG_PPC_FPR0;
-                val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
+                val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
                 break;
         case KVM_REG_PPC_FPSCR:
-                val = get_reg_val(reg->id, vcpu->arch.fpscr);
+                val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
                 break;
 #ifdef CONFIG_ALTIVEC
         case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -588,19 +588,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                         r = -ENXIO;
                         break;
                 }
-                val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
+                val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
                 break;
         case KVM_REG_PPC_VSCR:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                         r = -ENXIO;
                         break;
                 }
-                val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
+                val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
                 break;
         case KVM_REG_PPC_VRSAVE:
                 val = get_reg_val(reg->id, vcpu->arch.vrsave);
                 break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+                if (cpu_has_feature(CPU_FTR_VSX)) {
+                        long int i = reg->id - KVM_REG_PPC_VSR0;
+                        val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
+                        val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
+                } else {
+                        r = -ENXIO;
+                }
+                break;
+#endif /* CONFIG_VSX */
         case KVM_REG_PPC_DEBUG_INST: {
                 u32 opcode = INS_TW;
                 r = copy_to_user((u32 __user *)(long)reg->addr,
@@ -656,10 +667,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                 break;
         case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                 i = reg->id - KVM_REG_PPC_FPR0;
-                vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
+                VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
                 break;
         case KVM_REG_PPC_FPSCR:
-                vcpu->arch.fpscr = set_reg_val(reg->id, val);
+                vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
                 break;
 #ifdef CONFIG_ALTIVEC
         case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -667,14 +678,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                         r = -ENXIO;
                         break;
                 }
-                vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+                vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
                 break;
         case KVM_REG_PPC_VSCR:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                         r = -ENXIO;
                         break;
                 }
-                vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
+                vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
                 break;
         case KVM_REG_PPC_VRSAVE:
                 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
@@ -684,6 +695,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                 vcpu->arch.vrsave = set_reg_val(reg->id, val);
                 break;
 #endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+                if (cpu_has_feature(CPU_FTR_VSX)) {
+                        long int i = reg->id - KVM_REG_PPC_VSR0;
+                        vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
+                        vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
+                } else {
+                        r = -ENXIO;
+                }
+                break;
+#endif /* CONFIG_VSX */
 #ifdef CONFIG_KVM_XICS
         case KVM_REG_PPC_ICP_STATE:
                 if (!vcpu->arch.icp) {
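
With these cases added, the VSX one_reg handling lives once in the generic ioctl path instead of per-flavour copies (the HV and PR versions are deleted below), and each 128-bit VSR transfers as two 64-bit doublewords mapping straight onto fp.fpr[i][0] and fp.fpr[i][1]. A hypothetical userspace sketch of reading one VSR through this interface (assumes an open vcpu fd; error handling elided):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Illustrative only: fetch VSX register n as two doublewords. */
static int get_vsr(int vcpu_fd, int n, uint64_t dw[2])
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_PPC_VSR0 + n,   /* 128-bit register id */
                .addr = (uintptr_t)dw,
        };
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}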
...
@@ -811,27 +811,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
         case KVM_REG_PPC_SDAR:
                 *val = get_reg_val(id, vcpu->arch.sdar);
                 break;
-#ifdef CONFIG_VSX
-        case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-                if (cpu_has_feature(CPU_FTR_VSX)) {
-                        /* VSX => FP reg i is stored in arch.vsr[2*i] */
-                        long int i = id - KVM_REG_PPC_FPR0;
-                        *val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
-                } else {
-                        /* let generic code handle it */
-                        r = -EINVAL;
-                }
-                break;
-        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
-                if (cpu_has_feature(CPU_FTR_VSX)) {
-                        long int i = id - KVM_REG_PPC_VSR0;
-                        val->vsxval[0] = vcpu->arch.vsr[2 * i];
-                        val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
-                } else {
-                        r = -ENXIO;
-                }
-                break;
-#endif /* CONFIG_VSX */
         case KVM_REG_PPC_VPA_ADDR:
                 spin_lock(&vcpu->arch.vpa_update_lock);
                 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -914,27 +893,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
         case KVM_REG_PPC_SDAR:
                 vcpu->arch.sdar = set_reg_val(id, *val);
                 break;
-#ifdef CONFIG_VSX
-        case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
-                if (cpu_has_feature(CPU_FTR_VSX)) {
-                        /* VSX => FP reg i is stored in arch.vsr[2*i] */
-                        long int i = id - KVM_REG_PPC_FPR0;
-                        vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
-                } else {
-                        /* let generic code handle it */
-                        r = -EINVAL;
-                }
-                break;
-        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
-                if (cpu_has_feature(CPU_FTR_VSX)) {
-                        long int i = id - KVM_REG_PPC_VSR0;
-                        vcpu->arch.vsr[2 * i] = val->vsxval[0];
-                        vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
-                } else {
-                        r = -ENXIO;
-                }
-                break;
-#endif /* CONFIG_VSX */
         case KVM_REG_PPC_VPA_ADDR:
                 addr = set_reg_val(id, *val);
                 r = -EINVAL;
...
@@ -1889,7 +1889,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 BEGIN_FTR_SECTION
         reg = 0
         .rept   32
-        li      r6,reg*16+VCPU_VSRS
+        li      r6,reg*16+VCPU_FPRS
         STXVD2X(reg,R6,R3)
         reg = reg + 1
         .endr
@@ -1951,7 +1951,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 BEGIN_FTR_SECTION
         reg = 0
         .rept   32
-        li      r7,reg*16+VCPU_VSRS
+        li      r7,reg*16+VCPU_FPRS
         LXVD2X(reg,R7,R4)
         reg = reg + 1
         .endr
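
The one-token change above is the payoff in assembly: now that fp.fpr[] is an array of 16-byte entries each holding a full VSX register, STXVD2X/LXVD2X can address the save area as VCPU_FPRS + reg*16, and the dedicated VCPU_VSRS offset is gone. A compile-time sanity check one could write for this assumption (illustrative; assumes CONFIG_VSX, where TS_FPRWIDTH is 2):

/* Each fpr[] entry must be exactly the 16 bytes STXVD2X stores. */
_Static_assert(sizeof(((struct thread_fp_state *)0)->fpr[0]) == 16,
               "fpr[i] holds one full 128-bit VSX register");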
...
@@ -545,12 +545,6 @@ static inline int get_fpr_index(int i)
 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 {
         struct thread_struct *t = &current->thread;
-        u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
-        u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
-        u64 *thread_fpr = &t->fp_state.fpr[0][0];
-        int i;

         /*
          * VSX instructions can access FP and vector registers, so if
@@ -575,24 +569,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
                  */
                 if (current->thread.regs->msr & MSR_FP)
                         giveup_fpu(current);
-                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
-                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
-
-                vcpu->arch.fpscr = t->fp_state.fpscr;
-
-#ifdef CONFIG_VSX
-                if (cpu_has_feature(CPU_FTR_VSX))
-                        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
-                                vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
+                vcpu->arch.fp = t->fp_state;
         }

 #ifdef CONFIG_ALTIVEC
         if (msr & MSR_VEC) {
                 if (current->thread.regs->msr & MSR_VEC)
                         giveup_altivec(current);
-                memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
-                vcpu->arch.vscr = t->vr_state.vscr;
+                vcpu->arch.vr = t->vr_state;
         }
 #endif
@@ -640,12 +624,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                              ulong msr)
 {
         struct thread_struct *t = &current->thread;
-        u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
-        u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
-        u64 *thread_fpr = &t->fp_state.fpr[0][0];
-        int i;

         /* When we have paired singles, we emulate in software */
         if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
@@ -683,13 +661,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #endif

         if (msr & MSR_FP) {
-                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
-                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-#ifdef CONFIG_VSX
-                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
-                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
-#endif
-                t->fp_state.fpscr = vcpu->arch.fpscr;
+                t->fp_state = vcpu->arch.fp;
                 t->fpexc_mode = 0;
                 enable_kernel_fp();
                 load_fp_state(&t->fp_state);
@@ -697,8 +669,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,

         if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-                memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
-                t->vr_state.vscr = vcpu->arch.vscr;
+                t->vr_state = vcpu->arch.vr;
                 t->vrsave = -1;
                 enable_kernel_altivec();
                 load_vr_state(&t->vr_state);
@@ -1118,19 +1089,6 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
         case KVM_REG_PPC_HIOR:
                 *val = get_reg_val(id, to_book3s(vcpu)->hior);
                 break;
-#ifdef CONFIG_VSX
-        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
-                long int i = id - KVM_REG_PPC_VSR0;
-
-                if (!cpu_has_feature(CPU_FTR_VSX)) {
-                        r = -ENXIO;
-                        break;
-                }
-                val->vsxval[0] = vcpu->arch.fpr[i];
-                val->vsxval[1] = vcpu->arch.vsr[i];
-                break;
-        }
-#endif /* CONFIG_VSX */
         default:
                 r = -EINVAL;
                 break;
@@ -1149,19 +1107,6 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                 to_book3s(vcpu)->hior = set_reg_val(id, *val);
                 to_book3s(vcpu)->hior_explicit = true;
                 break;
-#ifdef CONFIG_VSX
-        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
-                long int i = id - KVM_REG_PPC_VSR0;
-
-                if (!cpu_has_feature(CPU_FTR_VSX)) {
-                        r = -ENXIO;
-                        break;
-                }
-                vcpu->arch.fpr[i] = val->vsxval[0];
-                vcpu->arch.vsr[i] = val->vsxval[1];
-                break;
-        }
-#endif /* CONFIG_VSX */
         default:
                 r = -EINVAL;
                 break;
...
@@ -707,9 +707,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         fpexc_mode = current->thread.fpexc_mode;

         /* Restore guest FPU state to thread */
-        memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
-               sizeof(vcpu->arch.fpr));
-        current->thread.fp_state.fpscr = vcpu->arch.fpscr;
+        current->thread.fp_state = vcpu->arch.fp;

         /*
          * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -745,9 +743,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         vcpu->fpu_active = 0;

         /* Save guest FPU state from thread */
-        memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
-               sizeof(vcpu->arch.fpr));
-        vcpu->arch.fpscr = current->thread.fp_state.fpscr;
+        vcpu->arch.fp = current->thread.fp_state;

         /* Restore userspace FPU state from stack */
         current->thread.fp_state = fp;
...
@@ -656,14 +656,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                 break;
         case KVM_MMIO_REG_FPR:
-                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                 break;
 #ifdef CONFIG_PPC_BOOK3S
         case KVM_MMIO_REG_QPR:
                 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                 break;
         case KVM_MMIO_REG_FQPR:
-                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                 break;
 #endif
...