Commit d0752060 authored by Hollis Blanchard, committed by Avi Kivity

KVM: Portability: Move x86 FPU handling to x86.c

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 8776e519
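
Note: the kvm_arch_vcpu_ioctl_{get,set}_fpu() hooks introduced below back the existing KVM_GET_FPU/KVM_SET_FPU vcpu ioctls; only the implementation moves to x86.c, the userspace ABI is unchanged. As a minimal sketch of the path this patch touches, assuming a vcpu fd already obtained via KVM_CREATE_VM + KVM_CREATE_VCPU (error handling trimmed):

```c
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Fetch a vcpu's FPU state: the kernel fills struct kvm_fpu from the
 * guest fxsave image via kvm_arch_vcpu_ioctl_get_fpu(). */
static int dump_vcpu_fpu(int vcpu_fd)
{
	struct kvm_fpu fpu;

	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0) {
		perror("KVM_GET_FPU");
		return -1;
	}
	printf("fcw=%#x fsw=%#x mxcsr=%#x\n", fpu.fcw, fpu.fsw, fpu.mxcsr);
	return 0;
}
```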
@@ -633,6 +633,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+
 __init void kvm_arch_init(void);
 
 static inline void kvm_guest_enter(void)
@@ -73,28 +73,6 @@ static inline int valid_vcpu(int n)
 	return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
-		return;
-
-	vcpu->guest_fpu_loaded = 1;
-	fx_save(&vcpu->host_fx_image);
-	fx_restore(&vcpu->guest_fx_image);
-}
-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
-
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	if (!vcpu->guest_fpu_loaded)
-		return;
-
-	vcpu->guest_fpu_loaded = 0;
-	fx_save(&vcpu->guest_fx_image);
-	fx_restore(&vcpu->host_fx_image);
-}
-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
-
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
@@ -294,26 +272,6 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-void fx_init(struct kvm_vcpu *vcpu)
-{
-	unsigned after_mxcsr_mask;
-
-	/* Initialize guest FPU by resetting ours and saving into guest's */
-	preempt_disable();
-	fx_save(&vcpu->host_fx_image);
-	fpu_init();
-	fx_save(&vcpu->guest_fx_image);
-	fx_restore(&vcpu->host_fx_image);
-	preempt_enable();
-
-	vcpu->cr0 |= X86_CR0_ET;
-	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
-	vcpu->guest_fx_image.mxcsr = 0x1f80;
-	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
-	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
-}
-EXPORT_SYMBOL_GPL(fx_init);
-
 /*
  * Allocate some memory and give it an address in the guest physical address
  * space.
@@ -1422,67 +1380,6 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
 	return 0;
 }
 
-/*
- * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
- * we have asm/x86/processor.h
- */
-struct fxsave {
-	u16	cwd;
-	u16	swd;
-	u16	twd;
-	u16	fop;
-	u64	rip;
-	u64	rdp;
-	u32	mxcsr;
-	u32	mxcsr_mask;
-	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-#ifdef CONFIG_X86_64
-	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
-#else
-	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
-#endif
-};
-
-static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
-
-	vcpu_load(vcpu);
-
-	memcpy(fpu->fpr, fxsave->st_space, 128);
-	fpu->fcw = fxsave->cwd;
-	fpu->fsw = fxsave->swd;
-	fpu->ftwx = fxsave->twd;
-	fpu->last_opcode = fxsave->fop;
-	fpu->last_ip = fxsave->rip;
-	fpu->last_dp = fxsave->rdp;
-	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
-
-	vcpu_put(vcpu);
-
-	return 0;
-}
-
-static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
-
-	vcpu_load(vcpu);
-
-	memcpy(fxsave->st_space, fpu->fpr, 128);
-	fxsave->cwd = fpu->fcw;
-	fxsave->swd = fpu->fsw;
-	fxsave->twd = fpu->ftwx;
-	fxsave->fop = fpu->last_opcode;
-	fxsave->rip = fpu->last_ip;
-	fxsave->rdp = fpu->last_dp;
-	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
-
-	vcpu_put(vcpu);
-
-	return 0;
-}
-
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -1613,7 +1510,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		struct kvm_fpu fpu;
 
 		memset(&fpu, 0, sizeof fpu);
-		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
+		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
 		if (r)
 			goto out;
 		r = -EFAULT;
@@ -1628,7 +1525,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&fpu, argp, sizeof fpu))
 			goto out;
-		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
+		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
 		if (r)
 			goto out;
 		r = 0;
@@ -1785,3 +1785,106 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
+
+/*
+ * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
+ * we have asm/x86/processor.h
+ */
+struct fxsave {
+	u16	cwd;
+	u16	swd;
+	u16	twd;
+	u16	fop;
+	u64	rip;
+	u64	rdp;
+	u32	mxcsr;
+	u32	mxcsr_mask;
+	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
+#ifdef CONFIG_X86_64
+	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
+#else
+	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
+#endif
+};
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
+
+	vcpu_load(vcpu);
+
+	memcpy(fpu->fpr, fxsave->st_space, 128);
+	fpu->fcw = fxsave->cwd;
+	fpu->fsw = fxsave->swd;
+	fpu->ftwx = fxsave->twd;
+	fpu->last_opcode = fxsave->fop;
+	fpu->last_ip = fxsave->rip;
+	fpu->last_dp = fxsave->rdp;
+	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
+
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
+
+	vcpu_load(vcpu);
+
+	memcpy(fxsave->st_space, fpu->fpr, 128);
+	fxsave->cwd = fpu->fcw;
+	fxsave->swd = fpu->fsw;
+	fxsave->twd = fpu->ftwx;
+	fxsave->fop = fpu->last_opcode;
+	fxsave->rip = fpu->last_ip;
+	fxsave->rdp = fpu->last_dp;
+	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
+
+	vcpu_put(vcpu);
+
+	return 0;
+}
+
+void fx_init(struct kvm_vcpu *vcpu)
+{
+	unsigned after_mxcsr_mask;
+
+	/* Initialize guest FPU by resetting ours and saving into guest's */
+	preempt_disable();
+	fx_save(&vcpu->host_fx_image);
+	fpu_init();
+	fx_save(&vcpu->guest_fx_image);
+	fx_restore(&vcpu->host_fx_image);
+	preempt_enable();
+
+	vcpu->cr0 |= X86_CR0_ET;
+	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
+	vcpu->guest_fx_image.mxcsr = 0x1f80;
+	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
+}
+EXPORT_SYMBOL_GPL(fx_init);
+
+void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+		return;
+
+	vcpu->guest_fpu_loaded = 1;
+	fx_save(&vcpu->host_fx_image);
+	fx_restore(&vcpu->guest_fx_image);
+}
+EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
+
+void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->guest_fpu_loaded)
+		return;
+
+	vcpu->guest_fpu_loaded = 0;
+	fx_save(&vcpu->guest_fx_image);
+	fx_restore(&vcpu->host_fx_image);
+}
+EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
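
A note on the fx_init() constants: offsetof(struct i387_fxsave_struct, st_space) is 32, so the memset clears everything from the FP/SSE register file onward while preserving the control and status words at the front of the fxsave area, and 0x1f80 is the architectural MXCSR reset value (all SSE exceptions masked, round-to-nearest). A standalone check, using a local mirror of the layout in this patch rather than the kernel's i387_fxsave_struct:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local mirror of the 64-bit fxsave layout from the patch: the fields
 * ahead of st_space (cwd..mxcsr_mask) occupy exactly 32 bytes. */
struct fxsave {
	uint16_t cwd, swd, twd, fop;
	uint64_t rip, rdp;
	uint32_t mxcsr, mxcsr_mask;
	uint32_t st_space[32];
	uint32_t xmm_space[64];
};

int main(void)
{
	/* fx_init() zeroes from this offset to the end of the image. */
	printf("after_mxcsr_mask = %zu\n", offsetof(struct fxsave, st_space));
	return 0;
}
```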
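For readers new to this code, kvm_load_guest_fpu()/kvm_put_guest_fpu() implement lazy FPU switching: guest state is swapped into the hardware registers at most once per load/put bracket, and only when the guest is actually using the FPU. A self-contained model of the protocol, with hypothetical types and memcpy standing in for the real fxsave/fxrstor instructions:

```c
#include <string.h>

struct fx_image { unsigned char bytes[512]; };

static struct fx_image cpu_fpu;	/* stands in for the hardware FPU */

static void fx_save(struct fx_image *img)	 { memcpy(img, &cpu_fpu, sizeof *img); }
static void fx_restore(const struct fx_image *img) { memcpy(&cpu_fpu, img, sizeof *img); }

struct vcpu {
	int fpu_active;		/* guest is allowed to touch the FPU */
	int guest_fpu_loaded;	/* guest image currently lives in the FPU */
	struct fx_image host_fx_image, guest_fx_image;
};

/* On entry to guest mode: swap in guest state once; the flag makes
 * repeated calls free until the matching put(). */
void load_guest_fpu(struct vcpu *v)
{
	if (!v->fpu_active || v->guest_fpu_loaded)
		return;
	v->guest_fpu_loaded = 1;
	fx_save(&v->host_fx_image);
	fx_restore(&v->guest_fx_image);
}

/* On heavyweight exit: preserve the guest image and give the host
 * its FPU state back, but only if a load actually happened. */
void put_guest_fpu(struct vcpu *v)
{
	if (!v->guest_fpu_loaded)
		return;
	v->guest_fpu_loaded = 0;
	fx_save(&v->guest_fx_image);
	fx_restore(&v->host_fx_image);
}
```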