Commit ffde22ac authored by Ed Swierk, committed by Avi Kivity

KVM: Xen PV-on-HVM guest support

Support for Xen PV-on-HVM guests can be implemented almost entirely in
userspace, except for handling one annoying MSR that maps a Xen
hypercall blob into guest address space.

A generic mechanism to delegate MSR writes to userspace seems overkill
and risks encouraging similar MSR abuse in the future.  Thus this patch
adds special support for the Xen HVM MSR.

I implemented a new ioctl, KVM_XEN_HVM_CONFIG, that lets userspace tell
KVM which MSR the guest will write to, as well as the starting address
and size of the hypercall blobs (one each for 32-bit and 64-bit) that
userspace has loaded from files.  When the guest writes to the MSR, KVM
copies one page of the blob from userspace to the guest.
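For context, a rough sketch of the userspace side follows. The helper name, the MSR number 0x40000000 and the blob buffers are illustrative assumptions; only struct kvm_xen_hvm_config and the KVM_XEN_HVM_CONFIG ioctl itself come from this patch.

/*
 * Illustrative userspace sketch (not part of this patch): configure the
 * Xen HVM MSR and hypercall blobs on a VM fd obtained from KVM_CREATE_VM.
 * The helper name, the MSR number and the blob buffers are assumptions.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int configure_xen_hvm(int vm_fd, void *blob_32, __u8 pages_32,
			     void *blob_64, __u8 pages_64)
{
	struct kvm_xen_hvm_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = 0;			/* no flags defined yet; must be zero */
	cfg.msr = 0x40000000;		/* MSR the guest will write (illustrative) */
	cfg.blob_addr_32 = (__u64)(unsigned long)blob_32;
	cfg.blob_addr_64 = (__u64)(unsigned long)blob_64;
	cfg.blob_size_32 = pages_32;	/* sizes are in pages, not bytes */
	cfg.blob_size_64 = pages_64;

	return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);	/* 0 on success, -1 on error */
}

The flags field must be zero for now; it is the future-proofing knob mentioned in the notes below.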

I've tested this patch with a hacked-up version of Gerd's userspace
code, booting a number of guests (CentOS 5.3 i386 and x86_64, and
FreeBSD 8.0-RC1 amd64) and exercising PV network and block devices.

[jan: fix i386 build warning]
[avi: future proof abi with a flags field]
Signed-off-by: Ed Swierk <eswierk@aristanetworks.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 94c30d9c
@@ -593,6 +593,30 @@ struct kvm_irqchip {
	} chip;
};

4.27 KVM_XEN_HVM_CONFIG

Capability: KVM_CAP_XEN_HVM
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_xen_hvm_config (in)
Returns: 0 on success, -1 on error

Sets the MSR that the Xen HVM guest uses to initialize its hypercall
page, and provides the starting address and size of the hypercall
blobs in userspace.  When the guest writes the MSR, kvm copies one
page of a blob (32- or 64-bit, depending on the vcpu mode) to guest
memory.

struct kvm_xen_hvm_config {
	__u32 flags;
	__u32 msr;
	__u64 blob_addr_32;
	__u64 blob_addr_64;
	__u8 blob_size_32;
	__u8 blob_size_64;
	__u8 pad2[30];
};
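The MSR value the guest writes encodes both pieces of information KVM needs: the page-aligned guest physical address of the destination page in bits 12 and up, and the index of the blob page to copy in the low 12 bits (this is the PAGE_MASK split done by xen_hvm_config() in the x86.c hunk below). A minimal guest-side sketch, with illustrative names that are not part of this interface:

/*
 * Illustrative guest-side sketch (ring-0 guest code; names are not part
 * of this interface): install the hypercall pages by writing the MSR
 * once per blob page.
 */
static inline void wrmsr(unsigned int msr, unsigned long long value)
{
	asm volatile("wrmsr" : : "c"(msr),
		     "a"((unsigned int)value),
		     "d"((unsigned int)(value >> 32)));
}

static void xen_init_hypercall_pages(unsigned int msr,
				     unsigned long long gpa, /* page-aligned */
				     unsigned int nr_pages)
{
	unsigned int i;

	/* Each write asks KVM to copy blob page i to gpa + i * 4096. */
	for (i = 0; i < nr_pages; i++)
		wrmsr(msr, (gpa + i * 4096ULL) | i);
}

A page index at or beyond blob_size_32/blob_size_64 makes the MSR write fail with -E2BIG.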

5. The kvm_run structure

Application code obtains a pointer to the kvm_run structure by
...
@@ -19,6 +19,7 @@
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM

/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
...
@@ -410,6 +410,8 @@ struct kvm_arch{
	unsigned long irq_sources_bitmap;
	u64 vm_init_tsc;

	struct kvm_xen_hvm_config xen_hvm_config;
};

struct kvm_vm_stat {
...
@@ -857,6 +857,38 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	return 0;
}

static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto out;
	r = -EFAULT;
	if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
		goto out_free;
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
@@ -972,6 +1004,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
			 "0x%x data 0x%llx\n", msr, data);
		break;
	default:
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
				msr, data);
@@ -1246,6 +1280,7 @@ int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_PIT2:
	case KVM_CAP_PIT_STATE2:
	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
	case KVM_CAP_XEN_HVM:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
@@ -2441,6 +2476,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
		r = 0;
		break;
	}
	case KVM_XEN_HVM_CONFIG: {
		r = -EFAULT;
		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
				   sizeof(struct kvm_xen_hvm_config)))
			goto out;
		r = -EINVAL;
		if (kvm->arch.xen_hvm_config.flags)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
...
@@ -436,6 +436,9 @@ struct kvm_ioeventfd {
#endif
#define KVM_CAP_IOEVENTFD 36
#define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
#ifdef __KVM_HAVE_XEN_HVM
#define KVM_CAP_XEN_HVM 38
#endif

#ifdef KVM_CAP_IRQ_ROUTING
@@ -488,6 +491,18 @@ struct kvm_x86_mce {
};
#endif

#ifdef KVM_CAP_XEN_HVM
struct kvm_xen_hvm_config {
	__u32 flags;
	__u32 msr;
	__u64 blob_addr_32;
	__u64 blob_addr_64;
	__u8 blob_size_32;
	__u8 blob_size_64;
	__u8 pad2[30];
};
#endif

#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)

struct kvm_irqfd {
@@ -546,6 +561,7 @@ struct kvm_irqfd {
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
#define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
#define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config)

/*
 * ioctls for vcpu fds
...