Commit d3f12d36 authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.30' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.30' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (113 commits)
  KVM: VMX: Don't allow uninhibited access to EFER on i386
  KVM: Correct deassign device ioctl to IOW
  KVM: ppc: e500: Fix the bug that KVM is unstable in SMP
  KVM: ppc: e500: Fix the bug that mas0 update to wrong value when read TLB entry
  KVM: Fix missing smp tlb flush in invlpg
  KVM: Get support IRQ routing entry counts
  KVM: fix sparse warnings: Should it be static?
  KVM: fix sparse warnings: context imbalance
  KVM: is_long_mode() should check for EFER.LMA
  KVM: VMX: Update necessary state when guest enters long mode
  KVM: ia64: Fix the build errors due to lack of macros related to MSI.
  ia64: Move the macro definitions related to MSI to one header file.
  KVM: fix kvm_vm_ioctl_deassign_device
  KVM: define KVM_CAP_DEVICE_DEASSIGNMENT
  KVM: ppc: Add emulation of E500 register mmucsr0
  KVM: Report IRQ injection status for MSI delivered interrupts
  KVM: MMU: Fix another largepage memory leak
  KVM: SVM: set accessed bit for VMCB segment selectors
  KVM: Report IRQ injection status to userspace.
  KVM: MMU: remove assertion in kvm_mmu_alloc_page
  ...
parents 39b566ee 16175a79
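
Among the user-visible changes merged here is the new KVM_IRQ_LINE_STATUS vm ioctl (wired up for ia64 in the kvm-ia64.c hunk below): it behaves like KVM_IRQ_LINE but writes the injection status back to userspace. A minimal userspace sketch, assuming a VM fd with an in-kernel irqchip; vm_fd and the error handling are illustrative, not part of this commit:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Inject 'irq' at 'level' and read back the injection status. */
    static int inject_irq_status(int vm_fd, __u32 irq, __u32 level)
    {
        struct kvm_irq_level event;

        memset(&event, 0, sizeof(event));
        event.irq = irq;
        event.level = level;
        if (ioctl(vm_fd, KVM_IRQ_LINE_STATUS, &event) < 0)
            return -1;
        /* 'irq' and 'status' share a union in struct kvm_irq_level,
         * so the kernel returns the status in place of the irq. */
        return event.status;
    }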
@@ -166,7 +166,40 @@ struct saved_vpd {
     unsigned long vcpuid[5];
     unsigned long vpsr;
     unsigned long vpr;
-    unsigned long vcr[128];
+    union {
+        unsigned long vcr[128];
+        struct {
+            unsigned long dcr;
+            unsigned long itm;
+            unsigned long iva;
+            unsigned long rsv1[5];
+            unsigned long pta;
+            unsigned long rsv2[7];
+            unsigned long ipsr;
+            unsigned long isr;
+            unsigned long rsv3;
+            unsigned long iip;
+            unsigned long ifa;
+            unsigned long itir;
+            unsigned long iipa;
+            unsigned long ifs;
+            unsigned long iim;
+            unsigned long iha;
+            unsigned long rsv4[38];
+            unsigned long lid;
+            unsigned long ivr;
+            unsigned long tpr;
+            unsigned long eoi;
+            unsigned long irr[4];
+            unsigned long itv;
+            unsigned long pmv;
+            unsigned long cmcv;
+            unsigned long rsv5[5];
+            unsigned long lrr0;
+            unsigned long lrr1;
+            unsigned long rsv6[46];
+        };
+    };
 };

 struct kvm_regs {
@@ -214,4 +247,18 @@ struct kvm_sregs {
 struct kvm_fpu {
 };

+#define KVM_IA64_VCPU_STACK_SHIFT 16
+#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT)
+
+struct kvm_ia64_vcpu_stack {
+    unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif
@@ -112,7 +112,11 @@
 #define VCPU_STRUCT_SHIFT 16
 #define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)

-#define KVM_STK_OFFSET VCPU_STRUCT_SIZE
+/*
+ * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
+ */
+#define KVM_STK_SHIFT 16
+#define KVM_STK_OFFSET (__IA64_UL_CONST(1) << KVM_STK_SHIFT)

 #define KVM_VM_STRUCT_SHIFT 19
 #define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
@@ -153,10 +157,10 @@ struct kvm_vm_data {
     struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
 };

-#define VCPU_BASE(n)    KVM_VM_DATA_BASE + \
-            offsetof(struct kvm_vm_data, vcpu_data[n])
-#define VM_BASE         KVM_VM_DATA_BASE + \
-            offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define VCPU_BASE(n)    (KVM_VM_DATA_BASE + \
+            offsetof(struct kvm_vm_data, vcpu_data[n]))
+#define KVM_VM_BASE     (KVM_VM_DATA_BASE + \
+            offsetof(struct kvm_vm_data, kvm_vm_struct))
 #define KVM_MEM_DIRTY_LOG_BASE  KVM_VM_DATA_BASE + \
             offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
@@ -235,8 +239,6 @@ struct kvm_vm_data {
 struct kvm;
 struct kvm_vcpu;

-struct kvm_guest_debug{
-};
-
 struct kvm_mmio_req {
     uint64_t addr;  /* physical address */
@@ -462,6 +464,8 @@ struct kvm_arch {
     unsigned long metaphysical_rr4;
     unsigned long vmm_init_rr;

+    int online_vcpus;
+
     struct kvm_ioapic *vioapic;
     struct kvm_vm_stat stat;
     struct kvm_sal_data rdv_sal_data;
...
+#ifndef _IA64_MSI_DEF_H
+#define _IA64_MSI_DEF_H
+
+/*
+ * Shifts for APIC-based data
+ */
+
+#define MSI_DATA_VECTOR_SHIFT       0
+#define MSI_DATA_VECTOR(v)          (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK        0xffffff00
+
+#define MSI_DATA_DELIVERY_MODE_SHIFT    8
+#define MSI_DATA_DELIVERY_FIXED     (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI    (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT        14
+#define MSI_DATA_LEVEL_DEASSERT     (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT       (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT      15
+#define MSI_DATA_TRIGGER_EDGE       (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL      (1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define MSI_ADDR_DEST_ID_SHIFT      4
+#define MSI_ADDR_HEADER             0xfee00000
+
+#define MSI_ADDR_DEST_ID_MASK       0xfff0000f
+#define MSI_ADDR_DEST_ID_CPU(cpu)   ((cpu) << MSI_ADDR_DEST_ID_SHIFT)
+
+#define MSI_ADDR_DEST_MODE_SHIFT    2
+#define MSI_ADDR_DEST_MODE_PHYS     (0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define MSI_ADDR_DEST_MODE_LOGIC    (1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT  3
+#define MSI_ADDR_REDIRECTION_CPU    (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#endif /* _IA64_MSI_DEF_H */
@@ -7,44 +7,7 @@
 #include <linux/msi.h>
 #include <linux/dmar.h>
 #include <asm/smp.h>
+#include <asm/msidef.h>
-
-/*
- * Shifts for APIC-based data
- */
-
-#define MSI_DATA_VECTOR_SHIFT       0
-#define MSI_DATA_VECTOR(v)          (((u8)v) << MSI_DATA_VECTOR_SHIFT)
-#define MSI_DATA_VECTOR_MASK        0xffffff00
-
-#define MSI_DATA_DELIVERY_SHIFT     8
-#define MSI_DATA_DELIVERY_FIXED     (0 << MSI_DATA_DELIVERY_SHIFT)
-#define MSI_DATA_DELIVERY_LOWPRI    (1 << MSI_DATA_DELIVERY_SHIFT)
-
-#define MSI_DATA_LEVEL_SHIFT        14
-#define MSI_DATA_LEVEL_DEASSERT     (0 << MSI_DATA_LEVEL_SHIFT)
-#define MSI_DATA_LEVEL_ASSERT       (1 << MSI_DATA_LEVEL_SHIFT)
-
-#define MSI_DATA_TRIGGER_SHIFT      15
-#define MSI_DATA_TRIGGER_EDGE       (0 << MSI_DATA_TRIGGER_SHIFT)
-#define MSI_DATA_TRIGGER_LEVEL      (1 << MSI_DATA_TRIGGER_SHIFT)
-
-/*
- * Shift/mask fields for APIC-based bus address
- */
-
-#define MSI_TARGET_CPU_SHIFT        4
-#define MSI_ADDR_HEADER             0xfee00000
-
-#define MSI_ADDR_DESTID_MASK        0xfff0000f
-#define MSI_ADDR_DESTID_CPU(cpu)    ((cpu) << MSI_TARGET_CPU_SHIFT)
-
-#define MSI_ADDR_DESTMODE_SHIFT     2
-#define MSI_ADDR_DESTMODE_PHYS      (0 << MSI_ADDR_DESTMODE_SHIFT)
-#define MSI_ADDR_DESTMODE_LOGIC     (1 << MSI_ADDR_DESTMODE_SHIFT)
-
-#define MSI_ADDR_REDIRECTION_SHIFT  3
-#define MSI_ADDR_REDIRECTION_CPU    (0 << MSI_ADDR_REDIRECTION_SHIFT)
-#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)

 static struct irq_chip ia64_msi_chip;
@@ -65,8 +28,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
     read_msi_msg(irq, &msg);

     addr = msg.address_lo;
-    addr &= MSI_ADDR_DESTID_MASK;
-    addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+    addr &= MSI_ADDR_DEST_ID_MASK;
+    addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
     msg.address_lo = addr;

     data = msg.data;
@@ -98,9 +61,9 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
     msg.address_hi = 0;
     msg.address_lo =
         MSI_ADDR_HEADER |
-        MSI_ADDR_DESTMODE_PHYS |
+        MSI_ADDR_DEST_MODE_PHYS |
         MSI_ADDR_REDIRECTION_CPU |
-        MSI_ADDR_DESTID_CPU(dest_phys_id);
+        MSI_ADDR_DEST_ID_CPU(dest_phys_id);

     msg.data =
         MSI_DATA_TRIGGER_EDGE |
@@ -183,8 +146,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
     msg.data &= ~MSI_DATA_VECTOR_MASK;
     msg.data |= MSI_DATA_VECTOR(cfg->vector);
-    msg.address_lo &= ~MSI_ADDR_DESTID_MASK;
-    msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+    msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

     dmar_msi_write(irq, &msg);
     irq_desc[irq].affinity = *mask;
@@ -215,9 +178,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
     msg->address_hi = 0;
     msg->address_lo =
         MSI_ADDR_HEADER |
-        MSI_ADDR_DESTMODE_PHYS |
+        MSI_ADDR_DEST_MODE_PHYS |
         MSI_ADDR_REDIRECTION_CPU |
-        MSI_ADDR_DESTID_CPU(dest);
+        MSI_ADDR_DEST_ID_CPU(dest);

     msg->data =
         MSI_DATA_TRIGGER_EDGE |
...
@@ -4,6 +4,10 @@
 config HAVE_KVM
 	bool

+config HAVE_KVM_IRQCHIP
+	bool
+	default y
+
 menuconfig VIRTUALIZATION
 	bool "Virtualization"
 	depends on HAVE_KVM || IA64
...
@@ -23,6 +23,8 @@
 #ifndef __IRQ_H
 #define __IRQ_H

+#include "lapic.h"
+
 static inline int irqchip_in_kernel(struct kvm *kvm)
 {
     return 1;
...
@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext)
     switch (ext) {
     case KVM_CAP_IRQCHIP:
     case KVM_CAP_MP_STATE:
-
+    case KVM_CAP_IRQ_INJECT_STATUS:
         r = 1;
         break;
     case KVM_CAP_COALESCED_MMIO:
@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
     union ia64_lid lid;
     int i;

-    for (i = 0; i < KVM_MAX_VCPUS; i++) {
+    for (i = 0; i < kvm->arch.online_vcpus; i++) {
         if (kvm->vcpus[i]) {
             lid.val = VCPU_LID(kvm->vcpus[i]);
             if (lid.id == id && lid.eid == eid)
@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
     call_data.ptc_g_data = p->u.ptc_g_data;

-    for (i = 0; i < KVM_MAX_VCPUS; i++) {
+    for (i = 0; i < kvm->arch.online_vcpus; i++) {
         if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
                         KVM_MP_STATE_UNINITIALIZED ||
                     vcpu == kvm->vcpus[i])
@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void)
         return ERR_PTR(-ENOMEM);
     kvm_init_vm(kvm);

+    kvm->arch.online_vcpus = 0;
+
     return kvm;
 }
@@ -919,7 +921,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
         r = kvm_ioapic_init(kvm);
         if (r)
             goto out;
+        r = kvm_setup_default_irq_routing(kvm);
+        if (r) {
+            kfree(kvm->arch.vioapic);
+            goto out;
+        }
         break;
+    case KVM_IRQ_LINE_STATUS:
     case KVM_IRQ_LINE: {
         struct kvm_irq_level irq_event;
@@ -927,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
         if (copy_from_user(&irq_event, argp, sizeof irq_event))
             goto out;
         if (irqchip_in_kernel(kvm)) {
+            __s32 status;
             mutex_lock(&kvm->lock);
-            kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+            status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                     irq_event.irq, irq_event.level);
             mutex_unlock(&kvm->lock);
+            if (ioctl == KVM_IRQ_LINE_STATUS) {
+                irq_event.status = status;
+                if (copy_to_user(argp, &irq_event,
+                            sizeof irq_event))
+                    goto out;
+            }
             r = 0;
         }
         break;
@@ -1149,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)

     /*Initialize itc offset for vcpus*/
     itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-    for (i = 0; i < KVM_MAX_VCPUS; i++) {
+    for (i = 0; i < kvm->arch.online_vcpus; i++) {
         v = (struct kvm_vcpu *)((char *)vcpu +
                 sizeof(struct kvm_vcpu_data) * i);
         v->arch.itc_offset = itc_offset;
@@ -1283,6 +1298,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
         goto fail;
     }

+    kvm->arch.online_vcpus++;
+
     return vcpu;
 fail:
     return ERR_PTR(r);
@@ -1303,8 +1320,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
     return -EINVAL;
 }

-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
-        struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+        struct kvm_guest_debug *dbg)
 {
     return -EINVAL;
 }
@@ -1421,6 +1438,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
     return 0;
 }

+int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
+                  struct kvm_ia64_vcpu_stack *stack)
+{
+    memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
+    return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
+                  struct kvm_ia64_vcpu_stack *stack)
+{
+    memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
+           sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
+    vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
+    return 0;
+}
+
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
@@ -1430,9 +1464,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 long kvm_arch_vcpu_ioctl(struct file *filp,
              unsigned int ioctl, unsigned long arg)
 {
-    return -EINVAL;
+    struct kvm_vcpu *vcpu = filp->private_data;
+    void __user *argp = (void __user *)arg;
+    struct kvm_ia64_vcpu_stack *stack = NULL;
+    long r;
+
+    switch (ioctl) {
+    case KVM_IA64_VCPU_GET_STACK: {
+        struct kvm_ia64_vcpu_stack __user *user_stack;
+        void __user *first_p = argp;
+
+        r = -EFAULT;
+        if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+            goto out;
+
+        if (!access_ok(VERIFY_WRITE, user_stack,
+                   sizeof(struct kvm_ia64_vcpu_stack))) {
+            printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
+                   "Illegal user destination address for stack\n");
+            goto out;
+        }
+        stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+        if (!stack) {
+            r = -ENOMEM;
+            goto out;
+        }
+
+        r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
+        if (r)
+            goto out;
+
+        if (copy_to_user(user_stack, stack,
+                 sizeof(struct kvm_ia64_vcpu_stack)))
+            goto out;
+
+        break;
+    }
+    case KVM_IA64_VCPU_SET_STACK: {
+        struct kvm_ia64_vcpu_stack __user *user_stack;
+        void __user *first_p = argp;
+
+        r = -EFAULT;
+        if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+            goto out;
+
+        if (!access_ok(VERIFY_READ, user_stack,
+                   sizeof(struct kvm_ia64_vcpu_stack))) {
+            printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
+                   "Illegal user address for stack\n");
+            goto out;
+        }
+        stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+        if (!stack) {
+            r = -ENOMEM;
+            goto out;
+        }
+        if (copy_from_user(stack, user_stack,
+                   sizeof(struct kvm_ia64_vcpu_stack)))
+            goto out;
+
+        r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
+        break;
+    }
+    default:
+        r = -EINVAL;
+    }
+
+out:
+    kfree(stack);
+    return r;
 }

 int kvm_arch_set_memory_region(struct kvm *kvm,
@@ -1472,7 +1575,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 }

 long kvm_arch_dev_ioctl(struct file *filp,
             unsigned int ioctl, unsigned long arg)
 {
     return -EINVAL;
 }
@@ -1737,7 +1840,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
     struct kvm_vcpu *lvcpu = kvm->vcpus[0];
     int i;

-    for (i = 1; i < KVM_MAX_VCPUS; i++) {
+    for (i = 1; i < kvm->arch.online_vcpus; i++) {
         if (!kvm->vcpus[i])
             continue;
         if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
...
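
The stack save/restore handlers above take their argument indirectly: kvm_arch_vcpu_ioctl() first copy_from_user()s a pointer out of the ioctl argument and only then copies the full kvm_ia64_vcpu_stack through it, so callers pass the address of their buffer pointer. A hedged userspace sketch; vcpu_fd and the err() handling are assumptions, not part of the diff:

    #include <err.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Save a vcpu's stacked state, e.g. for save/restore or migration. */
    static struct kvm_ia64_vcpu_stack *save_vcpu_stack(int vcpu_fd)
    {
        struct kvm_ia64_vcpu_stack *stack;

        stack = malloc(sizeof(*stack));
        if (!stack)
            err(1, "malloc");
        /* Pass &stack: the handler reads a user pointer first. */
        if (ioctl(vcpu_fd, KVM_IA64_VCPU_GET_STACK, &stack) < 0)
            err(1, "KVM_IA64_VCPU_GET_STACK");
        return stack;
    }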
@@ -227,6 +227,18 @@ static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
     return result;
 }

+static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
+{
+    struct ia64_pal_retval result = {0, 0, 0, 0};
+    long in0, in1, in2, in3;
+
+    kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+    result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
+
+    return result;
+}
+
 static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
 {
@@ -268,8 +280,12 @@ static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
 static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
 {
     struct ia64_pal_retval result;
+    unsigned long in0, in1, in2, in3;

-    INIT_PAL_STATUS_UNIMPLEMENTED(result);
+    kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+    result.status = ia64_pal_vm_info(in1, in2,
+            (pal_tc_info_u_t *)&result.v1, &result.v2);

     return result;
 }
@@ -292,6 +308,108 @@ static void prepare_for_halt(struct kvm_vcpu *vcpu)
     vcpu->arch.timer_fired = 0;
 }

+static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
+{
+    long status;
+    unsigned long in0, in1, in2, in3, r9;
+    unsigned long pm_buffer[16];
+
+    kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+    status = ia64_pal_perf_mon_info(pm_buffer,
+                (pal_perf_mon_info_u_t *) &r9);
+    if (status != 0) {
+        printk(KERN_DEBUG "PAL_PERF_MON_INFO fails ret=%ld\n", status);
+    } else {
+        if (in1)
+            memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
+        else {
+            status = PAL_STATUS_EINVAL;
+            printk(KERN_WARNING "Invalid parameters "
+                    "for PAL call:0x%lx!\n", in0);
+        }
+    }
+    return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
+{
+    unsigned long in0, in1, in2, in3;
+    long status;
+    unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
+            | (1UL << 61) | (1UL << 60);
+
+    kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+    if (in1) {
+        memcpy((void *)in1, &res, sizeof(res));
+        status = 0;
+    } else {
+        status = PAL_STATUS_EINVAL;
+        printk(KERN_WARNING "Invalid parameters "
+                "for PAL call:0x%lx!\n", in0);
+    }
+
+    return (struct ia64_pal_retval){status, 0, 0, 0};
+}
+
+static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
+{
+    unsigned long r9;
+    long status;
+
+    status = ia64_pal_mem_attrib(&r9);
+
+    return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static void remote_pal_prefetch_visibility(void *v)
+{
+    s64 trans_type = (s64)v;
+    ia64_pal_prefetch_visibility(trans_type);
+}
+
+static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
+{
+    struct ia64_pal_retval result = {0, 0, 0, 0};
+    unsigned long in0, in1, in2, in3;
+
+    kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+    result.status = ia64_pal_prefetch_visibility(in1);
+    if (result.status == 0) {
+        /* Must be performed on all remote processors
+           in the coherence domain. */
+        smp_call_function(remote_pal_prefetch_visibility,
+                  (void *)in1, 1);
+        /* Unnecessary on remote processor for other vcpus! */
+        result.status = 1;
+    }
+    return result;
+}
+
+static void remote_pal_mc_drain(void *v)
+{
+    ia64_pal_mc_drain();
+}
+
+static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
+{
+    struct ia64_pal_retval result = {0, 0, 0, 0};
+    unsigned long in0, in1, in2, in3;
+
+    kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+    if (in1 == 0 && in2) {
+        char brand_info[128];
+        result.status = ia64_pal_get_brand_info(brand_info);
+        if (result.status == PAL_STATUS_SUCCESS)
+            memcpy((void *)in2, brand_info, 128);
+    } else {
+        result.status = PAL_STATUS_REQUIRES_MEMORY;
+        printk(KERN_WARNING "Invalid parameters for "
+                "PAL call:0x%lx!\n", in0);
+    }
+
+    return result;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
@@ -300,14 +418,22 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
     int ret = 1;

     gr28 = kvm_get_pal_call_index(vcpu);
-    /*printk("pal_call index:%lx\n",gr28);*/
     switch (gr28) {
     case PAL_CACHE_FLUSH:
         result = pal_cache_flush(vcpu);
         break;
+    case PAL_MEM_ATTRIB:
+        result = pal_mem_attrib(vcpu);
+        break;
     case PAL_CACHE_SUMMARY:
         result = pal_cache_summary(vcpu);
         break;
+    case PAL_PERF_MON_INFO:
+        result = pal_perf_mon_info(vcpu);
+        break;
+    case PAL_HALT_INFO:
+        result = pal_halt_info(vcpu);
+        break;
     case PAL_HALT_LIGHT:
     {
         INIT_PAL_STATUS_SUCCESS(result);
@@ -317,6 +443,16 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
     }
         break;

+    case PAL_PREFETCH_VISIBILITY:
+        result = pal_prefetch_visibility(vcpu);
+        break;
+    case PAL_MC_DRAIN:
+        result.status = ia64_pal_mc_drain();
+        /* FIXME: All vcpus likely call PAL_MC_DRAIN.
+           That causes the congestion. */
+        smp_call_function(remote_pal_mc_drain, NULL, 1);
+        break;
+
     case PAL_FREQ_RATIOS:
         result = pal_freq_ratios(vcpu);
         break;
@@ -346,6 +482,9 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
         INIT_PAL_STATUS_SUCCESS(result);
         result.v1 = (1L << 32) | 1L;
         break;
+    case PAL_REGISTER_INFO:
+        result = pal_register_info(vcpu);
+        break;
     case PAL_VM_PAGE_SIZE:
         result.status = ia64_pal_vm_page_size(&result.v0,
                             &result.v1);
@@ -365,12 +504,18 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
         result.status = ia64_pal_version(
                 (pal_version_u_t *)&result.v0,
                 (pal_version_u_t *)&result.v1);
-
         break;
     case PAL_FIXED_ADDR:
         result.status = PAL_STATUS_SUCCESS;
         result.v0 = vcpu->vcpu_id;
         break;
+    case PAL_BRAND_INFO:
+        result = pal_get_brand_info(vcpu);
+        break;
+    case PAL_GET_PSTATE:
+    case PAL_CACHE_SHARED_INFO:
+        INIT_PAL_STATUS_UNIMPLEMENTED(result);
+        break;
     default:
         INIT_PAL_STATUS_UNIMPLEMENTED(result);
         printk(KERN_WARNING "kvm: Unsupported pal call,"
...
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
     return (rr1.val);
 }

-
 /*
  * Set vIFA & vITIR & vIHA, when vPSR.ic = 1
  * Parameter:
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
     inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
 }

-
-
 /*
  * Data Nested TLB Fault
  * @ Data Nested TLB Vector
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
     inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
 }

-
 /*
  * Data TLB Fault
  * @ Data TLB vector
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
     /* If vPSR.ic, IFA, ITIR, IHA*/
     set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
     inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-
-
 }

 /*
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
     _vhpt_fault(vcpu, vadr);
 }

-
 /*
  * VHPT Data Fault
  * @ VHPT Translation vector
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
     _vhpt_fault(vcpu, vadr);
 }

-
-
 /*
  * Deal with:
  * General Exception vector
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu)
     inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
 }

-
 /*
  * Illegal Operation Fault
  * @ General Exception Vector
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
     inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
 }

-
 void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
     __page_not_present(vcpu, vadr);
 }

-
 void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
 {
     __page_not_present(vcpu, vadr);
 }

-
 /* Deal with
  * Data access rights vector
  */
@@ -563,22 +550,64 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
     inject_guest_interruption(vcpu, vector);
 }

+static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
+                        unsigned long arg)
+{
+    struct thash_data *data;
+    unsigned long gpa, poff;
+
+    if (!is_physical_mode(vcpu)) {
+        /* Depends on caller to provide the DTR or DTC mapping. */
+        data = vtlb_lookup(vcpu, arg, D_TLB);
+        if (data)
+            gpa = data->page_flags & _PAGE_PPN_MASK;
+        else {
+            data = vhpt_lookup(arg);
+            if (!data)
+                return 0;
+            gpa = data->gpaddr & _PAGE_PPN_MASK;
+        }
+        poff = arg & (PSIZE(data->ps) - 1);
+        arg = PAGEALIGN(gpa, data->ps) | poff;
+    }
+    arg = kvm_gpa_to_mpa(arg << 1 >> 1);
+
+    return (unsigned long)__va(arg);
+}
+
 static void set_pal_call_data(struct kvm_vcpu *vcpu)
 {
     struct exit_ctl_data *p = &vcpu->arch.exit_data;
+    unsigned long gr28 = vcpu_get_gr(vcpu, 28);
+    unsigned long gr29 = vcpu_get_gr(vcpu, 29);
+    unsigned long gr30 = vcpu_get_gr(vcpu, 30);

     /*FIXME:For static and stacked convention, firmware
      * has put the parameters in gr28-gr31 before
      * break to vmm  !!*/

-    p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
-    p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
-    p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+    switch (gr28) {
+    case PAL_PERF_MON_INFO:
+    case PAL_HALT_INFO:
+        p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
+        p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+        break;
+    case PAL_BRAND_INFO:
+        p->u.pal_data.gr29 = gr29;
+        p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
+        break;
+    default:
+        p->u.pal_data.gr29 = gr29;
+        p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+    }
+    p->u.pal_data.gr28 = gr28;
     p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+
     p->exit_reason = EXIT_REASON_PAL_CALL;
 }

-static void set_pal_call_result(struct kvm_vcpu *vcpu)
+static void get_pal_call_result(struct kvm_vcpu *vcpu)
 {
     struct exit_ctl_data *p = &vcpu->arch.exit_data;
@@ -606,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu)
     p->exit_reason = EXIT_REASON_SAL_CALL;
 }

-static void set_sal_call_result(struct kvm_vcpu *vcpu)
+static void get_sal_call_result(struct kvm_vcpu *vcpu)
 {
     struct exit_ctl_data *p = &vcpu->arch.exit_data;
@@ -629,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
     if (iim == DOMN_PAL_REQUEST) {
         set_pal_call_data(v);
         vmm_transition(v);
-        set_pal_call_result(v);
+        get_pal_call_result(v);
         vcpu_increment_iip(v);
         return;
     } else if (iim == DOMN_SAL_REQUEST) {
         set_sal_call_data(v);
         vmm_transition(v);
-        set_sal_call_result(v);
+        get_sal_call_result(v);
         vcpu_increment_iip(v);
         return;
     }
@@ -703,7 +732,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu)
     }
 }

-
 void leave_hypervisor_tail(void)
 {
     struct kvm_vcpu *v = current_vcpu;
@@ -737,7 +765,6 @@ void leave_hypervisor_tail(void)
     }
 }

-
 static inline void handle_lds(struct kvm_pt_regs *regs)
 {
     regs->cr_ipsr |= IA64_PSR_ED;
...
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
     return;
 }

-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
     unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
     return;
 }

-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
     return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }

-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 {
     struct kvm_vcpu *v;
+    struct kvm *kvm;
     int i;
     long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
     unsigned long vitv = VCPU(vcpu, itv);

+    kvm = (struct kvm *)KVM_VM_BASE;
+
     if (vcpu->vcpu_id == 0) {
-        for (i = 0; i < KVM_MAX_VCPUS; i++) {
+        for (i = 0; i < kvm->arch.online_vcpus; i++) {
             v = (struct kvm_vcpu *)((char *)vcpu +
                     sizeof(struct kvm_vcpu_data) * i);
             VMX(v, itc_offset) = itc_offset;
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
     return key;
 }

-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
     unsigned long thash, vadr;
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
     vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }

-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
     unsigned long tag, vadr;
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
     return IA64_NO_FAULT;
 }

-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
     unsigned long r1, r3;
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
     vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }

-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
     vcpu_set_itc(vcpu, r2);
 }

-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
     unsigned long r1;
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
     r1 = vcpu_get_itc(vcpu);
     vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
- struct kvm_vcpu*protection key register access routines
+  struct kvm_vcpu protection key register access routines
 **************************************************************************/

 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
     ia64_set_pkr(reg, val);
 }

-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-    union ia64_rr rr, rr1;
-
-    rr.val = vcpu_get_rr(vcpu, ifa);
-    rr1.val = 0;
-    rr1.ps = rr.ps;
-    rr1.rid = rr.rid;
-    return (rr1.val);
-}
-
 /********************************
  * Moves to privileged registers
  ********************************/
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
     return (IA64_NO_FAULT);
 }

-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
     unsigned long r3, r2;
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
     vcpu_set_pkr(vcpu, r3, r2);
 }

-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
     unsigned long r3, r1;
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
     vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }

-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
     /* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
     return 0;
 }

-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
     unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
     return 0;
 }

-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {
@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
     }
 }

-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
     unsigned long ifs, psr;
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
     regs->cr_iip = VCPU(vcpu, iip);
 }

-
 /*
    VPSR can't keep track of below bits of guest PSR
    This function gets guest PSR
...
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
 extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
 extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
 extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
         u64 itir, u64 ifa, int type);
 extern void thash_purge_all(struct kvm_vcpu *v);
 extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
 void thash_init(struct thash_cb *hcb, u64 sz);

 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-
+u64 kvm_gpa_to_mpa(u64 gpa);
 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
         u64 arg4, u64 arg5, u64 arg6, u64 arg7);
...
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
     unsigned long ps, gpaddr;

     ps = itir_ps(itir);
-    rr.val = ia64_get_rr(ifa);

     gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
                 (ifa & ((1UL << ps) - 1));

+    rr.val = ia64_get_rr(ifa);
     head = (struct thash_data *)ia64_thash(ifa);
     head->etag = INVALID_TI_TAG;
     ia64_mf();
@@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
  */
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
-                        u64 ifa, int type)
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+                        u64 ifa, int type)
 {
     u64 ps;
     u64 phy_pte, io_mask, index;
     union ia64_rr vrr, mrr;
-    int ret = 0;

     ps = itir_ps(itir);
     vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
         phy_pte &= ~_PAGE_MA_MASK;
     }

-    if (pte & VTLB_PTE_IO)
-        ret = 1;
-
     vtlb_purge(v, ifa, ps);
     vhpt_purge(v, ifa, ps);

-    if (ps == mrr.ps) {
-        if (!(pte&VTLB_PTE_IO)) {
-            vhpt_insert(phy_pte, itir, ifa, pte);
-        } else {
-            vtlb_insert(v, pte, itir, ifa);
-            vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-        }
-    } else if (ps > mrr.ps) {
+    if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
         vtlb_insert(v, pte, itir, ifa);
         vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-        if (!(pte&VTLB_PTE_IO))
-            vhpt_insert(phy_pte, itir, ifa, pte);
-    } else {
+    }
+    if (pte & VTLB_PTE_IO)
+        return;
+
+    if (ps >= mrr.ps)
+        vhpt_insert(phy_pte, itir, ifa, pte);
+    else {
         u64 psr;
         phy_pte &= ~PAGE_FLAGS_RV_MASK;
         psr = ia64_clear_ic();
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
     if (!(pte&VTLB_PTE_IO))
         mark_pages_dirty(v, pte, ps);

-    return ret;
 }

 /*
@@ -509,7 +500,6 @@ void thash_purge_all(struct kvm_vcpu *v)
     local_flush_tlb_all();
 }

-
 /*
  * Lookup the hash table and its collision chain to find an entry
  * covering this address rid:va or the entry.
@@ -517,7 +507,6 @@ void thash_purge_all(struct kvm_vcpu *v)
  * INPUT:
  *  in: TLB format for both VHPT & TLB.
  */
-
 struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 {
     struct thash_data *cch;
@@ -547,7 +536,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
     return NULL;
 }

-
 /*
  * Initialize internal control data before service.
  */
@@ -573,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
     u64 *base = (u64 *) KVM_P2M_BASE;
+
+    if (gpfn >= (KVM_P2M_SIZE >> 3))
+        panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
     return *(base + gpfn);
 }
@@ -589,7 +581,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
     return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
 }

-
 /*
  * Fetch guest bundle code.
  * INPUT:
@@ -631,7 +622,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
     return IA64_NO_FAULT;
 }

-
 void kvm_init_vhpt(struct kvm_vcpu *v)
 {
     v->arch.vhpt.num = VHPT_NUM_ENTRIES;
...
@@ -52,4 +52,11 @@ struct kvm_fpu {
     __u64 fpr[32];
 };

+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif /* __LINUX_KVM_POWERPC_H */
@@ -28,6 +28,13 @@
  * need to find some way of advertising it. */
 #define KVM44x_GUEST_TLB_SIZE 64

+struct kvmppc_44x_tlbe {
+    u32 tid; /* Only the low 8 bits are used. */
+    u32 word0;
+    u32 word1;
+    u32 word2;
+};
+
 struct kvmppc_44x_shadow_ref {
     struct page *page;
     u16 gtlb_index;
...
@@ -42,7 +42,12 @@
 #define BOOKE_INTERRUPT_DTLB_MISS 13
 #define BOOKE_INTERRUPT_ITLB_MISS 14
 #define BOOKE_INTERRUPT_DEBUG 15
+#define BOOKE_MAX_INTERRUPT 15
+
+/* E500 */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35

 #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST (1<<1) /* Resume host? */
...
+/*
+ * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu, <yu.liu@freescale.com>
+ *
+ * Description:
+ * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_KVM_E500_H__
+#define __ASM_KVM_E500_H__
+
+#include <linux/kvm_host.h>
+
+#define BOOKE_INTERRUPT_SIZE 36
+
+#define E500_PID_NUM 3
+#define E500_TLB_NUM 2
+
+struct tlbe {
+    u32 mas1;
+    u32 mas2;
+    u32 mas3;
+    u32 mas7;
+};
+
+struct kvmppc_vcpu_e500 {
+    /* Unmodified copy of the guest's TLB. */
+    struct tlbe *guest_tlb[E500_TLB_NUM];
+    /* TLB that's actually used when the guest is running. */
+    struct tlbe *shadow_tlb[E500_TLB_NUM];
+    /* Pages which are referenced in the shadow TLB. */
+    struct page **shadow_pages[E500_TLB_NUM];
+
+    unsigned int guest_tlb_size[E500_TLB_NUM];
+    unsigned int shadow_tlb_size[E500_TLB_NUM];
+    unsigned int guest_tlb_nv[E500_TLB_NUM];
+
+    u32 host_pid[E500_PID_NUM];
+    u32 pid[E500_PID_NUM];
+
+    u32 mas0;
+    u32 mas1;
+    u32 mas2;
+    u32 mas3;
+    u32 mas4;
+    u32 mas5;
+    u32 mas6;
+    u32 mas7;
+    u32 l1csr1;
+    u32 hid0;
+    u32 hid1;
+
+    struct kvm_vcpu vcpu;
+};
+
+static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
+{
+    return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
+}
+
+#endif /* __ASM_KVM_E500_H__ */
@@ -64,13 +64,6 @@ struct kvm_vcpu_stat {
     u32 halt_wakeup;
 };

-struct kvmppc_44x_tlbe {
-    u32 tid; /* Only the low 8 bits are used. */
-    u32 word0;
-    u32 word1;
-    u32 word2;
-};
-
 enum kvm_exit_types {
     MMIO_EXITS,
     DCR_EXITS,
@@ -118,11 +111,6 @@ struct kvm_arch {
 struct kvm_vcpu_arch {
     u32 host_stack;
     u32 host_pid;
-    u32 host_dbcr0;
-    u32 host_dbcr1;
-    u32 host_dbcr2;
-    u32 host_iac[4];
-    u32 host_msr;

     u64 fpr[32];
     ulong gpr[32];
@@ -157,7 +145,7 @@ struct kvm_vcpu_arch {
     u32 tbu;
     u32 tcr;
     u32 tsr;
-    u32 ivor[16];
+    u32 ivor[64];
     ulong ivpr;
     u32 pir;
@@ -170,6 +158,7 @@ struct kvm_vcpu_arch {
     u32 ccr1;
     u32 dbcr0;
     u32 dbcr1;
+    u32 dbsr;

 #ifdef CONFIG_KVM_EXIT_TIMING
     struct kvmppc_exit_timing timing_exit;
@@ -200,10 +189,4 @@ struct kvm_vcpu_arch {
     unsigned long pending_exceptions;
 };

-struct kvm_guest_debug {
-    int enabled;
-    unsigned long bp[4];
-    int singlestep;
-};
-
 #endif /* __POWERPC_KVM_HOST_H__ */
@@ -52,13 +52,19 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
 extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);

+/* Core-specific hooks */
+
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
-                           u64 asid, u32 flags, u32 max_bytes,
                            unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
-
-/* Core-specific hooks */
+extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
+                              gva_t eaddr);
+extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);

 extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                 unsigned int id);
@@ -71,9 +77,6 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

-extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
-
 extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
...
@@ -75,6 +75,8 @@

 #ifndef __ASSEMBLY__

+extern unsigned int tlbcam_index;
+
 typedef struct {
     unsigned int id;
     unsigned int active;
...
@@ -49,7 +49,7 @@
 #include <asm/iseries/alpaca.h>
 #endif
 #ifdef CONFIG_KVM
-#include <asm/kvm_44x.h>
+#include <linux/kvm_host.h>
 #endif

 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
@@ -361,8 +361,6 @@ int main(void)
     DEFINE(PTE_SIZE, sizeof(pte_t));

 #ifdef CONFIG_KVM
-    DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe));
-
     DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
     DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
     DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
...
...@@ -28,72 +28,6 @@ ...@@ -28,72 +28,6 @@
#include "44x_tlb.h" #include "44x_tlb.h"
/* Note: clearing MSR[DE] just means that the debug interrupt will not be
* delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
* If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
* will be delivered as an "imprecise debug event" (which is indicated by
* DBSR[IDE].
*/
static void kvm44x_disable_debug_interrupts(void)
{
mtmsr(mfmsr() & ~MSR_DE);
}
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
kvm44x_disable_debug_interrupts();
mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
mtmsr(vcpu->arch.host_msr);
}
void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
struct kvm_guest_debug *dbg = &vcpu->guest_debug;
u32 dbcr0 = 0;
vcpu->arch.host_msr = mfmsr();
kvm44x_disable_debug_interrupts();
/* Save host debug register state. */
vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
/* set registers up for guest */
if (dbg->bp[0]) {
mtspr(SPRN_IAC1, dbg->bp[0]);
dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
}
if (dbg->bp[1]) {
mtspr(SPRN_IAC2, dbg->bp[1]);
dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
}
if (dbg->bp[2]) {
mtspr(SPRN_IAC3, dbg->bp[2]);
dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
}
if (dbg->bp[3]) {
mtspr(SPRN_IAC4, dbg->bp[3]);
dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
}
mtspr(SPRN_DBCR0, dbcr0);
mtspr(SPRN_DBCR1, 0);
mtspr(SPRN_DBCR2, 0);
}
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{ {
kvmppc_44x_tlb_load(vcpu); kvmppc_44x_tlb_load(vcpu);
...@@ -149,8 +83,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -149,8 +83,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr) struct kvm_translation *tr)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *gtlbe;
int index; int index;
gva_t eaddr; gva_t eaddr;
u8 pid; u8 pid;
...@@ -166,9 +98,7 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, ...@@ -166,9 +98,7 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
return 0; return 0;
} }
gtlbe = &vcpu_44x->guest_tlb[index]; tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
tr->physical_address = tlb_xlate(gtlbe, eaddr);
/* XXX what do "writeable" and "usermode" even mean? */ /* XXX what do "writeable" and "usermode" even mean? */
tr->valid = 1; tr->valid = 1;
......
...@@ -27,25 +27,12 @@ ...@@ -27,25 +27,12 @@
#include "booke.h" #include "booke.h"
#include "44x_tlb.h" #include "44x_tlb.h"
#define OP_RFI 19
#define XOP_RFI 50
#define XOP_MFMSR 83
#define XOP_WRTEE 131
#define XOP_MTMSR 146
#define XOP_WRTEEI 163
#define XOP_MFDCR 323 #define XOP_MFDCR 323
#define XOP_MTDCR 451 #define XOP_MTDCR 451
#define XOP_TLBSX 914 #define XOP_TLBSX 914
#define XOP_ICCCI 966 #define XOP_ICCCI 966
#define XOP_TLBWE 978 #define XOP_TLBWE 978
static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.srr0;
kvmppc_set_msr(vcpu, vcpu->arch.srr1);
}
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance) unsigned int inst, int *advance)
{ {
...@@ -59,48 +46,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -59,48 +46,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
int ws; int ws;
switch (get_op(inst)) { switch (get_op(inst)) {
case OP_RFI:
switch (get_xop(inst)) {
case XOP_RFI:
kvmppc_emul_rfi(vcpu);
kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
*advance = 0;
break;
default:
emulated = EMULATE_FAIL;
break;
}
break;
case 31: case 31:
switch (get_xop(inst)) { switch (get_xop(inst)) {
case XOP_MFMSR:
rt = get_rt(inst);
vcpu->arch.gpr[rt] = vcpu->arch.msr;
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case XOP_MTMSR:
rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
break;
case XOP_WRTEE:
rs = get_rs(inst);
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (vcpu->arch.gpr[rs] & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
case XOP_WRTEEI:
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (inst & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
case XOP_MFDCR: case XOP_MFDCR:
dcrn = get_dcrn(inst); dcrn = get_dcrn(inst);
rt = get_rt(inst); rt = get_rt(inst);
...@@ -186,186 +134,51 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -186,186 +134,51 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = EMULATE_FAIL; emulated = EMULATE_FAIL;
} }
if (emulated == EMULATE_FAIL)
emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
return emulated; return emulated;
} }
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{ {
int emulated = EMULATE_DONE;
switch (sprn) { switch (sprn) {
case SPRN_MMUCR:
vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
case SPRN_PID: case SPRN_PID:
kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
case SPRN_MMUCR:
vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
case SPRN_CCR0: case SPRN_CCR0:
vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
case SPRN_CCR1: case SPRN_CCR1:
vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
case SPRN_DEAR:
vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
case SPRN_ESR:
vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
case SPRN_DBCR0:
vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
case SPRN_DBCR1:
vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
case SPRN_TSR:
vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
case SPRN_TCR:
vcpu->arch.tcr = vcpu->arch.gpr[rs];
kvmppc_emulate_dec(vcpu);
break;
/* Note: SPRG4-7 are user-readable. These values are
* loaded into the real SPRGs when resuming the
* guest. */
case SPRN_SPRG4:
vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG5:
vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG6:
vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG7:
vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
case SPRN_IVPR:
vcpu->arch.ivpr = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR0:
vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR1:
vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR2:
vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR3:
vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR4:
vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR5:
vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR6:
vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR7:
vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR8:
vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR9:
vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR10:
vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR11:
vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR12:
vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR13:
vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR14:
vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR15:
vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
break;
default: default:
return EMULATE_FAIL; emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
} }
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
return EMULATE_DONE; return emulated;
} }
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{ {
int emulated = EMULATE_DONE;
switch (sprn) { switch (sprn) {
/* 440 */ case SPRN_PID:
vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
case SPRN_MMUCR: case SPRN_MMUCR:
vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
case SPRN_CCR0: case SPRN_CCR0:
vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
case SPRN_CCR1: case SPRN_CCR1:
vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
/* Book E */
case SPRN_PID:
vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
case SPRN_IVPR:
vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
case SPRN_DEAR:
vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
case SPRN_ESR:
vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
case SPRN_DBCR0:
vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
case SPRN_DBCR1:
vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
case SPRN_IVOR0:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
break;
case SPRN_IVOR1:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
break;
case SPRN_IVOR2:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
break;
case SPRN_IVOR3:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
break;
case SPRN_IVOR4:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
break;
case SPRN_IVOR5:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
break;
case SPRN_IVOR6:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
break;
case SPRN_IVOR7:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
break;
case SPRN_IVOR8:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
break;
case SPRN_IVOR9:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
break;
case SPRN_IVOR10:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
break;
case SPRN_IVOR11:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
break;
case SPRN_IVOR12:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
break;
case SPRN_IVOR13:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
break;
case SPRN_IVOR14:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
break;
case SPRN_IVOR15:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
break;
default: default:
return EMULATE_FAIL; emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
} }
kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
return EMULATE_DONE; return emulated;
} }
...@@ -208,20 +208,38 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, ...@@ -208,20 +208,38 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
return -1; return -1;
} }
int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
gva_t eaddr)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
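Put differently, kvmppc_mmu_xlate() keeps the in-page offset of the effective address and substitutes the real page frame from the guest TLB entry. A minimal sketch of the arithmetic, assuming a 4 KB entry and a page-aligned raddr (which get_tlb_raddr() appears to provide):
/* For a 4KB entry, pgmask == 0xfff, so the result is:
 *   gpaddr = raddr | (eaddr & 0xfff)
 * e.g. raddr 0x10002000 and eaddr 0xc0002abc yield gpaddr 0x10002abc.
 */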
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{ {
unsigned int as = !!(vcpu->arch.msr & MSR_IS); unsigned int as = !!(vcpu->arch.msr & MSR_IS);
return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
} }
int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{ {
unsigned int as = !!(vcpu->arch.msr & MSR_DS); unsigned int as = !!(vcpu->arch.msr & MSR_DS);
return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
} }
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}
void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}
static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
unsigned int stlb_index) unsigned int stlb_index)
{ {
...@@ -248,7 +266,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, ...@@ -248,7 +266,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler); KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
} }
void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i; int i;
...@@ -269,15 +287,19 @@ void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) ...@@ -269,15 +287,19 @@ void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
* Caller must ensure that the specified guest TLB entry is safe to insert into * Caller must ensure that the specified guest TLB entry is safe to insert into
* the shadow TLB. * the shadow TLB.
*/ */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid, void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
u32 flags, u32 max_bytes, unsigned int gtlb_index) unsigned int gtlb_index)
{ {
struct kvmppc_44x_tlbe stlbe; struct kvmppc_44x_tlbe stlbe;
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
struct kvmppc_44x_shadow_ref *ref; struct kvmppc_44x_shadow_ref *ref;
struct page *new_page; struct page *new_page;
hpa_t hpaddr; hpa_t hpaddr;
gfn_t gfn; gfn_t gfn;
u32 asid = gtlbe->tid;
u32 flags = gtlbe->word2;
u32 max_bytes = get_tlb_bytes(gtlbe);
unsigned int victim; unsigned int victim;
/* Select TLB entry to clobber. Indirectly guard against races with the TLB /* Select TLB entry to clobber. Indirectly guard against races with the TLB
...@@ -448,10 +470,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) ...@@ -448,10 +470,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
} }
if (tlbe_is_host_safe(vcpu, tlbe)) { if (tlbe_is_host_safe(vcpu, tlbe)) {
u64 asid;
gva_t eaddr; gva_t eaddr;
gpa_t gpaddr; gpa_t gpaddr;
u32 flags;
u32 bytes; u32 bytes;
eaddr = get_tlb_eaddr(tlbe); eaddr = get_tlb_eaddr(tlbe);
...@@ -462,10 +482,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) ...@@ -462,10 +482,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
eaddr &= ~(bytes - 1); eaddr &= ~(bytes - 1);
gpaddr &= ~(bytes - 1); gpaddr &= ~(bytes - 1);
asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
flags = tlbe->word2 & 0xffff;
kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
} }
KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0, KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
......
...@@ -25,8 +25,6 @@ ...@@ -25,8 +25,6 @@
extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned int pid, unsigned int as); unsigned int pid, unsigned int as);
extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
u8 rc); u8 rc);
...@@ -85,11 +83,4 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu) ...@@ -85,11 +83,4 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
return (vcpu->arch.mmucr >> 16) & 0x1; return (vcpu->arch.mmucr >> 16) & 0x1;
} }
static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe, gva_t eaddr)
{
unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
return get_tlb_raddr(tlbe) | (eaddr & pgmask);
}
#endif /* __KVM_POWERPC_TLB_H__ */ #endif /* __KVM_POWERPC_TLB_H__ */
...@@ -2,6 +2,9 @@ ...@@ -2,6 +2,9 @@
# KVM configuration # KVM configuration
# #
config HAVE_KVM_IRQCHIP
bool
menuconfig VIRTUALIZATION menuconfig VIRTUALIZATION
bool "Virtualization" bool "Virtualization"
---help--- ---help---
...@@ -43,6 +46,19 @@ config KVM_EXIT_TIMING ...@@ -43,6 +46,19 @@ config KVM_EXIT_TIMING
If unsure, say N. If unsure, say N.
config KVM_E500
bool "KVM support for PowerPC E500 processors"
depends on EXPERIMENTAL && E500
select KVM
---help---
Support running unmodified E500 guest kernels in virtual machines on
E500 host processors.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
If unsure, say N.
config KVM_TRACE config KVM_TRACE
bool "KVM trace support" bool "KVM trace support"
depends on KVM && MARKERS && SYSFS depends on KVM && MARKERS && SYSFS
......
...@@ -16,8 +16,18 @@ AFLAGS_booke_interrupts.o := -I$(obj) ...@@ -16,8 +16,18 @@ AFLAGS_booke_interrupts.o := -I$(obj)
kvm-440-objs := \ kvm-440-objs := \
booke.o \ booke.o \
booke_emulate.o \
booke_interrupts.o \ booke_interrupts.o \
44x.o \ 44x.o \
44x_tlb.o \ 44x_tlb.o \
44x_emulate.o 44x_emulate.o
obj-$(CONFIG_KVM_440) += kvm-440.o obj-$(CONFIG_KVM_440) += kvm-440.o
kvm-e500-objs := \
booke.o \
booke_emulate.o \
booke_interrupts.o \
e500.o \
e500_tlb.o \
e500_emulate.o
obj-$(CONFIG_KVM_E500) += kvm-e500.o
...@@ -30,10 +30,8 @@ ...@@ -30,10 +30,8 @@
#include <asm/kvm_ppc.h> #include <asm/kvm_ppc.h>
#include "timing.h" #include "timing.h"
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/kvm_44x.h>
#include "booke.h" #include "booke.h"
#include "44x_tlb.h"
unsigned long kvmppc_booke_handlers; unsigned long kvmppc_booke_handlers;
...@@ -120,6 +118,9 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, ...@@ -120,6 +118,9 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_DATA_STORAGE: case BOOKE_IRQPRIO_DATA_STORAGE:
case BOOKE_IRQPRIO_INST_STORAGE: case BOOKE_IRQPRIO_INST_STORAGE:
case BOOKE_IRQPRIO_FP_UNAVAIL: case BOOKE_IRQPRIO_FP_UNAVAIL:
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
case BOOKE_IRQPRIO_AP_UNAVAIL: case BOOKE_IRQPRIO_AP_UNAVAIL:
case BOOKE_IRQPRIO_ALIGNMENT: case BOOKE_IRQPRIO_ALIGNMENT:
allowed = 1; allowed = 1;
...@@ -165,7 +166,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) ...@@ -165,7 +166,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
unsigned int priority; unsigned int priority;
priority = __ffs(*pending); priority = __ffs(*pending);
while (priority <= BOOKE_MAX_INTERRUPT) { while (priority <= BOOKE_IRQPRIO_MAX) {
if (kvmppc_booke_irqprio_deliver(vcpu, priority)) if (kvmppc_booke_irqprio_deliver(vcpu, priority))
break; break;
...@@ -263,6 +264,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -263,6 +264,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
case BOOKE_INTERRUPT_SPE_UNAVAIL:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_SPE_FP_DATA:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_SPE_FP_ROUND:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_DATA_STORAGE: case BOOKE_INTERRUPT_DATA_STORAGE:
vcpu->arch.dear = vcpu->arch.fault_dear; vcpu->arch.dear = vcpu->arch.fault_dear;
vcpu->arch.esr = vcpu->arch.fault_esr; vcpu->arch.esr = vcpu->arch.fault_esr;
...@@ -284,29 +300,27 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -284,29 +300,27 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
/* XXX move to a 440-specific file. */
case BOOKE_INTERRUPT_DTLB_MISS: { case BOOKE_INTERRUPT_DTLB_MISS: {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *gtlbe;
unsigned long eaddr = vcpu->arch.fault_dear; unsigned long eaddr = vcpu->arch.fault_dear;
int gtlb_index; int gtlb_index;
gpa_t gpaddr;
gfn_t gfn; gfn_t gfn;
/* Check the guest TLB. */ /* Check the guest TLB. */
gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr); gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
if (gtlb_index < 0) { if (gtlb_index < 0) {
/* The guest didn't have a mapping for it. */ /* The guest didn't have a mapping for it. */
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
vcpu->arch.dear = vcpu->arch.fault_dear; vcpu->arch.dear = vcpu->arch.fault_dear;
vcpu->arch.esr = vcpu->arch.fault_esr; vcpu->arch.esr = vcpu->arch.fault_esr;
kvmppc_mmu_dtlb_miss(vcpu);
kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
} }
gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr); gfn = gpaddr >> PAGE_SHIFT;
gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
/* The guest TLB had a mapping, but the shadow TLB /* The guest TLB had a mapping, but the shadow TLB
...@@ -315,13 +329,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -315,13 +329,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* b) the guest used a large mapping which we're faking * b) the guest used a large mapping which we're faking
* Either way, we need to satisfy the fault without * Either way, we need to satisfy the fault without
* invoking the guest. */ * invoking the guest. */
kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid, kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
r = RESUME_GUEST; r = RESUME_GUEST;
} else { } else {
/* Guest has mapped and accessed a page which is not /* Guest has mapped and accessed a page which is not
* actually RAM. */ * actually RAM. */
vcpu->arch.paddr_accessed = gpaddr;
r = kvmppc_emulate_mmio(run, vcpu); r = kvmppc_emulate_mmio(run, vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS); kvmppc_account_exit(vcpu, MMIO_EXITS);
} }
...@@ -329,10 +343,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -329,10 +343,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break; break;
} }
/* XXX move to a 440-specific file. */
case BOOKE_INTERRUPT_ITLB_MISS: { case BOOKE_INTERRUPT_ITLB_MISS: {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *gtlbe;
unsigned long eaddr = vcpu->arch.pc; unsigned long eaddr = vcpu->arch.pc;
gpa_t gpaddr; gpa_t gpaddr;
gfn_t gfn; gfn_t gfn;
...@@ -341,18 +352,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -341,18 +352,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST; r = RESUME_GUEST;
/* Check the guest TLB. */ /* Check the guest TLB. */
gtlb_index = kvmppc_44x_itlb_index(vcpu, eaddr); gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
if (gtlb_index < 0) { if (gtlb_index < 0) {
/* The guest didn't have a mapping for it. */ /* The guest didn't have a mapping for it. */
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
kvmppc_mmu_itlb_miss(vcpu);
kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
break; break;
} }
kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
gpaddr = tlb_xlate(gtlbe, eaddr);
gfn = gpaddr >> PAGE_SHIFT; gfn = gpaddr >> PAGE_SHIFT;
if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
...@@ -362,8 +373,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -362,8 +373,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* b) the guest used a large mapping which we're faking * b) the guest used a large mapping which we're faking
* Either way, we need to satisfy the fault without * Either way, we need to satisfy the fault without
* invoking the guest. */ * invoking the guest. */
kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid, kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
} else { } else {
/* Guest mapped and leaped at non-RAM! */ /* Guest mapped and leaped at non-RAM! */
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>
#include "timing.h" #include "timing.h"
/* interrupt priority ordering */ /* interrupt priority ordering */
...@@ -30,17 +31,24 @@ ...@@ -30,17 +31,24 @@
#define BOOKE_IRQPRIO_ALIGNMENT 2 #define BOOKE_IRQPRIO_ALIGNMENT 2
#define BOOKE_IRQPRIO_PROGRAM 3 #define BOOKE_IRQPRIO_PROGRAM 3
#define BOOKE_IRQPRIO_FP_UNAVAIL 4 #define BOOKE_IRQPRIO_FP_UNAVAIL 4
#define BOOKE_IRQPRIO_SYSCALL 5 #define BOOKE_IRQPRIO_SPE_UNAVAIL 5
#define BOOKE_IRQPRIO_AP_UNAVAIL 6 #define BOOKE_IRQPRIO_SPE_FP_DATA 6
#define BOOKE_IRQPRIO_DTLB_MISS 7 #define BOOKE_IRQPRIO_SPE_FP_ROUND 7
#define BOOKE_IRQPRIO_ITLB_MISS 8 #define BOOKE_IRQPRIO_SYSCALL 8
#define BOOKE_IRQPRIO_MACHINE_CHECK 9 #define BOOKE_IRQPRIO_AP_UNAVAIL 9
#define BOOKE_IRQPRIO_DEBUG 10 #define BOOKE_IRQPRIO_DTLB_MISS 10
#define BOOKE_IRQPRIO_CRITICAL 11 #define BOOKE_IRQPRIO_ITLB_MISS 11
#define BOOKE_IRQPRIO_WATCHDOG 12 #define BOOKE_IRQPRIO_MACHINE_CHECK 12
#define BOOKE_IRQPRIO_EXTERNAL 13 #define BOOKE_IRQPRIO_DEBUG 13
#define BOOKE_IRQPRIO_FIT 14 #define BOOKE_IRQPRIO_CRITICAL 14
#define BOOKE_IRQPRIO_DECREMENTER 15 #define BOOKE_IRQPRIO_WATCHDOG 15
#define BOOKE_IRQPRIO_EXTERNAL 16
#define BOOKE_IRQPRIO_FIT 17
#define BOOKE_IRQPRIO_DECREMENTER 18
#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
#define BOOKE_IRQPRIO_MAX 19
extern unsigned long kvmppc_booke_handlers;
/* Helper function for "full" MSR writes. No need to call this if only EE is /* Helper function for "full" MSR writes. No need to call this if only EE is
* changing. */ * changing. */
...@@ -57,4 +65,9 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) ...@@ -57,4 +65,9 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
}; };
} }
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
#endif /* __KVM_BOOKE_H__ */ #endif /* __KVM_BOOKE_H__ */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#include <linux/kvm_host.h>
#include <asm/disassemble.h>
#include "booke.h"
#define OP_19_XOP_RFI 50
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_WRTEE 131
#define OP_31_XOP_MTMSR 146
#define OP_31_XOP_WRTEEI 163
static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.srr0;
kvmppc_set_msr(vcpu, vcpu->arch.srr1);
}
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int rs;
int rt;
switch (get_op(inst)) {
case 19:
switch (get_xop(inst)) {
case OP_19_XOP_RFI:
kvmppc_emul_rfi(vcpu);
kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
*advance = 0;
break;
default:
emulated = EMULATE_FAIL;
break;
}
break;
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
rt = get_rt(inst);
vcpu->arch.gpr[rt] = vcpu->arch.msr;
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case OP_31_XOP_MTMSR:
rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
break;
case OP_31_XOP_WRTEE:
rs = get_rs(inst);
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (vcpu->arch.gpr[rs] & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
case OP_31_XOP_WRTEEI:
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (inst & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
default:
emulated = EMULATE_FAIL;
}
break;
default:
emulated = EMULATE_FAIL;
}
return emulated;
}
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_DEAR:
vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
case SPRN_ESR:
vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
case SPRN_DBCR0:
vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
case SPRN_DBCR1:
vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
case SPRN_DBSR:
vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break;
case SPRN_TSR:
vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
case SPRN_TCR:
vcpu->arch.tcr = vcpu->arch.gpr[rs];
kvmppc_emulate_dec(vcpu);
break;
/* Note: SPRG4-7 are user-readable. These values are
* loaded into the real SPRGs when resuming the
* guest. */
case SPRN_SPRG4:
vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG5:
vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG6:
vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG7:
vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
case SPRN_IVPR:
vcpu->arch.ivpr = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR0:
vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR1:
vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR2:
vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR3:
vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR4:
vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR5:
vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR6:
vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR7:
vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR8:
vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR9:
vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR10:
vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR11:
vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR12:
vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR13:
vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR14:
vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR15:
vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
break;
default:
emulated = EMULATE_FAIL;
}
return emulated;
}
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_IVPR:
vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
case SPRN_DEAR:
vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
case SPRN_ESR:
vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
case SPRN_DBCR0:
vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
case SPRN_DBCR1:
vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
case SPRN_DBSR:
vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break;
case SPRN_IVOR0:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
break;
case SPRN_IVOR1:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
break;
case SPRN_IVOR2:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
break;
case SPRN_IVOR3:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
break;
case SPRN_IVOR4:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
break;
case SPRN_IVOR5:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
break;
case SPRN_IVOR6:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
break;
case SPRN_IVOR7:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
break;
case SPRN_IVOR8:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
break;
case SPRN_IVOR9:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
break;
case SPRN_IVOR10:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
break;
case SPRN_IVOR11:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
break;
case SPRN_IVOR12:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
break;
case SPRN_IVOR13:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
break;
case SPRN_IVOR14:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
break;
case SPRN_IVOR15:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
break;
default:
emulated = EMULATE_FAIL;
}
return emulated;
}
...@@ -86,6 +86,9 @@ KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG ...@@ -86,6 +86,9 @@ KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_DEBUG KVM_HANDLER BOOKE_INTERRUPT_DEBUG
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND
_GLOBAL(kvmppc_handler_len) _GLOBAL(kvmppc_handler_len)
.long kvmppc_handler_1 - kvmppc_handler_0 .long kvmppc_handler_1 - kvmppc_handler_0
...@@ -347,7 +350,9 @@ lightweight_exit: ...@@ -347,7 +350,9 @@ lightweight_exit:
lwz r3, VCPU_SHADOW_PID(r4) lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3 mtspr SPRN_PID, r3
#ifdef CONFIG_44x
iccci 0, 0 /* XXX hack */ iccci 0, 0 /* XXX hack */
#endif
/* Load some guest volatiles. */ /* Load some guest volatiles. */
lwz r0, VCPU_GPR(r0)(r4) lwz r0, VCPU_GPR(r0)(r4)
......
/*
* Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
* Description:
* This file is derived from arch/powerpc/kvm/44x.c,
* by Hollis Blanchard <hollisb@us.ibm.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_e500.h>
#include <asm/kvm_ppc.h>
#include "booke.h"
#include "e500_tlb.h"
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}
void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_e500_tlb_load(vcpu, cpu);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
kvmppc_e500_tlb_put(vcpu);
}
int kvmppc_core_check_processor_compat(void)
{
int r;
if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
r = 0;
else
r = -ENOTSUPP;
return r;
}
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
kvmppc_e500_tlb_setup(vcpu_e500);
/* Use the same core version as the host's */
vcpu->arch.pvr = mfspr(SPRN_PVR);
return 0;
}
/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
int index;
gva_t eaddr;
u8 pid;
u8 as;
eaddr = tr->linear_address;
pid = (tr->linear_address >> 32) & 0xff;
as = (tr->linear_address >> 40) & 0x1;
index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
if (index < 0) {
tr->valid = 0;
return 0;
}
tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
/* XXX what do "writeable" and "usermode" even mean? */
tr->valid = 1;
return 0;
}
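For reference, the AS|PID|EADDR packing that this handler decodes could be built on the caller's side roughly as below. This is an illustrative sketch, not part of the patch, and the helper name is hypothetical.
#include <stdint.h>
/* Hypothetical helper: pack an effective address, PID, and address-space
 * bit into the 'linear_address' layout decoded above (EADDR in bits 0-31,
 * PID in bits 32-39, AS in bit 40). */
static inline uint64_t pack_linear_address(uint32_t eaddr, uint8_t pid,
					   unsigned int as)
{
	return ((uint64_t)(as & 0x1) << 40) |
	       ((uint64_t)pid << 32) |
	       eaddr;
}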
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;
int err;
vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu_e500) {
err = -ENOMEM;
goto out;
}
vcpu = &vcpu_e500->vcpu;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
goto free_vcpu;
err = kvmppc_e500_tlb_init(vcpu_e500);
if (err)
goto uninit_vcpu;
return vcpu;
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
kvmppc_e500_tlb_uninit(vcpu_e500);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
static int kvmppc_e500_init(void)
{
int r, i;
unsigned long ivor[3];
unsigned long max_ivor = 0;
r = kvmppc_booke_init();
if (r)
return r;
/* copy extra E500 exception handlers */
ivor[0] = mfspr(SPRN_IVOR32);
ivor[1] = mfspr(SPRN_IVOR33);
ivor[2] = mfspr(SPRN_IVOR34);
for (i = 0; i < 3; i++) {
if (ivor[i] > max_ivor)
max_ivor = ivor[i];
memcpy((void *)kvmppc_booke_handlers + ivor[i],
kvmppc_handlers_start + (i + 16) * kvmppc_handler_len,
kvmppc_handler_len);
}
flush_icache_range(kvmppc_booke_handlers,
kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE);
}
static void kvmppc_e500_exit(void)
{
kvmppc_booke_exit();
}
module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
/*
* Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
* Description:
* This file is derived from arch/powerpc/kvm/44x_emulate.c,
* by Hollis Blanchard <hollisb@us.ibm.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_e500.h>
#include "booke.h"
#include "e500_tlb.h"
#define XOP_TLBIVAX 786
#define XOP_TLBSX 914
#define XOP_TLBRE 946
#define XOP_TLBWE 978
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int ra;
int rb;
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
case XOP_TLBRE:
emulated = kvmppc_e500_emul_tlbre(vcpu);
break;
case XOP_TLBWE:
emulated = kvmppc_e500_emul_tlbwe(vcpu);
break;
case XOP_TLBSX:
rb = get_rb(inst);
emulated = kvmppc_e500_emul_tlbsx(vcpu, rb);
break;
case XOP_TLBIVAX:
ra = get_ra(inst);
rb = get_rb(inst);
emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
break;
default:
emulated = EMULATE_FAIL;
}
break;
default:
emulated = EMULATE_FAIL;
}
if (emulated == EMULATE_FAIL)
emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
return emulated;
}
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
vcpu->arch.pid = vcpu->arch.gpr[rs];
break;
case SPRN_PID1:
vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break;
case SPRN_PID2:
vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break;
case SPRN_MAS0:
vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break;
case SPRN_MAS1:
vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break;
case SPRN_MAS2:
vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break;
case SPRN_MAS3:
vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break;
case SPRN_MAS4:
vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break;
case SPRN_MAS6:
vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break;
case SPRN_MAS7:
vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break;
case SPRN_L1CSR1:
vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break;
case SPRN_HID0:
vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break;
case SPRN_HID1:
vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break;
case SPRN_MMUCSR0:
emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
vcpu->arch.gpr[rs]);
break;
/* extra exceptions */
case SPRN_IVOR32:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR33:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR34:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs];
break;
case SPRN_IVOR35:
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs];
break;
default:
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
}
return emulated;
}
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break;
case SPRN_PID1:
vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break;
case SPRN_PID2:
vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break;
case SPRN_MAS0:
vcpu->arch.gpr[rt] = vcpu_e500->mas0; break;
case SPRN_MAS1:
vcpu->arch.gpr[rt] = vcpu_e500->mas1; break;
case SPRN_MAS2:
vcpu->arch.gpr[rt] = vcpu_e500->mas2; break;
case SPRN_MAS3:
vcpu->arch.gpr[rt] = vcpu_e500->mas3; break;
case SPRN_MAS4:
vcpu->arch.gpr[rt] = vcpu_e500->mas4; break;
case SPRN_MAS6:
vcpu->arch.gpr[rt] = vcpu_e500->mas6; break;
case SPRN_MAS7:
vcpu->arch.gpr[rt] = vcpu_e500->mas7; break;
case SPRN_TLB0CFG:
vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG);
vcpu->arch.gpr[rt] &= ~0xfffUL;
vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0];
break;
case SPRN_TLB1CFG:
vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG);
vcpu->arch.gpr[rt] &= ~0xfffUL;
vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1];
break;
case SPRN_L1CSR1:
vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break;
case SPRN_HID0:
vcpu->arch.gpr[rt] = vcpu_e500->hid0; break;
case SPRN_HID1:
vcpu->arch.gpr[rt] = vcpu_e500->hid1; break;
case SPRN_MMUCSR0:
vcpu->arch.gpr[rt] = 0; break;
/* extra exceptions */
case SPRN_IVOR32:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
break;
case SPRN_IVOR33:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
break;
case SPRN_IVOR34:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
break;
case SPRN_IVOR35:
vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
break;
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
}
return emulated;
}
/*
* Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, yu.liu@freescale.com
*
* Description:
* This file is based on arch/powerpc/kvm/44x_tlb.h,
* by Hollis Blanchard <hollisb@us.ibm.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#ifndef __KVM_E500_TLB_H__
#define __KVM_E500_TLB_H__
#include <linux/kvm_host.h>
#include <asm/mmu-fsl-booke.h>
#include <asm/tlb.h>
#include <asm/kvm_e500.h>
#define KVM_E500_TLB0_WAY_SIZE_BIT 7 /* Fixed */
#define KVM_E500_TLB0_WAY_SIZE (1UL << KVM_E500_TLB0_WAY_SIZE_BIT)
#define KVM_E500_TLB0_WAY_SIZE_MASK (KVM_E500_TLB0_WAY_SIZE - 1)
#define KVM_E500_TLB0_WAY_NUM_BIT 1 /* No greater than 7 */
#define KVM_E500_TLB0_WAY_NUM (1UL << KVM_E500_TLB0_WAY_NUM_BIT)
#define KVM_E500_TLB0_WAY_NUM_MASK (KVM_E500_TLB0_WAY_NUM - 1)
#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE 16
#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index) ((index) >> 16)
#define esel_of(index) ((index) & 0xFFFF)
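The three macros above pack a TLB selector and an entry number into a single int index; a quick round-trip check using them, with illustrative values:
#include <assert.h>
/* Round-trip of the packing macros: tlbsel lives above bit 16,
 * esel in the low 16 bits. */
static void check_index_encoding(void)
{
	int index = index_of(1, 5);	/* TLB1, entry 5 */

	assert(tlbsel_of(index) == 1);
	assert(esel_of(index) == 5);
}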
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
(MAS2_X0 | MAS2_X1)
#define MAS3_ATTRIB_MASK \
(MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
| E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
extern void kvmppc_dump_tlbs(struct kvm_vcpu *);
extern int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *, ulong);
extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *);
extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *);
extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int);
extern int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *, int);
extern int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int);
extern void kvmppc_e500_tlb_put(struct kvm_vcpu *);
extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
/* TLB helper functions */
static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
{
return (tlbe->mas1 >> 8) & 0xf;
}
static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
{
return tlbe->mas2 & 0xfffff000;
}
static inline u64 get_tlb_bytes(const struct tlbe *tlbe)
{
unsigned int pgsize = get_tlb_size(tlbe);
return 1ULL << 10 << (pgsize << 1);
}
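The shift expression in get_tlb_bytes() implements the Book E TSIZE encoding, bytes = 1 KB << (2 * TSIZE), so each TSIZE step quadruples the page size. A small sanity sketch of that formula:
#include <assert.h>
static void check_tsize_formula(void)
{
	/* bytes = 1KB << (2 * TSIZE) */
	assert((1ULL << 10 << (1 << 1)) == 4 * 1024);	/* TSIZE 1: 4KB */
	assert((1ULL << 10 << (3 << 1)) == 64 * 1024);	/* TSIZE 3: 64KB */
	assert((1ULL << 10 << (5 << 1)) == 1024 * 1024);	/* TSIZE 5: 1MB */
}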
static inline gva_t get_tlb_end(const struct tlbe *tlbe)
{
u64 bytes = get_tlb_bytes(tlbe);
return get_tlb_eaddr(tlbe) + bytes - 1;
}
static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
{
u64 rpn = tlbe->mas7;
return (rpn << 32) | (tlbe->mas3 & 0xfffff000);
}
static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
{
return (tlbe->mas1 >> 16) & 0xff;
}
static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
{
return (tlbe->mas1 >> 12) & 0x1;
}
static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
{
return (tlbe->mas1 >> 31) & 0x1;
}
static inline unsigned int get_tlb_iprot(const struct tlbe *tlbe)
{
return (tlbe->mas1 >> 30) & 0x1;
}
static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pid & 0xff;
}
static inline unsigned int get_cur_spid(
const struct kvmppc_vcpu_e500 *vcpu_e500)
{
return (vcpu_e500->mas6 >> 16) & 0xff;
}
static inline unsigned int get_cur_sas(
const struct kvmppc_vcpu_e500 *vcpu_e500)
{
return vcpu_e500->mas6 & 0x1;
}
static inline unsigned int get_tlb_tlbsel(
const struct kvmppc_vcpu_e500 *vcpu_e500)
{
/*
* The manual says tlbsel is 2 bits wide.
* Since we only have two TLBs, only the lower bit is used.
*/
return (vcpu_e500->mas0 >> 28) & 0x1;
}
static inline unsigned int get_tlb_nv_bit(
const struct kvmppc_vcpu_e500 *vcpu_e500)
{
return vcpu_e500->mas0 & 0xfff;
}
static inline unsigned int get_tlb_esel_bit(
const struct kvmppc_vcpu_e500 *vcpu_e500)
{
return (vcpu_e500->mas0 >> 16) & 0xfff;
}
static inline unsigned int get_tlb_esel(
const struct kvmppc_vcpu_e500 *vcpu_e500,
int tlbsel)
{
unsigned int esel = get_tlb_esel_bit(vcpu_e500);
if (tlbsel == 0) {
esel &= KVM_E500_TLB0_WAY_NUM_MASK;
esel |= ((vcpu_e500->mas2 >> 12) & KVM_E500_TLB0_WAY_SIZE_MASK)
<< KVM_E500_TLB0_WAY_NUM_BIT;
} else {
esel &= KVM_E500_TLB1_SIZE - 1;
}
return esel;
}
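For the TLB0 branch above, the entry index is composed of the set index (MAS2 bits 12 and up) and the way number (low bits of MAS0[ESEL]), i.e. entry = set * ways + way. A worked sketch with illustrative values:
#include <assert.h>
static void check_tlb0_esel(void)
{
	unsigned int way = 1;		/* low bit of MAS0[ESEL] */
	unsigned int set = 0x2a;	/* (MAS2 >> 12) & way-size mask */
	unsigned int esel = (set << KVM_E500_TLB0_WAY_NUM_BIT) | way;

	assert(esel == 0x55);		/* entry 85 of TLB0 */
}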
static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
const struct tlbe *tlbe)
{
gpa_t gpa;
if (!get_tlb_v(tlbe))
return 0;
/* Does it match current guest AS? */
/* XXX what about IS != DS? */
if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
return 0;
gpa = get_tlb_raddr(tlbe);
if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
/* Mapping is not for RAM. */
return 0;
return 1;
}
#endif /* __KVM_E500_TLB_H__ */
...@@ -30,6 +30,39 @@ ...@@ -30,6 +30,39 @@
#include <asm/disassemble.h> #include <asm/disassemble.h>
#include "timing.h" #include "timing.h"
#define OP_TRAP 3
#define OP_31_XOP_LWZX 23
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STBX 215
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467
#define OP_31_XOP_DCBI 470
#define OP_31_XOP_LWBRX 534
#define OP_31_XOP_TLBSYNC 566
#define OP_31_XOP_STWBRX 662
#define OP_31_XOP_LHBRX 790
#define OP_31_XOP_STHBRX 918
#define OP_LWZ 32
#define OP_LWZU 33
#define OP_LBZ 34
#define OP_LBZU 35
#define OP_STW 36
#define OP_STWU 37
#define OP_STB 38
#define OP_STBU 39
#define OP_LHZ 40
#define OP_LHZU 41
#define OP_STH 44
#define OP_STHU 45
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{ {
if (vcpu->arch.tcr & TCR_DIE) { if (vcpu->arch.tcr & TCR_DIE) {
...@@ -78,7 +111,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -78,7 +111,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
switch (get_op(inst)) { switch (get_op(inst)) {
case 3: /* trap */ case OP_TRAP:
vcpu->arch.esr |= ESR_PTR; vcpu->arch.esr |= ESR_PTR;
kvmppc_core_queue_program(vcpu); kvmppc_core_queue_program(vcpu);
advance = 0; advance = 0;
...@@ -87,31 +120,31 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -87,31 +120,31 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case 31: case 31:
switch (get_xop(inst)) { switch (get_xop(inst)) {
case 23: /* lwzx */ case OP_31_XOP_LWZX:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break; break;
case 87: /* lbzx */ case OP_31_XOP_LBZX:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break; break;
case 151: /* stwx */ case OP_31_XOP_STWX:
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
vcpu->arch.gpr[rs], vcpu->arch.gpr[rs],
4, 1); 4, 1);
break; break;
case 215: /* stbx */ case OP_31_XOP_STBX:
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, emulated = kvmppc_handle_store(run, vcpu,
vcpu->arch.gpr[rs], vcpu->arch.gpr[rs],
1, 1); 1, 1);
break; break;
case 247: /* stbux */ case OP_31_XOP_STBUX:
rs = get_rs(inst); rs = get_rs(inst);
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
...@@ -126,12 +159,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -126,12 +159,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.gpr[rs] = ea; vcpu->arch.gpr[rs] = ea;
break; break;
case 279: /* lhzx */ case OP_31_XOP_LHZX:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break; break;
case 311: /* lhzux */ case OP_31_XOP_LHZUX:
rt = get_rt(inst); rt = get_rt(inst);
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
...@@ -144,7 +177,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -144,7 +177,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.gpr[ra] = ea; vcpu->arch.gpr[ra] = ea;
break; break;
case 339: /* mfspr */ case OP_31_XOP_MFSPR:
sprn = get_sprn(inst); sprn = get_sprn(inst);
rt = get_rt(inst); rt = get_rt(inst);
...@@ -185,7 +218,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -185,7 +218,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
} }
break; break;
case 407: /* sthx */ case OP_31_XOP_STHX:
rs = get_rs(inst); rs = get_rs(inst);
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
...@@ -195,7 +228,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -195,7 +228,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
2, 1); 2, 1);
break; break;
case 439: /* sthux */ case OP_31_XOP_STHUX:
rs = get_rs(inst); rs = get_rs(inst);
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
...@@ -210,7 +243,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -210,7 +243,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.gpr[ra] = ea; vcpu->arch.gpr[ra] = ea;
break; break;
case 467: /* mtspr */ case OP_31_XOP_MTSPR:
sprn = get_sprn(inst); sprn = get_sprn(inst);
rs = get_rs(inst); rs = get_rs(inst);
switch (sprn) { switch (sprn) {
...@@ -246,7 +279,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -246,7 +279,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
} }
break; break;
case 470: /* dcbi */ case OP_31_XOP_DCBI:
/* Do nothing. The guest is performing dcbi because /* Do nothing. The guest is performing dcbi because
* hardware DMA is not snooped by the dcache, but * hardware DMA is not snooped by the dcache, but
* emulated DMA either goes through the dcache as * emulated DMA either goes through the dcache as
...@@ -254,15 +287,15 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -254,15 +287,15 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
* coherence. */ * coherence. */
break; break;
case 534: /* lwbrx */ case OP_31_XOP_LWBRX:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
break; break;
case 566: /* tlbsync */ case OP_31_XOP_TLBSYNC:
break; break;
case 662: /* stwbrx */ case OP_31_XOP_STWBRX:
rs = get_rs(inst); rs = get_rs(inst);
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
...@@ -272,12 +305,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -272,12 +305,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
4, 0); 4, 0);
break; break;
case 790: /* lhbrx */ case OP_31_XOP_LHBRX:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
break; break;
case 918: /* sthbrx */ case OP_31_XOP_STHBRX:
rs = get_rs(inst); rs = get_rs(inst);
ra = get_ra(inst); ra = get_ra(inst);
rb = get_rb(inst); rb = get_rb(inst);
...@@ -293,37 +326,37 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -293,37 +326,37 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
} }
break; break;
case 32: /* lwz */ case OP_LWZ:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break; break;
case 33: /* lwzu */ case OP_LWZU:
ra = get_ra(inst); ra = get_ra(inst);
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
break; break;
case 34: /* lbz */ case OP_LBZ:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break; break;
case 35: /* lbzu */ case OP_LBZU:
ra = get_ra(inst); ra = get_ra(inst);
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
break; break;
case 36: /* stw */ case OP_STW:
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
4, 1); 4, 1);
break; break;
case 37: /* stwu */ case OP_STWU:
ra = get_ra(inst); ra = get_ra(inst);
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
...@@ -331,13 +364,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -331,13 +364,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
break; break;
case 38: /* stb */ case OP_STB:
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
1, 1); 1, 1);
break; break;
case 39: /* stbu */ case OP_STBU:
ra = get_ra(inst); ra = get_ra(inst);
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
...@@ -345,25 +378,25 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) ...@@ -345,25 +378,25 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
break; break;
case 40: /* lhz */ case OP_LHZ:
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break; break;
case 41: /* lhzu */ case OP_LHZU:
ra = get_ra(inst); ra = get_ra(inst);
rt = get_rt(inst); rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
break; break;
case 44: /* sth */ case OP_STH:
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
2, 1); 2, 1);
break; break;
case 45: /* sthu */ case OP_STHU:
ra = get_ra(inst); ra = get_ra(inst);
rs = get_rs(inst); rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
......
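The magic primary-opcode numbers (32, 33, ...) are replaced by named OP_* constants above. For context, a sketch of the field accessors these cases rely on — on PowerPC the primary opcode occupies the top 6 bits of the 32-bit instruction word, with RT/RA/RB following (these helpers mirror what the kernel's emulation code defines; shown here for illustration only):

	/* PowerPC instruction field accessors (a sketch). */
	static inline unsigned int get_op(u32 inst) { return inst >> 26; }          /* primary opcode */
	static inline unsigned int get_rt(u32 inst) { return (inst >> 21) & 0x1f; } /* target GPR */
	static inline unsigned int get_ra(u32 inst) { return (inst >> 16) & 0x1f; } /* base GPR */
	static inline unsigned int get_rb(u32 inst) { return (inst >> 11) & 0x1f; } /* index GPR */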
...@@ -216,46 +216,23 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) ...@@ -216,46 +216,23 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{ {
kvmppc_core_destroy_mmu(vcpu); kvmppc_mmu_destroy(vcpu);
} }
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{ {
if (vcpu->guest_debug.enabled)
kvmppc_core_load_guest_debugstate(vcpu);
kvmppc_core_vcpu_load(vcpu, cpu); kvmppc_core_vcpu_load(vcpu, cpu);
} }
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{ {
if (vcpu->guest_debug.enabled)
kvmppc_core_load_host_debugstate(vcpu);
/* Don't leave guest TLB entries resident when being de-scheduled. */
/* XXX It would be nice to differentiate between heavyweight exit and
* sched_out here, since we could avoid the TLB flush for heavyweight
* exits. */
_tlbil_all();
kvmppc_core_vcpu_put(vcpu); kvmppc_core_vcpu_put(vcpu);
} }
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg) struct kvm_guest_debug *dbg)
{ {
int i; return -EINVAL;
vcpu->guest_debug.enabled = dbg->enabled;
if (vcpu->guest_debug.enabled) {
for (i=0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
if (dbg->breakpoints[i].enabled)
vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
else
vcpu->guest_debug.bp[i] = 0;
}
}
return 0;
} }
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
......
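The old per-arch breakpoint plumbing is gone; embedded PowerPC now simply rejects the generic debug ioctl. From userspace that looks roughly like this (a sketch; vcpu_fd is assumed to come from KVM_CREATE_VCPU, and error handling is elided):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	struct kvm_guest_debug dbg = { .control = KVM_GUESTDBG_ENABLE };
	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
	        perror("KVM_SET_GUEST_DEBUG");  /* -EINVAL: not implemented on this arch */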
...@@ -42,4 +42,11 @@ struct kvm_fpu { ...@@ -42,4 +42,11 @@ struct kvm_fpu {
__u64 fprs[16]; __u64 fprs[16];
}; };
struct kvm_debug_exit_arch {
};
/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
};
#endif #endif
...@@ -21,9 +21,6 @@ ...@@ -21,9 +21,6 @@
/* memory slots that are not exposed to userspace */ /* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_PRIVATE_MEM_SLOTS 4
struct kvm_guest_debug {
};
struct sca_entry { struct sca_entry {
atomic_t scn; atomic_t scn;
__u64 reserved; __u64 reserved;
......
...@@ -4,6 +4,9 @@ ...@@ -4,6 +4,9 @@
config HAVE_KVM config HAVE_KVM
bool bool
config HAVE_KVM_IRQCHIP
bool
menuconfig VIRTUALIZATION menuconfig VIRTUALIZATION
bool "Virtualization" bool "Virtualization"
default y default y
......
...@@ -103,7 +103,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu) ...@@ -103,7 +103,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
static intercept_handler_t instruction_handlers[256] = { static intercept_handler_t instruction_handlers[256] = {
[0x83] = kvm_s390_handle_diag, [0x83] = kvm_s390_handle_diag,
[0xae] = kvm_s390_handle_sigp, [0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_priv, [0xb2] = kvm_s390_handle_b2,
[0xb7] = handle_lctl, [0xb7] = handle_lctl,
[0xeb] = handle_lctlg, [0xeb] = handle_lctlg,
}; };
......
...@@ -555,9 +555,14 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, ...@@ -555,9 +555,14 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
s390int->parm); s390int->parm);
break; break;
case KVM_S390_SIGP_SET_PREFIX:
inti->prefix.address = s390int->parm;
inti->type = s390int->type;
VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
s390int->parm);
break;
case KVM_S390_SIGP_STOP: case KVM_S390_SIGP_STOP:
case KVM_S390_RESTART: case KVM_S390_RESTART:
case KVM_S390_SIGP_SET_PREFIX:
case KVM_S390_INT_EMERGENCY: case KVM_S390_INT_EMERGENCY:
VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
inti->type = s390int->type; inti->type = s390int->type;
......
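With set-prefix split out of the catch-all branch, its parameter is now carried through instead of being dropped. Injecting it from userspace would look roughly like this (a sketch; vcpu_fd and new_prefix are assumptions):

	struct kvm_s390_interrupt irq = {
	        .type = KVM_S390_SIGP_SET_PREFIX,
	        .parm = new_prefix,     /* the prefix value, now preserved */
	};
	ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq);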
...@@ -422,8 +422,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, ...@@ -422,8 +422,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return -EINVAL; /* not implemented yet */ return -EINVAL; /* not implemented yet */
} }
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg) struct kvm_guest_debug *dbg)
{ {
return -EINVAL; /* not implemented yet */ return -EINVAL; /* not implemented yet */
} }
......
...@@ -50,7 +50,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, ...@@ -50,7 +50,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
/* implemented in priv.c */ /* implemented in priv.c */
int kvm_s390_handle_priv(struct kvm_vcpu *vcpu); int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
/* implemented in sigp.c */ /* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
......
...@@ -304,12 +304,24 @@ static intercept_handler_t priv_handlers[256] = { ...@@ -304,12 +304,24 @@ static intercept_handler_t priv_handlers[256] = {
[0xb1] = handle_stfl, [0xb1] = handle_stfl,
}; };
int kvm_s390_handle_priv(struct kvm_vcpu *vcpu) int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{ {
intercept_handler_t handler; intercept_handler_t handler;
/*
* A lot of B2 instructions are privileged. We first check for
* the privileged ones that we can handle in the kernel. If the
* kernel can handle this instruction, we check for the problem
* state bit and (a) handle the instruction or (b) send a code 2
* program check.
* Anything else goes to userspace.
*/
handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
if (handler) if (handler) {
return handler(vcpu); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu,
PGM_PRIVILEGED_OPERATION);
else
return handler(vcpu);
}
return -ENOTSUPP; return -ENOTSUPP;
} }
...@@ -153,8 +153,6 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) ...@@ -153,8 +153,6 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
switch (parameter & 0xff) { switch (parameter & 0xff) {
case 0: case 0:
printk(KERN_WARNING "kvm: request to switch to ESA/390 mode"
" not supported");
rc = 3; /* not operational */ rc = 3; /* not operational */
break; break;
case 1: case 1:
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#define __KVM_HAVE_DEVICE_ASSIGNMENT #define __KVM_HAVE_DEVICE_ASSIGNMENT
#define __KVM_HAVE_MSI #define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI #define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
/* Architectural interrupt line count. */ /* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256 #define KVM_NR_INTERRUPTS 256
...@@ -212,7 +213,30 @@ struct kvm_pit_channel_state { ...@@ -212,7 +213,30 @@ struct kvm_pit_channel_state {
__s64 count_load_time; __s64 count_load_time;
}; };
struct kvm_debug_exit_arch {
__u32 exception;
__u32 pad;
__u64 pc;
__u64 dr6;
__u64 dr7;
};
#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000
/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
__u64 debugreg[8];
};
struct kvm_pit_state { struct kvm_pit_state {
struct kvm_pit_channel_state channels[3]; struct kvm_pit_channel_state channels[3];
}; };
struct kvm_reinject_control {
__u8 pit_reinject;
__u8 reserved[31];
};
#endif /* _ASM_X86_KVM_H */ #endif /* _ASM_X86_KVM_H */
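Together with the KVM_GUESTDBG_* flags above, the new arch block lets userspace program x86 debug registers directly. A minimal sketch of arming one hardware execute breakpoint (vcpu_fd and bp_addr are assumptions; KVM_GUESTDBG_ENABLE is the generic enable bit from linux/kvm.h):

	struct kvm_guest_debug dbg = { 0 };
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.debugreg[0] = bp_addr;     /* DR0: linear address to trap */
	dbg.arch.debugreg[7] = 0x00000001;  /* DR7: enable slot 0, execute, len 1 */
	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);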
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <asm/pvclock-abi.h> #include <asm/pvclock-abi.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/msr-index.h>
#define KVM_MAX_VCPUS 16 #define KVM_MAX_VCPUS 16
#define KVM_MEMORY_SLOTS 32 #define KVM_MEMORY_SLOTS 32
...@@ -134,11 +135,18 @@ enum { ...@@ -134,11 +135,18 @@ enum {
#define KVM_NR_MEM_OBJS 40 #define KVM_NR_MEM_OBJS 40
struct kvm_guest_debug { #define KVM_NR_DB_REGS 4
int enabled;
unsigned long bp[4]; #define DR6_BD (1 << 13)
int singlestep; #define DR6_BS (1 << 14)
}; #define DR6_FIXED_1 0xffff0ff0
#define DR6_VOLATILE 0x0000e00f
#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE (1 << 9)
#define DR7_GD (1 << 13)
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff23ff
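The FIXED/VOLATILE masks say which DR6/DR7 bits a guest may actually change. A plausible sanitizing pattern under these masks (a sketch, not necessarily the exact code this series adds elsewhere):

	/* Keep the writable bits from the guest value, force must-be-one bits. */
	vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
	vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;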
/* /*
* We don't want allocation failures within the mmu code, so we preallocate * We don't want allocation failures within the mmu code, so we preallocate
...@@ -162,7 +170,8 @@ struct kvm_pte_chain { ...@@ -162,7 +170,8 @@ struct kvm_pte_chain {
* bits 0:3 - total guest paging levels (2-4, or zero for real mode) * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
* bits 4:7 - page table level for this shadow (1-4) * bits 4:7 - page table level for this shadow (1-4)
* bits 8:9 - page table quadrant for 2-level guests * bits 8:9 - page table quadrant for 2-level guests
* bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode) * bit 16 - direct mapping of virtual to physical at gfn,
* used for real mode and two-dimensional paging
* bits 17:19 - common access permissions for all ptes in this shadow page * bits 17:19 - common access permissions for all ptes in this shadow page
*/ */
union kvm_mmu_page_role { union kvm_mmu_page_role {
...@@ -172,9 +181,10 @@ union kvm_mmu_page_role { ...@@ -172,9 +181,10 @@ union kvm_mmu_page_role {
unsigned level:4; unsigned level:4;
unsigned quadrant:2; unsigned quadrant:2;
unsigned pad_for_nice_hex_output:6; unsigned pad_for_nice_hex_output:6;
unsigned metaphysical:1; unsigned direct:1;
unsigned access:3; unsigned access:3;
unsigned invalid:1; unsigned invalid:1;
unsigned cr4_pge:1;
}; };
}; };
...@@ -218,6 +228,18 @@ struct kvm_pv_mmu_op_buffer { ...@@ -218,6 +228,18 @@ struct kvm_pv_mmu_op_buffer {
char buf[512] __aligned(sizeof(long)); char buf[512] __aligned(sizeof(long));
}; };
struct kvm_pio_request {
unsigned long count;
int cur_count;
gva_t guest_gva;
int in;
int port;
int size;
int string;
int down;
int rep;
};
/* /*
* x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
* 32-bit). The kvm_mmu structure abstracts the details of the current mmu * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
...@@ -236,6 +258,7 @@ struct kvm_mmu { ...@@ -236,6 +258,7 @@ struct kvm_mmu {
hpa_t root_hpa; hpa_t root_hpa;
int root_level; int root_level;
int shadow_root_level; int shadow_root_level;
union kvm_mmu_page_role base_role;
u64 *pae_root; u64 *pae_root;
}; };
...@@ -258,6 +281,7 @@ struct kvm_vcpu_arch { ...@@ -258,6 +281,7 @@ struct kvm_vcpu_arch {
unsigned long cr3; unsigned long cr3;
unsigned long cr4; unsigned long cr4;
unsigned long cr8; unsigned long cr8;
u32 hflags;
u64 pdptrs[4]; /* pae */ u64 pdptrs[4]; /* pae */
u64 shadow_efer; u64 shadow_efer;
u64 apic_base; u64 apic_base;
...@@ -338,6 +362,15 @@ struct kvm_vcpu_arch { ...@@ -338,6 +362,15 @@ struct kvm_vcpu_arch {
struct mtrr_state_type mtrr_state; struct mtrr_state_type mtrr_state;
u32 pat; u32 pat;
int switch_db_regs;
unsigned long host_db[KVM_NR_DB_REGS];
unsigned long host_dr6;
unsigned long host_dr7;
unsigned long db[KVM_NR_DB_REGS];
unsigned long dr6;
unsigned long dr7;
unsigned long eff_db[KVM_NR_DB_REGS];
}; };
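switch_db_regs marks vcpus whose debug registers differ from the host's, so the save/restore cost is only paid when needed. The entry-path pattern would look roughly like this (get_debugreg/set_debugreg are the kernel's accessors; the exact placement in the run loop is an assumption):

	if (unlikely(vcpu->arch.switch_db_regs)) {
	        get_debugreg(vcpu->arch.host_dr6, 6);   /* stash host state */
	        get_debugreg(vcpu->arch.host_dr7, 7);
	        set_debugreg(0, 7);                     /* disarm before reloading */
	        set_debugreg(vcpu->arch.eff_db[0], 0);  /* load effective guest view */
	        set_debugreg(vcpu->arch.eff_db[1], 1);
	        set_debugreg(vcpu->arch.eff_db[2], 2);
	        set_debugreg(vcpu->arch.eff_db[3], 3);
	}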
struct kvm_mem_alias { struct kvm_mem_alias {
...@@ -378,6 +411,7 @@ struct kvm_arch{ ...@@ -378,6 +411,7 @@ struct kvm_arch{
unsigned long irq_sources_bitmap; unsigned long irq_sources_bitmap;
unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
u64 vm_init_tsc;
}; };
struct kvm_vm_stat { struct kvm_vm_stat {
...@@ -446,8 +480,7 @@ struct kvm_x86_ops { ...@@ -446,8 +480,7 @@ struct kvm_x86_ops {
void (*vcpu_put)(struct kvm_vcpu *vcpu); void (*vcpu_put)(struct kvm_vcpu *vcpu);
int (*set_guest_debug)(struct kvm_vcpu *vcpu, int (*set_guest_debug)(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg); struct kvm_guest_debug *dbg);
void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
...@@ -583,16 +616,12 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); ...@@ -583,16 +616,12 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
u32 error_code); u32 error_code);
void kvm_pic_set_irq(void *opaque, int irq, int level); int kvm_pic_set_irq(void *opaque, int irq, int level);
void kvm_inject_nmi(struct kvm_vcpu *vcpu); void kvm_inject_nmi(struct kvm_vcpu *vcpu);
void fx_init(struct kvm_vcpu *vcpu); void fx_init(struct kvm_vcpu *vcpu);
int emulator_read_std(unsigned long addr,
void *val,
unsigned int bytes,
struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr, int emulator_write_emulated(unsigned long addr,
const void *val, const void *val,
unsigned int bytes, unsigned int bytes,
...@@ -737,6 +766,10 @@ enum { ...@@ -737,6 +766,10 @@ enum {
TASK_SWITCH_GATE = 3, TASK_SWITCH_GATE = 3,
}; };
#define HF_GIF_MASK (1 << 0)
#define HF_HIF_MASK (1 << 1)
#define HF_VINTR_MASK (1 << 2)
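hflags gathers host-side per-vcpu state bits that have no architectural home. Nested SVM, for instance, would track the global interrupt flag roughly like this (a sketch of STGI/CLGI handling, not the verbatim handlers):

	vcpu->arch.hflags |= HF_GIF_MASK;    /* STGI: interrupts to the guest open  */
	vcpu->arch.hflags &= ~HF_GIF_MASK;   /* CLGI: interrupts to the guest gated */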
/* /*
* Hardware virtualization extension instructions may fault if a * Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running. * reboot turns off virtualization while processes are running.
......
...@@ -18,11 +18,15 @@ ...@@ -18,11 +18,15 @@
#define _EFER_LME 8 /* Long mode enable */ #define _EFER_LME 8 /* Long mode enable */
#define _EFER_LMA 10 /* Long mode active (read-only) */ #define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */ #define _EFER_NX 11 /* No execute enable */
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
#define EFER_SCE (1<<_EFER_SCE) #define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME) #define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA) #define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX) #define EFER_NX (1<<_EFER_NX)
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_FFXSR (1<<_EFER_FFXSR)
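With SVME promoted to a shared EFER definition, turning SVM on or off is the usual MSR read-modify-write (rdmsrl/wrmsrl are the kernel's MSR helpers; compare the cpu_svm_disable change further down):

	u64 efer;

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_SVME);   /* enable; clear EFER_SVME to disable */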
/* Intel MSRs. Some also available on other CPUs */ /* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1 #define MSR_IA32_PERFCTR0 0x000000c1
...@@ -360,4 +364,9 @@ ...@@ -360,4 +364,9 @@
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_HSAVE_PA 0xc0010117
#endif /* _ASM_X86_MSR_INDEX_H */ #endif /* _ASM_X86_MSR_INDEX_H */
...@@ -174,10 +174,6 @@ struct __attribute__ ((__packed__)) vmcb { ...@@ -174,10 +174,6 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_CPUID_FEATURE_SHIFT 2 #define SVM_CPUID_FEATURE_SHIFT 2
#define SVM_CPUID_FUNC 0x8000000a #define SVM_CPUID_FUNC 0x8000000a
#define MSR_EFER_SVME_MASK (1ULL << 12)
#define MSR_VM_CR 0xc0010114
#define MSR_VM_HSAVE_PA 0xc0010117ULL
#define SVM_VM_CR_SVM_DISABLE 4 #define SVM_VM_CR_SVM_DISABLE 4
#define SVM_SELECTOR_S_SHIFT 4 #define SVM_SELECTOR_S_SHIFT 4
......
...@@ -118,7 +118,7 @@ static inline void cpu_svm_disable(void) ...@@ -118,7 +118,7 @@ static inline void cpu_svm_disable(void)
wrmsrl(MSR_VM_HSAVE_PA, 0); wrmsrl(MSR_VM_HSAVE_PA, 0);
rdmsrl(MSR_EFER, efer); rdmsrl(MSR_EFER, efer);
wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); wrmsrl(MSR_EFER, efer & ~EFER_SVME);
} }
/** Makes sure SVM is disabled, if it is supported on the CPU /** Makes sure SVM is disabled, if it is supported on the CPU
......
...@@ -270,8 +270,9 @@ enum vmcs_field { ...@@ -270,8 +270,9 @@ enum vmcs_field {
#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ #define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
/* GUEST_INTERRUPTIBILITY_INFO flags. */ /* GUEST_INTERRUPTIBILITY_INFO flags. */
#define GUEST_INTR_STATE_STI 0x00000001 #define GUEST_INTR_STATE_STI 0x00000001
...@@ -311,7 +312,7 @@ enum vmcs_field { ...@@ -311,7 +312,7 @@ enum vmcs_field {
#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ #define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */
#define TYPE_MOV_TO_DR (0 << 4) #define TYPE_MOV_TO_DR (0 << 4)
#define TYPE_MOV_FROM_DR (1 << 4) #define TYPE_MOV_FROM_DR (1 << 4)
#define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. */ #define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */
/* segment AR */ /* segment AR */
......
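Making DEBUG_REG_ACCESS_REG function-like lets the DR-access exit handler pull the GPR index straight out of the exit qualification. A sketch of the call-site pattern (exit_qualification is read from the VMCS; names are illustrative):

	int reg = DEBUG_REG_ACCESS_REG(exit_qualification);   /* GPR index, bits 11:8 */
	if ((exit_qualification & DEBUG_REG_ACCESS_TYPE) == TYPE_MOV_TO_DR)
	        val = kvm_register_read(vcpu, reg);           /* MOV DRn <- GPR */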
...@@ -4,6 +4,10 @@ ...@@ -4,6 +4,10 @@
config HAVE_KVM config HAVE_KVM
bool bool
config HAVE_KVM_IRQCHIP
bool
default y
menuconfig VIRTUALIZATION menuconfig VIRTUALIZATION
bool "Virtualization" bool "Virtualization"
depends on HAVE_KVM || X86 depends on HAVE_KVM || X86
......
...@@ -201,6 +201,9 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps) ...@@ -201,6 +201,9 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
if (!atomic_inc_and_test(&pt->pending)) if (!atomic_inc_and_test(&pt->pending))
set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests); set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
if (!pt->reinject)
atomic_set(&pt->pending, 1);
if (vcpu0 && waitqueue_active(&vcpu0->wq)) if (vcpu0 && waitqueue_active(&vcpu0->wq))
wake_up_interruptible(&vcpu0->wq); wake_up_interruptible(&vcpu0->wq);
...@@ -536,6 +539,16 @@ void kvm_pit_reset(struct kvm_pit *pit) ...@@ -536,6 +539,16 @@ void kvm_pit_reset(struct kvm_pit *pit)
pit->pit_state.irq_ack = 1; pit->pit_state.irq_ack = 1;
} }
static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
if (!mask) {
atomic_set(&pit->pit_state.pit_timer.pending, 0);
pit->pit_state.irq_ack = 1;
}
}
struct kvm_pit *kvm_create_pit(struct kvm *kvm) struct kvm_pit *kvm_create_pit(struct kvm *kvm)
{ {
struct kvm_pit *pit; struct kvm_pit *pit;
...@@ -545,9 +558,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) ...@@ -545,9 +558,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
if (!pit) if (!pit)
return NULL; return NULL;
mutex_lock(&kvm->lock);
pit->irq_source_id = kvm_request_irq_source_id(kvm); pit->irq_source_id = kvm_request_irq_source_id(kvm);
mutex_unlock(&kvm->lock);
if (pit->irq_source_id < 0) { if (pit->irq_source_id < 0) {
kfree(pit); kfree(pit);
return NULL; return NULL;
...@@ -580,10 +591,14 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) ...@@ -580,10 +591,14 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
pit_state->irq_ack_notifier.gsi = 0; pit_state->irq_ack_notifier.gsi = 0;
pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
pit_state->pit_timer.reinject = true;
mutex_unlock(&pit->pit_state.lock); mutex_unlock(&pit->pit_state.lock);
kvm_pit_reset(pit); kvm_pit_reset(pit);
pit->mask_notifier.func = pit_mask_notifier;
kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
return pit; return pit;
} }
...@@ -592,6 +607,8 @@ void kvm_free_pit(struct kvm *kvm) ...@@ -592,6 +607,8 @@ void kvm_free_pit(struct kvm *kvm)
struct hrtimer *timer; struct hrtimer *timer;
if (kvm->arch.vpit) { if (kvm->arch.vpit) {
kvm_unregister_irq_mask_notifier(kvm, 0,
&kvm->arch.vpit->mask_notifier);
mutex_lock(&kvm->arch.vpit->pit_state.lock); mutex_lock(&kvm->arch.vpit->pit_state.lock);
timer = &kvm->arch.vpit->pit_state.pit_timer.timer; timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
hrtimer_cancel(timer); hrtimer_cancel(timer);
......
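When reinject is off, pending is clamped to one so missed ticks are dropped rather than replayed. Userspace opts out through the kvm_reinject_control structure added earlier (a sketch, assuming a vm fd and the KVM_REINJECT_CONTROL ioctl that structure was introduced for):

	struct kvm_reinject_control ctl = { .pit_reinject = 0 };
	ioctl(vm_fd, KVM_REINJECT_CONTROL, &ctl);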
...@@ -9,6 +9,7 @@ struct kvm_kpit_timer { ...@@ -9,6 +9,7 @@ struct kvm_kpit_timer {
s64 period; /* unit: ns */ s64 period; /* unit: ns */
s64 scheduled; s64 scheduled;
atomic_t pending; atomic_t pending;
bool reinject;
}; };
struct kvm_kpit_channel_state { struct kvm_kpit_channel_state {
...@@ -45,6 +46,7 @@ struct kvm_pit { ...@@ -45,6 +46,7 @@ struct kvm_pit {
struct kvm *kvm; struct kvm *kvm;
struct kvm_kpit_state pit_state; struct kvm_kpit_state pit_state;
int irq_source_id; int irq_source_id;
struct kvm_irq_mask_notifier mask_notifier;
}; };
#define KVM_PIT_BASE_ADDRESS 0x40 #define KVM_PIT_BASE_ADDRESS 0x40
......
...@@ -32,11 +32,13 @@ ...@@ -32,11 +32,13 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
static void pic_lock(struct kvm_pic *s) static void pic_lock(struct kvm_pic *s)
__acquires(&s->lock)
{ {
spin_lock(&s->lock); spin_lock(&s->lock);
} }
static void pic_unlock(struct kvm_pic *s) static void pic_unlock(struct kvm_pic *s)
__releases(&s->lock)
{ {
struct kvm *kvm = s->kvm; struct kvm *kvm = s->kvm;
unsigned acks = s->pending_acks; unsigned acks = s->pending_acks;
...@@ -49,7 +51,8 @@ static void pic_unlock(struct kvm_pic *s) ...@@ -49,7 +51,8 @@ static void pic_unlock(struct kvm_pic *s)
spin_unlock(&s->lock); spin_unlock(&s->lock);
while (acks) { while (acks) {
kvm_notify_acked_irq(kvm, __ffs(acks)); kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)),
__ffs(acks));
acks &= acks - 1; acks &= acks - 1;
} }
...@@ -76,12 +79,13 @@ void kvm_pic_clear_isr_ack(struct kvm *kvm) ...@@ -76,12 +79,13 @@ void kvm_pic_clear_isr_ack(struct kvm *kvm)
/* /*
* set irq level. If an edge is detected, then the IRR is set to 1 * set irq level. If an edge is detected, then the IRR is set to 1
*/ */
static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{ {
int mask; int mask, ret = 1;
mask = 1 << irq; mask = 1 << irq;
if (s->elcr & mask) /* level triggered */ if (s->elcr & mask) /* level triggered */
if (level) { if (level) {
ret = !(s->irr & mask);
s->irr |= mask; s->irr |= mask;
s->last_irr |= mask; s->last_irr |= mask;
} else { } else {
...@@ -90,11 +94,15 @@ static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) ...@@ -90,11 +94,15 @@ static inline void pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
} }
else /* edge triggered */ else /* edge triggered */
if (level) { if (level) {
if ((s->last_irr & mask) == 0) if ((s->last_irr & mask) == 0) {
ret = !(s->irr & mask);
s->irr |= mask; s->irr |= mask;
}
s->last_irr |= mask; s->last_irr |= mask;
} else } else
s->last_irr &= ~mask; s->last_irr &= ~mask;
return (s->imr & mask) ? -1 : ret;
} }
/* /*
...@@ -171,16 +179,19 @@ void kvm_pic_update_irq(struct kvm_pic *s) ...@@ -171,16 +179,19 @@ void kvm_pic_update_irq(struct kvm_pic *s)
pic_unlock(s); pic_unlock(s);
} }
void kvm_pic_set_irq(void *opaque, int irq, int level) int kvm_pic_set_irq(void *opaque, int irq, int level)
{ {
struct kvm_pic *s = opaque; struct kvm_pic *s = opaque;
int ret = -1;
pic_lock(s); pic_lock(s);
if (irq >= 0 && irq < PIC_NUM_PINS) { if (irq >= 0 && irq < PIC_NUM_PINS) {
pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
pic_update_irq(s); pic_update_irq(s);
} }
pic_unlock(s); pic_unlock(s);
return ret;
} }
/* /*
...@@ -232,7 +243,7 @@ int kvm_pic_read_irq(struct kvm *kvm) ...@@ -232,7 +243,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
} }
pic_update_irq(s); pic_update_irq(s);
pic_unlock(s); pic_unlock(s);
kvm_notify_acked_irq(kvm, irq); kvm_notify_acked_irq(kvm, SELECT_PIC(irq), irq);
return intno; return intno;
} }
......
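kvm_pic_set_irq now tells its caller what became of the line, which is what allows injection status to be reported back to userspace. The convention, as an annotated sketch:

	ret = kvm_pic_set_irq(pic, irq, level);
	/* ret < 0:  line masked by IMR, nothing delivered
	 * ret == 0: IRR bit was already set, interrupt coalesced
	 * ret > 0:  interrupt newly latched and will be delivered */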
...@@ -32,6 +32,8 @@ ...@@ -32,6 +32,8 @@
#include "lapic.h" #include "lapic.h"
#define PIC_NUM_PINS 16 #define PIC_NUM_PINS 16
#define SELECT_PIC(irq) \
((irq) < 8 ? KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE)
struct kvm; struct kvm;
struct kvm_vcpu; struct kvm_vcpu;
......
...@@ -18,7 +18,6 @@ static const u32 host_save_user_msrs[] = { ...@@ -18,7 +18,6 @@ static const u32 host_save_user_msrs[] = {
}; };
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
#define NUM_DB_REGS 4
struct kvm_vcpu; struct kvm_vcpu;
...@@ -29,18 +28,23 @@ struct vcpu_svm { ...@@ -29,18 +28,23 @@ struct vcpu_svm {
struct svm_cpu_data *svm_data; struct svm_cpu_data *svm_data;
uint64_t asid_generation; uint64_t asid_generation;
unsigned long db_regs[NUM_DB_REGS];
u64 next_rip; u64 next_rip;
u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
u64 host_gs_base; u64 host_gs_base;
unsigned long host_cr2; unsigned long host_cr2;
unsigned long host_db_regs[NUM_DB_REGS];
unsigned long host_dr6;
unsigned long host_dr7;
u32 *msrpm; u32 *msrpm;
struct vmcb *hsave;
u64 hsave_msr;
u64 nested_vmcb;
/* These are the merged vectors */
u32 *nested_msrpm;
/* gpa pointers to the real vectors */
u64 nested_vmcb_msrpm;
}; };
#endif #endif
......
...@@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) ...@@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
static inline int is_long_mode(struct kvm_vcpu *vcpu) static inline int is_long_mode(struct kvm_vcpu *vcpu)
{ {
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
return vcpu->arch.shadow_efer & EFER_LME; return vcpu->arch.shadow_efer & EFER_LMA;
#else #else
return 0; return 0;
#endif #endif
......
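EFER.LME only records the intent to enter long mode; the CPU sets EFER.LMA once paging is enabled and long mode is actually active, so a guest mid-transition (LME=1, PG=0) was misclassified by the old check. The distinction, as a sketch:

	int enabled = !!(efer & EFER_LME);  /* software intent */
	int active  = !!(efer & EFER_LMA);  /* hardware status: what is_long_mode() wants */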
...@@ -40,17 +40,4 @@ typedef unsigned long hfn_t; ...@@ -40,17 +40,4 @@ typedef unsigned long hfn_t;
typedef hfn_t pfn_t; typedef hfn_t pfn_t;
struct kvm_pio_request {
unsigned long count;
int cur_count;
struct page *guest_pages[2];
unsigned guest_page_offset;
int in;
int port;
int size;
int string;
int down;
int rep;
};
#endif /* __KVM_TYPES_H__ */ #endif /* __KVM_TYPES_H__ */
...@@ -83,7 +83,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector, ...@@ -83,7 +83,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
unsigned long bitmap); unsigned long bitmap);
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
int kvm_ioapic_init(struct kvm *kvm); int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic); void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
u8 dest_mode); u8 dest_mode);
......