Commit 07acfc2a authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM changes from Avi Kivity:
 "Changes include additional instruction emulation, page-crossing MMIO,
  faster dirty logging, preventing the watchdog from killing a stopped
  guest, module autoload, a new MSI ABI, and some minor optimizations
  and fixes.  Outside x86 we have a small s390 and a very large ppc
  update.

  Regarding the new (for kvm) rebaseless workflow, some of the patches
   that were merged before we switched trees had to be rebased, while
  others are true pulls.  In either case the signoffs should be correct
  now."

Fix up trivial conflicts in Documentation/feature-removal-schedule.txt,
arch/powerpc/kvm/book3s_segment.S and arch/x86/include/asm/kvm_para.h.

I suspect the kvm_para.h resolution ends up doing the "do I have cpuid"
check effectively twice (it was done differently in two different
commits), but better safe than sorry ;)

* 'next' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (125 commits)
  KVM: make asm-generic/kvm_para.h have an ifdef __KERNEL__ block
  KVM: s390: onereg for timer related registers
  KVM: s390: epoch difference and TOD programmable field
  KVM: s390: KVM_GET/SET_ONEREG for s390
  KVM: s390: add capability indicating COW support
  KVM: Fix mmu_reload() clash with nested vmx event injection
  KVM: MMU: Don't use RCU for lockless shadow walking
  KVM: VMX: Optimize %ds, %es reload
  KVM: VMX: Fix %ds/%es clobber
  KVM: x86 emulator: convert bsf/bsr instructions to emulate_2op_SrcV_nobyte()
  KVM: VMX: unlink vmcs on fail path
  KVM: PPC: Emulator: clean up SPR reads and writes
  KVM: PPC: Emulator: clean up instruction parsing
  kvm/powerpc: Add new ioctl to retrieve server MMU infos
  kvm/book3s: Make kernel emulated H_PUT_TCE available for "PR" KVM
  KVM: PPC: bookehv: Fix r8/r13 storing in level exception handler
  KVM: PPC: Book3S: Enable IRQs during exit handling
  KVM: PPC: Fix PR KVM on POWER7 bare metal
  KVM: PPC: Fix stbux emulation
  KVM: PPC: bookehv: Use lwz/stw instead of PPC_LL/PPC_STL for 32-bit fields
  ...
parents b5f4035a 322728e5
@@ -588,3 +588,10 @@ Why:	Remount currently allows changing bound subsystems and
 	replaced with conventional fsnotify.
 ----------------------------
+
+What:	KVM debugfs statistics
+When:	2013
+Why:	KVM tracepoints provide mostly equivalent information in a much more
+	flexible fashion.
+----------------------------
@@ -10,11 +10,15 @@ a guest.
 KVM cpuid functions are:
 
 function: KVM_CPUID_SIGNATURE (0x40000000)
-returns : eax = 0,
+returns : eax = 0x40000001,
           ebx = 0x4b4d564b,
           ecx = 0x564b4d56,
           edx = 0x4d.
 Note that this value in ebx, ecx and edx corresponds to the string "KVMKVMKVM".
+The value in eax corresponds to the maximum cpuid function present in this leaf,
+and will be updated if more functions are added in the future.
+Note also that old hosts set eax value to 0x0. This should
+be interpreted as if the value was 0x40000001.
 This function queries the presence of KVM cpuid leafs.
...
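The detection handshake documented here is small enough to sketch. A minimal guest-side probe (an illustrative standalone example, not the kernel's actual kvm_para_available() code) that reads the signature leaf and, per the note above, tolerates old hosts reporting eax = 0:

    #include <stdint.h>
    #include <string.h>

    #define KVM_CPUID_SIGNATURE 0x40000000

    static int kvm_present(void)
    {
            uint32_t eax, ebx, ecx, edx;
            char sig[13];

            /* Query the KVM signature leaf. */
            asm volatile("cpuid"
                         : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                         : "0" (KVM_CPUID_SIGNATURE));

            memcpy(sig + 0, &ebx, 4);
            memcpy(sig + 4, &ecx, 4);
            memcpy(sig + 8, &edx, 4);
            sig[12] = '\0';

            /* eax holds the highest KVM leaf; old hosts report 0, which
             * must be read as 0x40000001 per the documentation above. */
            return strcmp(sig, "KVMKVMKVM") == 0;
    }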
@@ -108,6 +108,10 @@ MSR_KVM_SYSTEM_TIME_NEW:  0x4b564d01
            |           | time measures taken across
      0     |    24     | multiple cpus are guaranteed to
            |           | be monotonic
+           -------------------------------------------------------------
+           |           | guest vcpu has been paused by
+     1     |    N/A    | the host
+           |           | See 4.70 in api.txt
            -------------------------------------------------------------
 
 Availability of this MSR must be checked via bit 3 in 0x4000001 cpuid
...
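This new flag (bit 1 of the pvclock flags) is the mechanism behind the "stopped guest" watchdog fix mentioned in the changelog: a host that pauses a guest sets it, and the guest reads and clears it via kvm_check_and_clear_guest_paused(). A hedged sketch of the consumer side; the timestamp helper name is illustrative, and the stub mirrors the non-x86 fallbacks added by this merge:

    #include <stdbool.h>

    /* Fallback for architectures without the pvclock flag, as in the
     * ia64/powerpc stubs added below; x86 instead reads and clears the
     * paused bit from its pvclock page. */
    static inline bool kvm_check_and_clear_guest_paused(void)
    {
            return false;
    }

    static void touch_watchdog_timestamp(void);     /* illustrative helper */

    static void watchdog_timer_fn(void)
    {
            /* Time "lost" while the host had us stopped is not a lockup;
             * reset the watchdog instead of shooting the guest. */
            if (kvm_check_and_clear_guest_paused()) {
                    touch_watchdog_timestamp();
                    return;
            }
            /* ... normal soft-lockup detection continues here ... */
    }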
#include <asm-generic/kvm_para.h>
[the same one-line stub is added as the new asm/kvm_para.h for several more architectures]
@@ -365,6 +365,7 @@ struct thash_cb {
 };
 
 struct kvm_vcpu_stat {
+	u32 halt_wakeup;
 };
 
 struct kvm_vcpu_arch {
@@ -448,6 +449,8 @@ struct kvm_vcpu_arch {
 	char log_buf[VMM_LOG_LEN];
 	union context host;
 	union context guest;
+
+	char mmio_data[8];
 };
 
 struct kvm_vm_stat {
...
@@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return 0;
 }
 
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
 #endif
 
 #endif
@@ -232,12 +232,12 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
 		goto mmio;
 	vcpu->mmio_needed = 1;
-	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
-	vcpu->mmio_size = kvm_run->mmio.len = p->size;
+	vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
+	vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
 	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
 
 	if (vcpu->mmio_is_write)
-		memcpy(vcpu->mmio_data, &p->data, p->size);
+		memcpy(vcpu->arch.mmio_data, &p->data, p->size);
 	memcpy(kvm_run->mmio.data, &p->data, p->size);
 	kvm_run->exit_reason = KVM_EXIT_MMIO;
 	return 0;
@@ -719,7 +719,7 @@ static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
 	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
 
 	if (!vcpu->mmio_is_write)
-		memcpy(&p->data, vcpu->mmio_data, 8);
+		memcpy(&p->data, vcpu->arch.mmio_data, 8);
 	p->state = STATE_IORESP_READY;
 }
 
@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	if (vcpu->mmio_needed) {
-		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+		memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;
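The mmio_fragments[] indexing above comes from the page-crossing MMIO work mentioned in the changelog: an access that straddles a page boundary is broken into per-page fragments, each completed by its own KVM_EXIT_MMIO round trip. A hedged sketch of the splitting idea only (illustrative; the real fragment struct also carries more than gpa/len):

    #include <stdint.h>

    typedef uint64_t gpa_t;

    #define PAGE_SIZE 4096UL

    struct mmio_frag {
            gpa_t gpa;
            unsigned int len;
    };

    /* Split one guest access into at most max_frags per-page pieces. */
    static int split_mmio(gpa_t gpa, unsigned int len,
                          struct mmio_frag *frag, int max_frags)
    {
            int n = 0;

            while (len && n < max_frags) {
                    /* Bytes remaining on the current guest page. */
                    unsigned int now = PAGE_SIZE - (gpa & (PAGE_SIZE - 1));

                    if (now > len)
                            now = len;
                    frag[n].gpa = gpa;
                    frag[n].len = now;
                    gpa += now;
                    len -= now;
                    n++;
            }
            return n;
    }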
@@ -1872,21 +1872,6 @@ void kvm_arch_hardware_unsetup(void)
 {
 }
 
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
-	int me;
-	int cpu = vcpu->cpu;
-
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
-
-	me = get_cpu();
-	if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
-		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
-			smp_send_reschedule(cpu);
-	put_cpu();
-}
-
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
 	return __apic_accept_irq(vcpu, irq->vector);
@@ -1956,6 +1941,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 		(kvm_highest_pending_irq(vcpu) != -1);
 }
 
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+	return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
+}
+
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
...
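The deleted ia64 kvm_vcpu_kick() above was one of several near-identical per-arch copies; the refactor keeps only the KVM_REQ_KICK test in the new arch hook and hoists the rest into common code. Roughly (a sketch of the shape of the generic version, not the literal virt/kvm body):

    void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
    {
            int me;
            int cpu = vcpu->cpu;

            /* Wake the vcpu if it is sleeping in halt. */
            if (waitqueue_active(&vcpu->wq))
                    wake_up_interruptible(&vcpu->wq);

            /* IPI its cpu unless the arch says a kick is already pending. */
            me = get_cpu();
            if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                    if (kvm_arch_vcpu_should_kick(vcpu))
                            smp_send_reschedule(cpu);
            put_cpu();
    }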
#include <asm-generic/kvm_para.h>
[the same one-line asm/kvm_para.h stub, repeated for several more architectures]
@@ -168,6 +168,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_LWSYNC			ASM_CONST(0x0000000008000000)
 #define CPU_FTR_NOEXECUTE		ASM_CONST(0x0000000010000000)
 #define CPU_FTR_INDEXED_DCR		ASM_CONST(0x0000000020000000)
+#define CPU_FTR_EMB_HV			ASM_CONST(0x0000000040000000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -376,7 +377,8 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_47X	(CPU_FTRS_440x6)
 #define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
 	    CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
-	    CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
+	    CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_DEBUG_LVL_EXC)
 #define CPU_FTRS_E500	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_NOEXECUTE)
@@ -385,15 +387,15 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500MC	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
-	    CPU_FTR_DBELL)
+	    CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_E5500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
 	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-	    CPU_FTR_DEBUG_LVL_EXC)
+	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_E6500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
 	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-	    CPU_FTR_DEBUG_LVL_EXC)
+	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -486,8 +488,10 @@ enum {
 	    CPU_FTRS_E200 |
 #endif
 #ifdef CONFIG_E500
-	    CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
-	    CPU_FTRS_E5500 | CPU_FTRS_E6500 |
+	    CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef CONFIG_PPC_E500MC
+	    CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
 #endif
 	    0,
 };
@@ -531,9 +535,12 @@ enum {
 	    CPU_FTRS_E200 &
 #endif
 #ifdef CONFIG_E500
-	    CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
-	    CPU_FTRS_E5500 & CPU_FTRS_E6500 &
+	    CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef CONFIG_PPC_E500MC
+	    CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
 #endif
+	    ~CPU_FTR_EMB_HV &	/* can be removed at runtime */
 	    CPU_FTRS_POSSIBLE,
 };
 #endif /* __powerpc64__ */
...
@@ -19,6 +19,9 @@
 #define PPC_DBELL_MSG_BRDCAST	(0x04000000)
 #define PPC_DBELL_TYPE(x)	(((x) & 0xf) << (63-36))
+#define PPC_DBELL_TYPE_MASK	PPC_DBELL_TYPE(0xf)
+#define PPC_DBELL_LPID(x)	((x) << (63 - 49))
+#define PPC_DBELL_PIR_MASK	0x3fff
 enum ppc_dbell {
 	PPC_DBELL = 0,		/* doorbell */
 	PPC_DBELL_CRIT = 1,	/* critical doorbell */
...
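These new fields are what the e500mc code composes into a msgsnd operand when kicking a vcpu by doorbell. A hedged sketch of the composition (illustrative; the in-tree user builds the value in register/SPR context):

    static inline u32 dbell_message(enum ppc_dbell type, u32 lpid, u32 pir)
    {
            return PPC_DBELL_TYPE(type) |           /* doorbell class */
                   PPC_DBELL_LPID(lpid) |           /* target partition */
                   (pir & PPC_DBELL_PIR_MASK);      /* target thread */
    }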
@@ -133,6 +133,16 @@
 #define H_PP1			(1UL<<(63-62))
 #define H_PP2			(1UL<<(63-63))
 
+/* Flags for H_REGISTER_VPA subfunction field */
+#define H_VPA_FUNC_SHIFT	(63-18)	/* Bit posn of subfunction code */
+#define H_VPA_FUNC_MASK		7UL
+#define H_VPA_REG_VPA		1UL	/* Register Virtual Processor Area */
+#define H_VPA_REG_DTL		2UL	/* Register Dispatch Trace Log */
+#define H_VPA_REG_SLB		3UL	/* Register SLB shadow buffer */
+#define H_VPA_DEREG_VPA		5UL	/* Deregister Virtual Processor Area */
+#define H_VPA_DEREG_DTL		6UL	/* Deregister Dispatch Trace Log */
+#define H_VPA_DEREG_SLB		7UL	/* Deregister SLB shadow buffer */
+
 /* VASI States */
 #define H_VASI_INVALID		0
 #define H_VASI_ENABLED		1
...
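Decoding the subfunction from an incoming H_REGISTER_VPA call then reduces to a shift and mask; a small sketch:

    /* Extract the VPA subfunction code from the hcall flags argument. */
    static inline unsigned long h_vpa_subfunc(unsigned long flags)
    {
            return (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
    }

    /* e.g. h_vpa_subfunc(flags) == H_VPA_REG_DTL registers a dispatch
     * trace log, H_VPA_DEREG_VPA tears the area down again. */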
@@ -33,6 +33,7 @@
 extern void __replay_interrupt(unsigned int vector);
 
 extern void timer_interrupt(struct pt_regs *);
+extern void performance_monitor_exception(struct pt_regs *regs);
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
...
@@ -277,6 +277,7 @@ struct kvm_sync_regs {
 #define KVM_CPU_E500V2		2
 #define KVM_CPU_3S_32		3
 #define KVM_CPU_3S_64		4
+#define KVM_CPU_E500MC		5
 
 /* for KVM_CAP_SPAPR_TCE */
 struct kvm_create_spapr_tce {
...
@@ -20,6 +20,16 @@
 #ifndef __POWERPC_KVM_ASM_H__
 #define __POWERPC_KVM_ASM_H__
 
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg)  std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg)   ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg)  stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg)   lwz treg, (offset+4)(areg)
+#endif
+#endif
+
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER 4
 #define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
@@ -48,6 +58,14 @@
 #define BOOKE_INTERRUPT_SPE_FP_DATA 33
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
 
 /* book3s */
...
@@ -453,4 +453,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 #define INS_DCBZ			0x7c0007ec
 
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)
+
 #endif /* __ASM_KVM_BOOK3S_H__ */
@@ -79,6 +79,9 @@ struct kvmppc_host_state {
 	u8 napping;
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
+	u8 hwthread_req;
+	u8 hwthread_state;
+
 	struct kvm_vcpu *kvm_vcpu;
 	struct kvmppc_vcore *kvm_vcore;
 	unsigned long xics_phys;
@@ -122,4 +125,9 @@ struct kvmppc_book3s_shadow_vcpu {
 
 #endif /*__ASSEMBLY__ */
 
+/* Values for kvm_state */
+#define KVM_HWTHREAD_IN_KERNEL	0
+#define KVM_HWTHREAD_IN_NAP	1
+#define KVM_HWTHREAD_IN_KVM	2
+
 #endif /* __ASM_KVM_BOOK3S_ASM_H__ */
@@ -23,6 +23,9 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS			64
+
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	vcpu->arch.gpr[num] = val;
...
/*
* Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#ifndef ASM_KVM_BOOKE_HV_ASM_H
#define ASM_KVM_BOOKE_HV_ASM_H
#ifdef __ASSEMBLY__
/*
* All exceptions from guest state must go through KVM
* (except for those which are delivered directly to the guest) --
* there are no exceptions for which we fall through directly to
* the normal host handler.
*
* Expected inputs (normal exceptions):
* SCRATCH0 = saved r10
* r10 = thread struct
* r11 = appropriate SRR1 variant (currently used as scratch)
* r13 = saved CR
* *(r10 + THREAD_NORMSAVE(0)) = saved r11
* *(r10 + THREAD_NORMSAVE(2)) = saved r13
*
* Expected inputs (crit/mcheck/debug exceptions):
* appropriate SCRATCH = saved r8
* r8 = exception level stack frame
* r9 = *(r8 + _CCR) = saved CR
* r11 = appropriate SRR1 variant (currently used as scratch)
* *(r8 + GPR9) = saved r9
* *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
* *(r8 + GPR11) = saved r11
*/
.macro DO_KVM intno srr1
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
bf 3, kvmppc_resume_\intno\()_\srr1
b kvmppc_handler_\intno\()_\srr1
kvmppc_resume_\intno\()_\srr1:
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
.endm
#endif /*__ASSEMBLY__ */
#endif /* ASM_KVM_BOOKE_HV_ASM_H */
/*
* Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
* Description:
* This file is derived from arch/powerpc/include/asm/kvm_44x.h,
* by Hollis Blanchard <hollisb@us.ibm.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#ifndef __ASM_KVM_E500_H__
#define __ASM_KVM_E500_H__
#include <linux/kvm_host.h>
#define BOOKE_INTERRUPT_SIZE 36
#define E500_PID_NUM 3
#define E500_TLB_NUM 2
#define E500_TLB_VALID 1
#define E500_TLB_DIRTY 2
struct tlbe_ref {
pfn_t pfn;
unsigned int flags; /* E500_TLB_* */
};
struct tlbe_priv {
struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
};
struct vcpu_id_table;
struct kvmppc_e500_tlb_params {
int entries, ways, sets;
};
struct kvmppc_vcpu_e500 {
/* Unmodified copy of the guest's TLB -- shared with host userspace. */
struct kvm_book3e_206_tlb_entry *gtlb_arch;
/* Starting entry number in gtlb_arch[] */
int gtlb_offset[E500_TLB_NUM];
/* KVM internal information associated with each guest TLB entry */
struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
unsigned int gtlb_nv[E500_TLB_NUM];
/*
* information associated with each host TLB entry --
* TLB1 only for now. If/when guest TLB1 entries can be
* mapped with host TLB0, this will be used for that too.
*
* We don't want to use this for guest TLB0 because then we'd
* have the overhead of doing the translation again even if
* the entry is still in the guest TLB (e.g. we swapped out
* and back, and our host TLB entries got evicted).
*/
struct tlbe_ref *tlb_refs[E500_TLB_NUM];
unsigned int host_tlb1_nv;
u32 host_pid[E500_PID_NUM];
u32 pid[E500_PID_NUM];
u32 svr;
/* vcpu id table */
struct vcpu_id_table *idt;
u32 l1csr0;
u32 l1csr1;
u32 hid0;
u32 hid1;
u32 tlb0cfg;
u32 tlb1cfg;
u64 mcar;
struct page **shared_tlb_pages;
int num_shared_tlb_pages;
struct kvm_vcpu vcpu;
};
static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}
#endif /* __ASM_KVM_E500_H__ */
@@ -82,7 +82,7 @@ struct kvm_vcpu;
 struct lppaca;
 struct slb_shadow;
-struct dtl;
+struct dtl_entry;
 
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
@@ -106,6 +108,8 @@ struct kvm_vcpu_stat {
 	u32 dec_exits;
 	u32 ext_intr_exits;
 	u32 halt_wakeup;
+	u32 dbell_exits;
+	u32 gdbell_exits;
 #ifdef CONFIG_PPC_BOOK3S
 	u32 pf_storage;
 	u32 pf_instruc;
@@ -140,6 +142,7 @@ enum kvm_exit_types {
 	EMULATED_TLBSX_EXITS,
 	EMULATED_TLBWE_EXITS,
 	EMULATED_RFI_EXITS,
+	EMULATED_RFCI_EXITS,
 	DEC_EXITS,
 	EXT_INTR_EXITS,
 	HALT_WAKEUP,
@@ -147,6 +150,8 @@ enum kvm_exit_types {
 	FP_UNAVAIL,
 	DEBUG_EXITS,
 	TIMEINGUEST,
+	DBELL_EXITS,
+	GDBELL_EXITS,
 	__NUMBER_OF_KVM_EXIT_TYPES
 };
@@ -217,10 +222,10 @@ struct kvm_arch_memory_slot {
 };
 
 struct kvm_arch {
+	unsigned int lpid;
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
-	unsigned int lpid;
 	unsigned int host_lpid;
 	unsigned long host_lpcr;
 	unsigned long sdr1;
@@ -232,7 +237,6 @@ struct kvm_arch {
 	unsigned long vrma_slb_v;
 	int rma_setup_done;
 	int using_mmu_notifiers;
-	struct list_head spapr_tce_tables;
 	spinlock_t slot_phys_lock;
 	unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
 	int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -240,6 +244,9 @@ struct kvm_arch {
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	struct kvmppc_linear_info *hpt_li;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct list_head spapr_tce_tables;
+#endif
 };
 
 /*
@@ -263,6 +270,9 @@ struct kvmppc_vcore {
 	struct list_head runnable_threads;
 	spinlock_t lock;
 	wait_queue_head_t wq;
+	u64 stolen_tb;
+	u64 preempt_tb;
+	struct kvm_vcpu *runner;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -274,6 +284,19 @@ struct kvmppc_vcore {
 #define VCORE_EXITING	2
 #define VCORE_SLEEPING	3
 
+/*
+ * Struct used to manage memory for a virtual processor area
+ * registered by a PAPR guest.  There are three types of area
+ * that a guest can register.
+ */
+struct kvmppc_vpa {
+	void *pinned_addr;	/* Address in kernel linear mapping */
+	void *pinned_end;	/* End of region */
+	unsigned long next_gpa;	/* Guest phys addr for update */
+	unsigned long len;	/* Number of bytes required */
+	u8 update_pending;	/* 1 => update pinned_addr from next_gpa */
+};
+
 struct kvmppc_pte {
 	ulong eaddr;
 	u64 vpage;
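The update_pending protocol in kvmppc_vpa keeps guest-visible registration decoupled from the pinning work: the hcall path only records next_gpa and sets update_pending, and the next vcpu entry applies it under vpa_update_lock. A hedged sketch (the pin/unpin helper names are illustrative):

    static void apply_vpa_update(struct kvm *kvm, struct kvmppc_vpa *v)
    {
            if (!v->update_pending)
                    return;

            /* Drop the old pinned area, then pin the newly registered one. */
            if (v->pinned_addr)
                    unpin_guest_page(kvm, v->pinned_addr);          /* illustrative */
            v->pinned_addr = v->next_gpa ?
                    pin_guest_page(kvm, v->next_gpa) : NULL;        /* illustrative */
            v->update_pending = 0;
    }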
@@ -345,6 +368,17 @@ struct kvm_vcpu_arch {
 	u64 vsr[64];
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	u32 host_mas4;
+	u32 host_mas6;
+	u32 shadow_epcr;
+	u32 epcr;
+	u32 shadow_msrp;
+	u32 eplc;
+	u32 epsc;
+	u32 oldpir;
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S
 	/* For Gekko paired singles */
 	u32 qpr[32];
@@ -370,6 +404,7 @@ struct kvm_vcpu_arch {
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
+	/* shadow_msr is unused for BookE HV */
 	ulong shadow_msr;
 	ulong csrr0;
 	ulong csrr1;
@@ -426,8 +461,12 @@ struct kvm_vcpu_arch {
 	ulong fault_esr;
 	ulong queued_dear;
 	ulong queued_esr;
+	u32 tlbcfg[4];
+	u32 mmucfg;
+	u32 epr;
 #endif
 	gpa_t paddr_accessed;
+	gva_t vaddr_accessed;
 	u8 io_gpr; /* GPR used as IO source/target */
 	u8 mmio_is_bigendian;
@@ -453,11 +492,6 @@ struct kvm_vcpu_arch {
 	u8 prodded;
 	u32 last_inst;
 
-	struct lppaca *vpa;
-	struct slb_shadow *slb_shadow;
-	struct dtl *dtl;
-	struct dtl *dtl_end;
-
 	wait_queue_head_t *wqp;
 	struct kvmppc_vcore *vcore;
 	int ret;
@@ -482,6 +516,14 @@ struct kvm_vcpu_arch {
 	struct task_struct *run_task;
 	struct kvm_run *kvm_run;
 	pgd_t *pgdir;
+
+	spinlock_t vpa_update_lock;
+	struct kvmppc_vpa vpa;
+	struct kvmppc_vpa dtl;
+	struct dtl_entry *dtl_ptr;
+	unsigned long dtl_index;
+	u64 stolen_logged;
+	struct kvmppc_vpa slb_shadow;
 #endif
 };
 
@@ -498,4 +540,6 @@ struct kvm_vcpu_arch {
 #define KVM_MMIO_REG_QPR	0x0040
 #define KVM_MMIO_REG_FQPR	0x0060
 
+#define __KVM_HAVE_ARCH_WQP
+
 #endif /* __POWERPC_KVM_HOST_H__ */
@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return r;
 }
 
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __POWERPC_KVM_PARA_H__ */
@@ -95,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
-extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong val);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong *val);
 
 extern int kvmppc_booke_init(void);
 extern void kvmppc_booke_exit(void);
@@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			     unsigned long ioba, unsigned long tce);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
 			     struct kvm_allocate_rma *rma);
 extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@@ -138,6 +142,11 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+				      struct kvm_ppc_smmu_info *info);
+
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
 
 /*
  * Cuts out inst bits with ordering according to spec.
@@ -204,4 +213,9 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
 			     struct kvm_dirty_tlb *cfg);
 
+long kvmppc_alloc_lpid(void);
+void kvmppc_claim_lpid(long lpid);
+void kvmppc_free_lpid(long lpid);
+void kvmppc_init_lpid(unsigned long nr_lpids);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
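The LPID allocator declared at the end is shared by Book3S HV and the embedded HV code, each compiling in its own KVMPPC_NR_LPIDS ceiling (see the two headers above). A hedged sketch of a bitmap-backed implementation consistent with these prototypes, not necessarily the exact in-tree body:

    static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
    static unsigned long nr_lpids;

    long kvmppc_alloc_lpid(void)
    {
            long lpid;

            do {
                    lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                    if (lpid >= nr_lpids)
                            return -ENOMEM; /* no LPIDs free */
            } while (test_and_set_bit(lpid, lpid_inuse));

            return lpid;
    }

    void kvmppc_claim_lpid(long lpid)
    {
            set_bit(lpid, lpid_inuse);      /* reserve, e.g. the host's LPID */
    }

    void kvmppc_free_lpid(long lpid)
    {
            clear_bit(lpid, lpid_inuse);
    }

    void kvmppc_init_lpid(unsigned long nr)
    {
            nr_lpids = min_t(unsigned long, nr, KVMPPC_NR_LPIDS);
    }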
@@ -104,6 +104,8 @@
 #define MAS4_TSIZED_MASK	0x00000f80	/* Default TSIZE */
 #define MAS4_TSIZED_SHIFT	7
 
+#define MAS5_SGS		0x80000000
+
 #define MAS6_SPID0		0x3FFF0000
 #define MAS6_SPID1		0x00007FFE
 #define MAS6_ISIZE(x)		MAS1_TSIZE(x)
@@ -118,6 +120,10 @@
 #define MAS7_RPN		0xFFFFFFFF
 
+#define MAS8_TGS		0x80000000 /* Guest space */
+#define MAS8_VF			0x40000000 /* Virtualization Fault */
+#define MAS8_TLPID		0x000000ff
+
 /* Bit definitions for MMUCFG */
 #define MMUCFG_MAVN	0x00000003	/* MMU Architecture Version Number */
 #define MMUCFG_MAVN_V1	0x00000000	/* v1.0 */
...
@@ -240,6 +240,9 @@ struct thread_struct {
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	void*		kvm_shadow_vcpu; /* KVM internal data */
 #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+	struct kvm_vcpu	*kvm_vcpu;
+#endif
 #ifdef CONFIG_PPC64
 	unsigned long	dscr;
 	int		dscr_inherit;
...
@@ -257,7 +257,9 @@
 #define   LPCR_LPES_SH	2
 #define   LPCR_RMI	0x00000002	/* real mode is cache inhibit */
 #define   LPCR_HDICE	0x00000001	/* Hyp Decr enable (HV,PR,EE) */
+#ifndef SPRN_LPID
 #define SPRN_LPID	0x13F	/* Logical Partition Identifier */
+#endif
 #define   LPID_RSVD	0x3ff		/* Reserved LPID for partn switching */
 #define	SPRN_HMER	0x150	/* Hardware m? error recovery */
 #define	SPRN_HMEER	0x151	/* Hardware m? enable error recovery */
...
@@ -56,18 +56,30 @@
 #define SPRN_SPRG7W	0x117	/* Special Purpose Register General 7 Write */
 #define SPRN_EPCR	0x133	/* Embedded Processor Control Register */
 #define SPRN_DBCR2	0x136	/* Debug Control Register 2 */
+#define SPRN_MSRP	0x137	/* MSR Protect Register */
 #define SPRN_IAC3	0x13A	/* Instruction Address Compare 3 */
 #define SPRN_IAC4	0x13B	/* Instruction Address Compare 4 */
 #define SPRN_DVC1	0x13E	/* Data Value Compare Register 1 */
 #define SPRN_DVC2	0x13F	/* Data Value Compare Register 2 */
+#define SPRN_LPID	0x152	/* Logical Partition ID */
 #define SPRN_MAS8	0x155	/* MMU Assist Register 8 */
 #define SPRN_TLB0PS	0x158	/* TLB 0 Page Size Register */
 #define SPRN_TLB1PS	0x159	/* TLB 1 Page Size Register */
 #define SPRN_MAS5_MAS6	0x15c	/* MMU Assist Register 5 || 6 */
 #define SPRN_MAS8_MAS1	0x15d	/* MMU Assist Register 8 || 1 */
 #define SPRN_EPTCFG	0x15e	/* Embedded Page Table Config */
+#define SPRN_GSPRG0	0x170	/* Guest SPRG0 */
+#define SPRN_GSPRG1	0x171	/* Guest SPRG1 */
+#define SPRN_GSPRG2	0x172	/* Guest SPRG2 */
+#define SPRN_GSPRG3	0x173	/* Guest SPRG3 */
 #define SPRN_MAS7_MAS3	0x174	/* MMU Assist Register 7 || 3 */
 #define SPRN_MAS0_MAS1	0x175	/* MMU Assist Register 0 || 1 */
+#define SPRN_GSRR0	0x17A	/* Guest SRR0 */
+#define SPRN_GSRR1	0x17B	/* Guest SRR1 */
+#define SPRN_GEPR	0x17C	/* Guest EPR */
+#define SPRN_GDEAR	0x17D	/* Guest DEAR */
+#define SPRN_GPIR	0x17E	/* Guest PIR */
+#define SPRN_GESR	0x17F	/* Guest Exception Syndrome Register */
 #define SPRN_IVOR0	0x190	/* Interrupt Vector Offset Register 0 */
 #define SPRN_IVOR1	0x191	/* Interrupt Vector Offset Register 1 */
 #define SPRN_IVOR2	0x192	/* Interrupt Vector Offset Register 2 */
@@ -88,6 +100,13 @@
 #define SPRN_IVOR39	0x1B1	/* Interrupt Vector Offset Register 39 */
 #define SPRN_IVOR40	0x1B2	/* Interrupt Vector Offset Register 40 */
 #define SPRN_IVOR41	0x1B3	/* Interrupt Vector Offset Register 41 */
+#define SPRN_GIVOR2	0x1B8	/* Guest IVOR2 */
+#define SPRN_GIVOR3	0x1B9	/* Guest IVOR3 */
+#define SPRN_GIVOR4	0x1BA	/* Guest IVOR4 */
+#define SPRN_GIVOR8	0x1BB	/* Guest IVOR8 */
+#define SPRN_GIVOR13	0x1BC	/* Guest IVOR13 */
+#define SPRN_GIVOR14	0x1BD	/* Guest IVOR14 */
+#define SPRN_GIVPR	0x1BF	/* Guest IVPR */
 #define SPRN_SPEFSCR	0x200	/* SPE & Embedded FP Status & Control */
 #define SPRN_BBEAR	0x201	/* Branch Buffer Entry Address Register */
 #define SPRN_BBTAR	0x202	/* Branch Buffer Target Address Register */
@@ -240,6 +259,10 @@
 #define MCSR_LDG	0x00002000UL		/* Guarded Load */
 #define MCSR_TLBSYNC	0x00000002UL		/* Multiple tlbsyncs detected */
 #define MCSR_BSL2_ERR	0x00000001UL		/* Backside L2 cache error */
+
+#define MSRP_UCLEP	0x04000000	/* Protect MSR[UCLE] */
+#define MSRP_DEP	0x00000200	/* Protect MSR[DE] */
+#define MSRP_PMMP	0x00000004	/* Protect MSR[PMM] */
 #endif
 
 #ifdef CONFIG_E200
@@ -594,6 +617,17 @@
 #define SPRN_EPCR_DMIUH		0x00400000	/* Disable MAS Interrupt updates
 						 * for hypervisor */
 
+/* Bit definitions for EPLC/EPSC */
+#define EPC_EPR		0x80000000 /* 1 = user, 0 = kernel */
+#define EPC_EPR_SHIFT	31
+#define EPC_EAS		0x40000000 /* Address Space */
+#define EPC_EAS_SHIFT	30
+#define EPC_EGS		0x20000000 /* 1 = guest, 0 = hypervisor */
+#define EPC_EGS_SHIFT	29
+#define EPC_ELPID	0x00ff0000
+#define EPC_ELPID_SHIFT	16
+#define EPC_EPID	0x00003fff
+#define EPC_EPID_SHIFT	0
+
 /*
  * The IBM-403 is an even more odd special case, as it is much
...
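EPLC/EPSC select which context the external-PID load/store instructions operate on. A hedged sketch of assembling such a value from the fields above (illustrative only; the e500mc KVM code writes the equivalent via mtspr):

    static inline u32 make_epc(int user, int as, int guest_state,
                               u32 lpid, u32 pid)
    {
            return (user ? EPC_EPR : 0) |
                   (as ? EPC_EAS : 0) |
                   (guest_state ? EPC_EGS : 0) |
                   ((lpid << EPC_ELPID_SHIFT) & EPC_ELPID) |
                   ((pid << EPC_EPID_SHIFT) & EPC_EPID);
    }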
@@ -17,6 +17,7 @@ extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
 extern void giveup_fpu(struct task_struct *);
+extern void load_up_fpu(void);
 extern void disable_kernel_fp(void);
 extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
...
@@ -23,6 +23,7 @@
 extern unsigned long tb_ticks_per_jiffy;
 extern unsigned long tb_ticks_per_usec;
 extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
 
 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
...
@@ -116,6 +116,9 @@ int main(void)
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
 #endif
+#ifdef CONFIG_KVM_BOOKE_HV
+	DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
+#endif
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -383,6 +386,7 @@ int main(void)
 #ifdef CONFIG_KVM
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+	DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
 	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
@@ -425,9 +429,11 @@ int main(void)
 	DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
 	DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
 
+	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+
 	/* book3s */
 #ifdef CONFIG_KVM_BOOK3S_64_HV
-	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
 	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -440,9 +446,9 @@ int main(void)
 	DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
 	DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
-	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
 	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
 	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
@@ -457,7 +463,6 @@ int main(void)
 	DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
 	DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
-	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
@@ -533,6 +538,8 @@ int main(void)
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
+	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
+	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
 	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
 	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
 	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
@@ -593,6 +600,12 @@ int main(void)
 	DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
+	DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
+	DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
 					      arch.timing_exit.tv32.tbu));
...
@@ -73,6 +73,7 @@ _GLOBAL(__setup_cpu_e500v2)
 	mtlr	r4
 	blr
 _GLOBAL(__setup_cpu_e500mc)
+	mr	r5, r4
 	mflr	r4
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
...
@@ -63,11 +63,13 @@ BEGIN_FTR_SECTION
 	GET_PACA(r13)
 #ifdef CONFIG_KVM_BOOK3S_64_HV
-	lbz	r0,PACAPROCSTART(r13)
-	cmpwi	r0,0x80
-	bne	1f
-	li	r0,1
-	stb	r0,PACAPROCSTART(r13)
+	li	r0,KVM_HWTHREAD_IN_KERNEL
+	stb	r0,HSTATE_HWTHREAD_STATE(r13)
+	/* Order setting hwthread_state vs. testing hwthread_req */
+	sync
+	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
+	cmpwi	r0,0
+	beq	1f
 	b	kvm_start_guest
 1:
 #endif
...
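In C terms, the new nap-path handshake above does the following (a hedged rendering of the assembly, with smp_mb() standing in for the sync):

    /* Napping/offline hardware thread, per-paca state. */
    void secondary_thread_loop(struct paca_struct *paca)
    {
            /* Advertise that this thread is back in the kernel... */
            paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
            smp_mb();       /* order the store vs. the hwthread_req load */

            /* ...then honour a pending request from the vcore runner. */
            if (paca->kvm_hstate.hwthread_req)
                    kvm_start_guest();      /* enter guest context */

            /* otherwise fall through to normal kernel entry */
    }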
@@ -248,10 +248,11 @@ _ENTRY(_start);
 interrupt_base:
 	/* Critical Input Interrupt */
-	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
 
 	/* Machine Check Interrupt */
-	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+			   machine_check_exception)
 	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
 
 	/* Data Storage Interrupt */
@@ -261,7 +262,8 @@ interrupt_base:
 	INSTRUCTION_STORAGE_EXCEPTION
 
 	/* External Input Interrupt */
-	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
+		  do_IRQ, EXC_XFER_LITE)
 
 	/* Alignment Interrupt */
 	ALIGNMENT_EXCEPTION
@@ -273,29 +275,32 @@ interrupt_base:
 #ifdef CONFIG_PPC_FPU
 	FP_UNAVAILABLE_EXCEPTION
 #else
-	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
+		  FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
 #endif
 	/* System Call Interrupt */
 	START_EXCEPTION(SystemCall)
-	NORMAL_EXCEPTION_PROLOG
+	NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
 	/* Auxiliary Processor Unavailable Interrupt */
-	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
+		  AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
 	DECREMENTER_EXCEPTION
 
 	/* Fixed Internal Timer Interrupt */
 	/* TODO: Add FIT support */
-	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
+		  unknown_exception, EXC_XFER_EE)
 
 	/* Watchdog Timer Interrupt */
 	/* TODO: Add watchdog support */
 #ifdef CONFIG_BOOKE_WDT
-	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
+	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
 #else
-	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
+	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
 #endif
 
 	/* Data TLB Error Interrupt */
...
...@@ -2,6 +2,9 @@ ...@@ -2,6 +2,9 @@
#define __HEAD_BOOKE_H__ #define __HEAD_BOOKE_H__
#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */ #include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
/* /*
* Macros used for common Book-e exception handling * Macros used for common Book-e exception handling
*/ */
...@@ -28,14 +31,15 @@ ...@@ -28,14 +31,15 @@
*/ */
#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
#define NORMAL_EXCEPTION_PROLOG \ #define NORMAL_EXCEPTION_PROLOG(intno) \
mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
mfspr r10, SPRN_SPRG_THREAD; \ mfspr r10, SPRN_SPRG_THREAD; \
stw r11, THREAD_NORMSAVE(0)(r10); \ stw r11, THREAD_NORMSAVE(0)(r10); \
stw r13, THREAD_NORMSAVE(2)(r10); \ stw r13, THREAD_NORMSAVE(2)(r10); \
mfcr r13; /* save CR in r13 for now */\ mfcr r13; /* save CR in r13 for now */\
mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ mfspr r11, SPRN_SRR1; \
andi. r11,r11,MSR_PR; \ DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \
andi. r11, r11, MSR_PR; /* check whether user or kernel */\
mr r11, r1; \ mr r11, r1; \
beq 1f; \ beq 1f; \
/* if from user, start at top of this thread's kernel stack */ \ /* if from user, start at top of this thread's kernel stack */ \
...@@ -113,7 +117,7 @@ ...@@ -113,7 +117,7 @@
* registers as the normal prolog above. Instead we use a portion of the * registers as the normal prolog above. Instead we use a portion of the
* critical/machine check exception stack at low physical addresses. * critical/machine check exception stack at low physical addresses.
*/ */
#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \ #define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \ mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \
BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
stw r9,GPR9(r8); /* save various registers */\ stw r9,GPR9(r8); /* save various registers */\
...@@ -121,8 +125,9 @@ ...@@ -121,8 +125,9 @@
stw r10,GPR10(r8); \ stw r10,GPR10(r8); \
stw r11,GPR11(r8); \ stw r11,GPR11(r8); \
stw r9,_CCR(r8); /* save CR on stack */\ stw r9,_CCR(r8); /* save CR on stack */\
mfspr r10,exc_level_srr1; /* check whether user or kernel */\ mfspr r11,exc_level_srr1; /* check whether user or kernel */\
andi. r10,r10,MSR_PR; \ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
andi. r11,r11,MSR_PR; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
...@@ -162,12 +167,30 @@ ...@@ -162,12 +167,30 @@
SAVE_4GPRS(3, r11); \ SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11) SAVE_2GPRS(7, r11)
#define CRITICAL_EXCEPTION_PROLOG \ #define CRITICAL_EXCEPTION_PROLOG(intno) \
EXC_LEVEL_EXCEPTION_PROLOG(CRIT, SPRN_CSRR0, SPRN_CSRR1) EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
#define DEBUG_EXCEPTION_PROLOG \ #define DEBUG_EXCEPTION_PROLOG \
EXC_LEVEL_EXCEPTION_PROLOG(DBG, SPRN_DSRR0, SPRN_DSRR1) EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
#define MCHECK_EXCEPTION_PROLOG \ #define MCHECK_EXCEPTION_PROLOG \
EXC_LEVEL_EXCEPTION_PROLOG(MC, SPRN_MCSRR0, SPRN_MCSRR1) EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
SPRN_MCSRR0, SPRN_MCSRR1)
/*
* Guest Doorbell -- this is a bit odd in that uses GSRR0/1 despite
* being delivered to the host. This exception can only happen
* inside a KVM guest -- so we just handle up to the DO_KVM rather
* than try to fit this into one of the existing prolog macros.
*/
#define GUEST_DOORBELL_EXCEPTION \
START_EXCEPTION(GuestDoorbell); \
mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
mfspr r10, SPRN_SPRG_THREAD; \
stw r11, THREAD_NORMSAVE(0)(r10); \
mfspr r11, SPRN_SRR1; \
stw r13, THREAD_NORMSAVE(2)(r10); \
mfcr r13; /* save CR in r13 for now */\
DO_KVM BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1; \
trap
/* /*
* Exception vectors. * Exception vectors.
...@@ -181,15 +204,15 @@ ...@@ -181,15 +204,15 @@
.long func; \ .long func; \
.long ret_from_except_full .long ret_from_except_full
#define EXCEPTION(n, label, hdlr, xfer) \ #define EXCEPTION(n, intno, label, hdlr, xfer) \
START_EXCEPTION(label); \ START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG; \ NORMAL_EXCEPTION_PROLOG(intno); \
addi r3,r1,STACK_FRAME_OVERHEAD; \ addi r3,r1,STACK_FRAME_OVERHEAD; \
xfer(n, hdlr) xfer(n, hdlr)
#define CRITICAL_EXCEPTION(n, label, hdlr) \ #define CRITICAL_EXCEPTION(n, intno, label, hdlr) \
START_EXCEPTION(label); \ START_EXCEPTION(label); \
CRITICAL_EXCEPTION_PROLOG; \ CRITICAL_EXCEPTION_PROLOG(intno); \
addi r3,r1,STACK_FRAME_OVERHEAD; \ addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, crit_transfer_to_handler, \ NOCOPY, crit_transfer_to_handler, \
...@@ -302,7 +325,7 @@ ...@@ -302,7 +325,7 @@
#define DEBUG_CRIT_EXCEPTION \ #define DEBUG_CRIT_EXCEPTION \
START_EXCEPTION(DebugCrit); \ START_EXCEPTION(DebugCrit); \
CRITICAL_EXCEPTION_PROLOG; \ CRITICAL_EXCEPTION_PROLOG(DEBUG); \
\ \
/* \ /* \
* If there is a single step or branch-taken exception in an \ * If there is a single step or branch-taken exception in an \
...@@ -355,7 +378,7 @@ ...@@ -355,7 +378,7 @@
#define DATA_STORAGE_EXCEPTION \ #define DATA_STORAGE_EXCEPTION \
START_EXCEPTION(DataStorage) \ START_EXCEPTION(DataStorage) \
NORMAL_EXCEPTION_PROLOG; \ NORMAL_EXCEPTION_PROLOG(DATA_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \ stw r5,_ESR(r11); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \ mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
...@@ -363,7 +386,7 @@ ...@@ -363,7 +386,7 @@
#define INSTRUCTION_STORAGE_EXCEPTION \ #define INSTRUCTION_STORAGE_EXCEPTION \
START_EXCEPTION(InstructionStorage) \ START_EXCEPTION(InstructionStorage) \
NORMAL_EXCEPTION_PROLOG; \ NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \ stw r5,_ESR(r11); \
mr r4,r12; /* Pass SRR0 as arg2 */ \ mr r4,r12; /* Pass SRR0 as arg2 */ \
...@@ -372,7 +395,7 @@ ...@@ -372,7 +395,7 @@
#define ALIGNMENT_EXCEPTION \ #define ALIGNMENT_EXCEPTION \
START_EXCEPTION(Alignment) \ START_EXCEPTION(Alignment) \
NORMAL_EXCEPTION_PROLOG; \ NORMAL_EXCEPTION_PROLOG(ALIGNMENT); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
stw r4,_DEAR(r11); \ stw r4,_DEAR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \ addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -380,7 +403,7 @@
#define PROGRAM_EXCEPTION \
	START_EXCEPTION(Program) \
-	NORMAL_EXCEPTION_PROLOG; \
+	NORMAL_EXCEPTION_PROLOG(PROGRAM); \
	mfspr	r4,SPRN_ESR;		/* Grab the ESR and save it */ \
	stw	r4,_ESR(r11); \
	addi	r3,r1,STACK_FRAME_OVERHEAD; \
@@ -388,7 +411,7 @@
#define DECREMENTER_EXCEPTION \
	START_EXCEPTION(Decrementer) \
-	NORMAL_EXCEPTION_PROLOG; \
+	NORMAL_EXCEPTION_PROLOG(DECREMENTER); \
	lis	r0,TSR_DIS@h;		/* Setup the DEC interrupt mask */ \
	mtspr	SPRN_TSR,r0;		/* Clear the DEC interrupt */ \
	addi	r3,r1,STACK_FRAME_OVERHEAD; \
@@ -396,7 +419,7 @@
#define FP_UNAVAILABLE_EXCEPTION \
	START_EXCEPTION(FloatingPointUnavailable) \
-	NORMAL_EXCEPTION_PROLOG; \
+	NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL); \
	beq	1f; \
	bl	load_up_fpu;		/* if from user, just load it up */ \
	b	fast_exception_return; \
......
@@ -301,19 +301,20 @@ _ENTRY(__early_start)
interrupt_base:
	/* Critical Input Interrupt */
-	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
-	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+			   machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif

	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
-	NORMAL_EXCEPTION_PROLOG
+	NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
@@ -328,7 +329,7 @@ interrupt_base:
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
-	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+	EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION
@@ -342,32 +343,36 @@ interrupt_base:
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
-	EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
+	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
+		  program_check_exception, EXC_XFER_EE)
#else
-	EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
+		  unknown_exception, EXC_XFER_EE)
#endif
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
-	NORMAL_EXCEPTION_PROLOG
+	NORMAL_EXCEPTION_PROLOG(SYSCALL)
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
-	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
+		  unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Internal Timer Interrupt */
	/* TODO: Add FIT support */
-	EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
+		  unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
-	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
+	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
#else
-	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
+	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
@@ -375,10 +380,16 @@ interrupt_base:
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+	mfspr	r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
+	DO_KVM	BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
@@ -463,10 +474,16 @@ interrupt_base:
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+	mfspr	r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
+	DO_KVM	BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
@@ -538,36 +555,54 @@ interrupt_base:
#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
-	NORMAL_EXCEPTION_PROLOG
+	NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
	bne	load_up_spe
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
-	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
+		  unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
-	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
+	EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
+		  SPEFloatingPointException, EXC_XFER_EE);

	/* SPE Floating Point Round */
-	EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
+	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
+		  SPEFloatingPointRoundException, EXC_XFER_EE)
#else
-	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
-	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
+		  unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
+		  unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* Performance Monitor */
-	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
+	EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
+		  performance_monitor_exception, EXC_XFER_STD)

-	EXCEPTION(0x2070, Doorbell, doorbell_exception, EXC_XFER_STD)
+	EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)

-	CRITICAL_EXCEPTION(0x2080, CriticalDoorbell, unknown_exception)
+	CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
+			   CriticalDoorbell, unknown_exception)
	/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION
	DEBUG_CRIT_EXCEPTION
GUEST_DOORBELL_EXCEPTION
CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
unknown_exception)
/* Hypercall */
EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)
/* Embedded Hypervisor Privilege */
EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
/*
 * Local functions
 */
@@ -871,8 +906,31 @@ _GLOBAL(__setup_e500mc_ivors)
	mtspr	SPRN_IVOR36,r3
	li	r3,CriticalDoorbell@l
	mtspr	SPRN_IVOR37,r3
/*
* We only want to touch IVOR38-41 if we're running on hardware
* that supports category E.HV. The architectural way to determine
* this is MMUCFG[LPIDSIZE].
*/
mfspr r3, SPRN_MMUCFG
andis. r3, r3, MMUCFG_LPIDSIZE@h
beq no_hv
li r3,GuestDoorbell@l
mtspr SPRN_IVOR38,r3
li r3,CriticalGuestDoorbell@l
mtspr SPRN_IVOR39,r3
li r3,Hypercall@l
mtspr SPRN_IVOR40,r3
li r3,Ehvpriv@l
mtspr SPRN_IVOR41,r3
skip_hv_ivors:
	sync
	blr
no_hv:
lwz r3, CPU_SPEC_FEATURES(r5)
rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
stw r3, CPU_SPEC_FEATURES(r5)
b skip_hv_ivors
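
The comment above spells out the detection rule; for reference, the same MMUCFG[LPIDSIZE] test reads naturally in C (a sketch only, assuming the usual mfspr() accessor and the MMUCFG_LPIDSIZE mask from the reg_booke headers):

/* Sketch: detect category E.HV the way __setup_e500mc_ivors does.
 * A non-zero MMUCFG[LPIDSIZE] field means the core implements LPIDs. */
static inline int cpu_has_emb_hv(void)
{
	return (mfspr(SPRN_MMUCFG) & MMUCFG_LPIDSIZE) != 0;
}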
#ifdef CONFIG_SPE
/*
......
@@ -16,6 +16,7 @@
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
+#include <asm/kvm_book3s_asm.h>

#undef DEBUG
@@ -81,6 +82,12 @@ _GLOBAL(power7_idle)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)
#ifdef CONFIG_KVM_BOOK3S_64_HV
/* Tell KVM we're napping */
li r4,KVM_HWTHREAD_IN_NAP
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	/* Magic NAP mode enter sequence */
	std	r0,0(r1)
	ptesync
......
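The KVM_HWTHREAD_IN_NAP store above is half of a small handshake: an offline secondary thread advertises its state so HV KVM knows whether it may be woken straight into a guest. A sketch of the state values involved (names per asm/kvm_book3s_asm.h; the exact numeric encoding is an assumption here):

/* Assumed encoding of the hardware-thread state byte in the PACA. */
#define KVM_HWTHREAD_IN_KERNEL	0	/* running normal host kernel code */
#define KVM_HWTHREAD_IN_NAP	1	/* napping; may be woken for a guest */
#define KVM_HWTHREAD_IN_KVM	2	/* already executing in KVM context */
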
@@ -190,3 +190,7 @@ EXPORT_SYMBOL(__arch_hweight16);
EXPORT_SYMBOL(__arch_hweight32);
EXPORT_SYMBOL(__arch_hweight64);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
EXPORT_SYMBOL_GPL(mmu_psize_defs);
#endif
@@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt,
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

-static struct clock_event_device decrementer_clockevent = {
+struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.irq            = 0,
@@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = {
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};
+EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
......
@@ -29,15 +29,18 @@
#include <asm/kvm_ppc.h>

#include "44x_tlb.h"
#include "booke.h"
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+	kvmppc_booke_vcpu_load(vcpu, cpu);
	kvmppc_44x_tlb_load(vcpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_44x_tlb_put(vcpu);
+	kvmppc_booke_vcpu_put(vcpu);
}
int kvmppc_core_check_processor_compat(void) int kvmppc_core_check_processor_compat(void)
@@ -160,6 +163,15 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}
int kvmppc_core_init_vm(struct kvm *kvm)
{
return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}
static int __init kvmppc_44x_init(void)
{
	int r;
......
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
-	int dcrn;
-	int ra;
-	int rb;
-	int rc;
-	int rs;
-	int rt;
-	int ws;
+	int dcrn = get_dcrn(inst);
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
+	int rc = get_rc(inst);
+	int rs = get_rs(inst);
+	int rt = get_rt(inst);
+	int ws = get_ws(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_MFDCR:
-			dcrn = get_dcrn(inst);
-			rt = get_rt(inst);
-
			/* The guest may access CPR0 registers to determine the timebase
			 * frequency, and it must know the real host frequency because it
			 * can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			break;

		case XOP_MTDCR:
-			dcrn = get_dcrn(inst);
-			rs = get_rs(inst);
-
			/* emulate some access in kernel */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			break;

		case XOP_TLBWE:
-			ra = get_ra(inst);
-			rs = get_rs(inst);
-			ws = get_ws(inst);
			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
			break;

		case XOP_TLBSX:
-			rt = get_rt(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-			rc = get_rc(inst);
			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
			break;
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
	return emulated;
}
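
With this refactor every operand is decoded once at the top of kvmppc_core_emulate_op() rather than inside each case arm. The extractors live in asm/disassemble.h; a sketch of the register-field ones (PPC D/X-form register fields are 5 bits wide, with RT/RS at bit 21, RA at bit 16 and RB at bit 11 counting from the low end):

/* Sketch of the operand extractors assumed by the code above. */
static inline unsigned int get_rt(u32 inst) { return (inst >> 21) & 0x1f; }
static inline unsigned int get_rs(u32 inst) { return (inst >> 21) & 0x1f; }
static inline unsigned int get_ra(u32 inst) { return (inst >> 16) & 0x1f; }
static inline unsigned int get_rb(u32 inst) { return (inst >> 11) & 0x1f; }

Eager decode costs only a few shifts and masks per instruction and keeps the switch arms flat.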
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_PID:
-		kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
+		kvmppc_set_pid(vcpu, spr_val); break;
	case SPRN_MMUCR:
-		vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
+		vcpu->arch.mmucr = spr_val; break;
	case SPRN_CCR0:
-		vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
+		vcpu->arch.ccr0 = spr_val; break;
	case SPRN_CCR1:
-		vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
+		vcpu->arch.ccr1 = spr_val; break;
	default:
-		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}

-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_PID:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
+		*spr_val = vcpu->arch.pid; break;
	case SPRN_MMUCR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
+		*spr_val = vcpu->arch.mmucr; break;
	case SPRN_CCR0:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
+		*spr_val = vcpu->arch.ccr0; break;
	case SPRN_CCR1:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
+		*spr_val = vcpu->arch.ccr1; break;
	default:
-		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
......
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR
	depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
	select KVM_BOOK3S_PR
config KVM_BOOKE_HV
bool
config KVM_440
	bool "KVM support for PowerPC 440 processors"
	depends on EXPERIMENTAL && 44x
@@ -106,7 +109,7 @@ config KVM_440
config KVM_EXIT_TIMING
	bool "Detailed exit timing"
-	depends on KVM_440 || KVM_E500
+	depends on KVM_440 || KVM_E500V2 || KVM_E500MC
	---help---
	  Calculate elapsed time for every exit/enter cycle. A per-vcpu
	  report is available in debugfs kvm/vm#_vcpu#_timing.
@@ -115,14 +118,29 @@ config KVM_EXIT_TIMING
	  If unsure, say N.

-config KVM_E500
-	bool "KVM support for PowerPC E500 processors"
-	depends on EXPERIMENTAL && E500
+config KVM_E500V2
+	bool "KVM support for PowerPC E500v2 processors"
+	depends on EXPERIMENTAL && E500 && !PPC_E500MC
	select KVM
	select KVM_MMIO
	---help---
	  Support running unmodified E500 guest kernels in virtual machines on
-	  E500 host processors.
+	  E500v2 host processors.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
If unsure, say N.
config KVM_E500MC
bool "KVM support for PowerPC E500MC/E5500 processors"
depends on EXPERIMENTAL && PPC_E500MC
select KVM
select KVM_MMIO
select KVM_BOOKE_HV
---help---
Support running unmodified E500MC/E5500 (32-bit) guest kernels in
virtual machines on E500MC/E5500 host processors.
	  This module provides access to the hardware capabilities through
	  a character device node named /dev/kvm.
......
@@ -36,7 +36,17 @@ kvm-e500-objs := \
	e500.o \
	e500_tlb.o \
	e500_emulate.o
-kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
+kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
kvm-e500mc-objs := \
$(common-objs-y) \
booke.o \
booke_emulate.o \
bookehv_interrupts.o \
e500mc.o \
e500_tlb.o \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
	../../../virt/kvm/coalesced_mmio.o \
@@ -44,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
	book3s_paired_singles.o \
	book3s_pr.o \
	book3s_pr_papr.o \
book3s_64_vio_hv.o \
	book3s_emulate.o \
	book3s_interrupts.o \
	book3s_mmu_hpte.o \
@@ -68,6 +79,7 @@ kvm-book3s_64-module-objs := \
	powerpc.o \
	emulate.o \
	book3s.o \
book3s_64_vio.o \
	$(kvm-book3s_64-objs-y)

kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
@@ -88,7 +100,8 @@ kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
kvm-objs := $(kvm-objs-m) $(kvm-objs-y)

obj-$(CONFIG_KVM_440) += kvm.o
-obj-$(CONFIG_KVM_E500) += kvm.o
+obj-$(CONFIG_KVM_E500V2) += kvm.o
+obj-$(CONFIG_KVM_E500MC) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
......
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
	return true;
}

-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -283,12 +283,17 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);
+
+	return 0;
}

pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

+	if (!(vcpu->arch.shared->msr & MSR_SF))
+		mp_pa = (uint32_t)mp_pa;
+
	/* Magic page override */
	if (unlikely(mp_pa) &&
	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
......
@@ -36,13 +36,11 @@
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63
-#define NR_LPIDS	(LPID_RSVD + 1)
-
-unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];

long kvmppc_alloc_hpt(struct kvm *kvm)
{
	unsigned long hpt;
-	unsigned long lpid;
+	long lpid;
	struct revmap_entry *rev;
	struct kvmppc_linear_info *li;
@@ -72,14 +70,9 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
	}
	kvm->arch.revmap = rev;

-	/* Allocate the guest's logical partition ID */
-	do {
-		lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
-		if (lpid >= NR_LPIDS) {
-			pr_err("kvm_alloc_hpt: No LPIDs free\n");
-			goto out_freeboth;
-		}
-	} while (test_and_set_bit(lpid, lpid_inuse));
+	lpid = kvmppc_alloc_lpid();
+	if (lpid < 0)
+		goto out_freeboth;

	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
	kvm->arch.lpid = lpid;
@@ -96,7 +89,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
void kvmppc_free_hpt(struct kvm *kvm)
{
-	clear_bit(kvm->arch.lpid, lpid_inuse);
+	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
@@ -171,8 +164,7 @@ int kvmppc_mmu_hv_init(void)
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

-	memset(lpid_inuse, 0, sizeof(lpid_inuse));
-
+	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
@@ -181,9 +173,11 @@ int kvmppc_mmu_hv_init(void)
		rsvd_lpid = MAX_LPID_970;
	}

-	set_bit(host_lpid, lpid_inuse);
+	kvmppc_init_lpid(rsvd_lpid + 1);
+	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
-	set_bit(rsvd_lpid, lpid_inuse);
+	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}
@@ -452,7 +446,7 @@ static int instruction_is_store(unsigned int instr)
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
-				  unsigned long gpa, int is_store)
+				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
@@ -499,6 +493,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
	 */
	vcpu->arch.paddr_accessed = gpa;
+	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}
@@ -552,7 +547,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
-		return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
+		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}
......
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num:
	or	r10, r10, r12
	slbie	r10

-	isync
-
	/* Fill SLB with our shadow */
	lbz	r12, SVCPU_SLB_MAX(r3)
......
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
* Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
static long kvmppc_stt_npages(unsigned long window_size)
{
return ALIGN((window_size >> SPAPR_TCE_SHIFT)
* sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
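
kvmppc_stt_npages() sizes the backing store at one 8-byte TCE per 4 KiB (SPAPR_TCE_SHIFT) of DMA window, rounded up to whole pages. A worked example (a sketch, assuming 4 KiB host pages):

/* Standalone rendering of the page-count math above. */
#include <assert.h>
#include <stdint.h>

#define SPAPR_TCE_SHIFT	12		/* 4 KiB mapped per TCE */
#define HOST_PAGE_SIZE	4096UL

static long stt_npages(unsigned long window_size)
{
	unsigned long bytes = (window_size >> SPAPR_TCE_SHIFT) * sizeof(uint64_t);
	return (bytes + HOST_PAGE_SIZE - 1) / HOST_PAGE_SIZE;	/* ALIGN()/PAGE_SIZE */
}

/* A 256 MiB window -> 65536 TCEs -> 512 KiB of table -> 128 pages. */
static void stt_npages_example(void) { assert(stt_npages(256UL << 20) == 128); }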
static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
struct kvm *kvm = stt->kvm;
int i;
mutex_lock(&kvm->lock);
list_del(&stt->list);
for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
__free_page(stt->pages[i]);
kfree(stt);
mutex_unlock(&kvm->lock);
kvm_put_kvm(kvm);
}
static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
return VM_FAULT_SIGBUS;
page = stt->pages[vmf->pgoff];
get_page(page);
vmf->page = page;
return 0;
}
static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
.fault = kvm_spapr_tce_fault,
};
static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
vma->vm_ops = &kvm_spapr_tce_vm_ops;
return 0;
}
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
struct kvmppc_spapr_tce_table *stt = filp->private_data;
release_spapr_tce_table(stt);
return 0;
}
static struct file_operations kvm_spapr_tce_fops = {
.mmap = kvm_spapr_tce_mmap,
.release = kvm_spapr_tce_release,
};
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
long npages;
int ret = -ENOMEM;
int i;
/* Check this LIOBN hasn't been previously allocated */
list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt->liobn == args->liobn)
return -EBUSY;
}
npages = kvmppc_stt_npages(args->window_size);
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
if (!stt)
goto fail;
stt->liobn = args->liobn;
stt->window_size = args->window_size;
stt->kvm = kvm;
for (i = 0; i < npages; i++) {
stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!stt->pages[i])
goto fail;
}
kvm_get_kvm(kvm);
mutex_lock(&kvm->lock);
list_add(&stt->list, &kvm->arch.spapr_tce_tables);
mutex_unlock(&kvm->lock);
return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR);
fail:
if (stt) {
for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
kfree(stt);
}
return ret;
}
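
From userspace the flow is: issue the new KVM_CREATE_SPAPR_TCE vm ioctl, then mmap() the returned fd to observe the table. A hedged sketch (the LIOBN value is made up; error handling trimmed):

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void *map_tce_table(int vm_fd)
{
	struct kvm_create_spapr_tce args = {
		.liobn = 0x12345678,		/* hypothetical LIOBN */
		.window_size = 256 << 20,	/* 256 MiB DMA window */
	};
	int tce_fd = ioctl(vm_fd, KVM_CREATE_SPAPR_TCE, &args);

	if (tce_fd < 0)
		return NULL;
	/* 128 pages of table for this window size (see stt_npages above) */
	return mmap(NULL, 128 * 4096, PROT_READ | PROT_WRITE,
		    MAP_SHARED, tce_fd, 0);
}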
@@ -38,6 +38,9 @@
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
/* WARNING: This will be called in real-mode on HV KVM and virtual
* mode on PR KVM
*/
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
......
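Because the HV-side handler runs in real mode, it can only validate and store the guest-supplied TCE value without touching pageable data; a TCE packs a real page number with permission bits in the low bits. A sketch of building one (assuming the usual sPAPR encoding from asm/tce.h):

/* Assumed sPAPR TCE layout: RPN in the upper bits, R/W in bits 0-1. */
#define TCE_PCI_WRITE	0x2UL
#define TCE_PCI_READ	0x1UL

static inline unsigned long make_tce(unsigned long pa, int writable)
{
	return (pa & ~0xfffUL) | TCE_PCI_READ | (writable ? TCE_PCI_WRITE : 0);
}
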
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
int rt = get_rt(inst);
int rs = get_rs(inst);
int ra = get_ra(inst);
int rb = get_rb(inst);
	switch (get_op(inst)) {
	case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
-			kvmppc_set_gpr(vcpu, get_rt(inst),
-				       vcpu->arch.shared->msr);
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
			break;
		case OP_31_XOP_MTMSRD:
		{
-			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
-				vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
-				vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+				ulong new_msr = vcpu->arch.shared->msr;
+				new_msr &= ~(MSR_RI | MSR_EE);
+				new_msr |= rs_val & (MSR_RI | MSR_EE);
+				vcpu->arch.shared->msr = new_msr;
			} else
-				kvmppc_set_msr(vcpu, rs);
+				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
-			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
		{
			int srnum;

-			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
+				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
-				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
+				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
-			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
-					kvmppc_get_gpr(vcpu, get_rs(inst)),
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
+					kvmppc_get_gpr(vcpu, rs),
+					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
+					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
-				ulong t, rb;
+				ulong t, rb_val;

-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+				rb_val = kvmppc_get_gpr(vcpu, rb);
+				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
-				ulong t, rb;
+				ulong t, rb_val;

-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+				rb_val = kvmppc_get_gpr(vcpu, rb);
+				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
case OP_31_XOP_DCBA: case OP_31_XOP_DCBA:
...@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break; break;
case OP_31_XOP_DCBZ: case OP_31_XOP_DCBZ:
{ {
ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); ulong rb_val = kvmppc_get_gpr(vcpu, rb);
ulong ra = 0; ulong ra_val = 0;
ulong addr, vaddr; ulong addr, vaddr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
u32 dsisr; u32 dsisr;
int r; int r;
if (get_ra(inst)) if (ra)
ra = kvmppc_get_gpr(vcpu, get_ra(inst)); ra_val = kvmppc_get_gpr(vcpu, ra);
addr = (ra + rb) & ~31ULL; addr = (ra_val + rb_val) & ~31ULL;
if (!(vcpu->arch.shared->msr & MSR_SF)) if (!(vcpu->arch.shared->msr & MSR_SF))
addr &= 0xffffffff; addr &= 0xffffffff;
vaddr = addr; vaddr = addr;
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
	return bat;
}

-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;
-	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_SDR1:
@@ -428,7 +432,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
	return emulated;
}

-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
-			kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
+			*spr_val = bat->raw >> 32;
		else
-			kvmppc_set_gpr(vcpu, rt, bat->raw);
+			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
+		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
+		*spr_val = vcpu->arch.shared->dsisr;
		break;
	case SPRN_DAR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
+		*spr_val = vcpu->arch.shared->dar;
		break;
	case SPRN_HIOR:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
+		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
+		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
+		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
+		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
+		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
+		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_PURR:
-		kvmppc_set_gpr(vcpu, rt, 0);
+		*spr_val = 0;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
-		kvmppc_set_gpr(vcpu, rt,
-			       to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
	case SPRN_THRM1:
	case SPRN_THRM2:
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, 0);
+		*spr_val = 0;
		break;
	default:
unprivileged:
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
	ulong dar = 0;
-	ulong ra;
+	ulong ra = get_ra(inst);
+	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
-		ra = get_ra(inst);
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
-		ra = get_ra(inst);
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
-		dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
......
@@ -68,19 +68,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	rotldi	r10,r10,16
	mtmsrd	r10,1

-	/* Save host PMU registers and load guest PMU registers */
+	/* Save host PMU registers */
	/* R4 is live here (vcpu pointer) but not r3 or r5 */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable interrupts */
+	mfspr	r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+	/* On P7, clear MMCRA in order to disable SDAR updates */
+	li	r5, 0
+	mtspr	SPRN_MMCRA, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r5, LPPACA_PMCINUSE(r3)
	cmpwi	r5, 0
	beq	31f			/* skip if not */
	mfspr	r5, SPRN_MMCR1
-	mfspr	r6, SPRN_MMCRA
	std	r7, HSTATE_MMCR(r13)
	std	r5, HSTATE_MMCR + 8(r13)
	std	r6, HSTATE_MMCR + 16(r13)
......
@@ -26,6 +26,7 @@
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
/*****************************************************************************
 *                                                                           *
@@ -82,6 +83,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
#define XICS_XIRR		4
#define XICS_QIRR		0xc
#define XICS_IPI 2 /* interrupt source # for IPIs */
/*
 * We come in here when wakened from nap mode on a secondary hw thread.
@@ -94,26 +96,54 @@ kvm_start_guest:
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

-	/* were we napping due to cede? */
-	lbz	r0,HSTATE_NAPPING(r13)
-	cmpwi	r0,0
-	bne	kvm_end_cede
+	li	r0,KVM_HWTHREAD_IN_KVM
+	stb	r0,HSTATE_HWTHREAD_STATE(r13)

-	/* get vcpu pointer */
-	ld	r4, HSTATE_KVM_VCPU(r13)
+	/* NV GPR values from power7_idle() will no longer be valid */
+	li	r0,1
+	stb	r0,PACA_NAPSTATELOST(r13)

-	/* We got here with an IPI; clear it */
-	ld	r5, HSTATE_XICS_PHYS(r13)
-	li	r0, 0xff
-	li	r6, XICS_QIRR
-	li	r7, XICS_XIRR
-	lwzcix	r8, r5, r7		/* ack the interrupt */
+	/* get vcpu pointer, NULL if we have no vcpu to run */
+	ld	r4,HSTATE_KVM_VCPU(r13)
+	cmpdi	cr1,r4,0
+
+	/* Check the wake reason in SRR1 to see why we got here */
+	mfspr	r3,SPRN_SRR1
+	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
+	cmpwi	r3,4			/* was it an external interrupt? */
+	bne	27f
+
+	/*
+	 * External interrupt - for now assume it is an IPI, since we
+	 * should never get any other interrupts sent to offline threads.
+	 * Only do this for secondary threads.
+	 */
+	beq	cr1,25f
+	lwz	r3,VCPU_PTID(r4)
+	cmpwi	r3,0
+	beq	27f
+25:	ld	r5,HSTATE_XICS_PHYS(r13)
+	li	r0,0xff
+	li	r6,XICS_QIRR
+	li	r7,XICS_XIRR
+	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
-	stbcix	r0, r5, r6		/* clear it */
-	stwcix	r8, r5, r7		/* EOI it */
+	clrldi.	r9,r8,40		/* get interrupt source ID. */
+	beq	27f			/* none there? */
+	cmpwi	r9,XICS_IPI
+	bne	26f
+	stbcix	r0,r5,r6		/* clear IPI */
+26:	stwcix	r8,r5,r7		/* EOI the interrupt */

-	/* NV GPR values from power7_idle() will no longer be valid */
-	stb	r0, PACA_NAPSTATELOST(r13)
+27:	/* XXX should handle hypervisor maintenance interrupts etc. here */

+	/* if we have no vcpu to run, go back to sleep */
+	beq	cr1,kvm_no_guest
+
+	/* were we napping due to cede? */
+	lbz	r0,HSTATE_NAPPING(r13)
+	cmpwi	r0,0
+	bne	kvm_end_cede
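
The rlwinm above isolates the 3-bit wake-reason field of SRR1; rotating the low word left by 13 and masking with 0x7 is equivalent to shifting right by 19. In C (a sketch, assuming the POWER7 encoding where the value 4 means an external interrupt):

/* Sketch: decode the nap wake reason checked by kvm_start_guest. */
static inline unsigned int srr1_wake_reason(unsigned long srr1)
{
	return (srr1 >> 19) & 0x7;	/* 4 == woken by external interrupt */
}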
.global kvmppc_hv_entry .global kvmppc_hv_entry
kvmppc_hv_entry: kvmppc_hv_entry:
@@ -129,24 +159,15 @@ kvmppc_hv_entry:
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
+	/* Set partition DABR */
+	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+	li	r5,3
+	ld	r6,VCPU_DABR(r4)
+	mtspr	SPRN_DABRX,r5
+	mtspr	SPRN_DABR,r6
+BEGIN_FTR_SECTION
+	isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
@@ -185,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

+	ld	r14, VCPU_GPR(r14)(r4)
+	ld	r15, VCPU_GPR(r15)(r4)
+	ld	r16, VCPU_GPR(r16)(r4)
+	ld	r17, VCPU_GPR(r17)(r4)
+	ld	r18, VCPU_GPR(r18)(r4)
+	ld	r19, VCPU_GPR(r19)(r4)
+	ld	r20, VCPU_GPR(r20)(r4)
+	ld	r21, VCPU_GPR(r21)(r4)
+	ld	r22, VCPU_GPR(r22)(r4)
+	ld	r23, VCPU_GPR(r23)(r4)
+	ld	r24, VCPU_GPR(r24)(r4)
+	ld	r25, VCPU_GPR(r25)(r4)
+	ld	r26, VCPU_GPR(r26)(r4)
+	ld	r27, VCPU_GPR(r27)(r4)
+	ld	r28, VCPU_GPR(r28)(r4)
+	ld	r29, VCPU_GPR(r29)(r4)
+	ld	r30, VCPU_GPR(r30)(r4)
+	ld	r31, VCPU_GPR(r31)(r4)
+
BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
@@ -226,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

-	/* Set partition DABR */
-	li	r5,3
-	ld	r6,VCPU_DABR(r4)
-	mtspr	SPRN_DABRX,r5
-	mtspr	SPRN_DABR,r6
-
BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
@@ -925,12 +959,6 @@ BEGIN_FTR_SECTION
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

-	/* Restore host DABR and DABRX */
-	ld	r5,HSTATE_DABR(r13)
-	li	r6,7
-	mtspr	SPRN_DABR,r5
-	mtspr	SPRN_DABRX,r6
-
	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
@@ -969,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

+	/* save FP state */
+	mr	r3, r9
+	bl	.kvmppc_save_fp
+
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
@@ -983,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
+	mfspr	r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+	/* On P7, clear MMCRA in order to disable SDAR updates */
+	li	r7, 0
+	mtspr	SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
@@ -991,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
-	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
@@ -1016,17 +1053,20 @@ BEGIN_FTR_SECTION
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
-	/* save FP state */
-	mr	r3, r9
-	bl	.kvmppc_save_fp

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
-	lwz	r0,VCPU_PTID(r3)
+	lwz	r0,VCPU_PTID(r9)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

+	/* Restore host DABR and DABRX */
+	ld	r5,HSTATE_DABR(r13)
+	li	r6,7
+	mtspr	SPRN_DABR,r5
+	mtspr	SPRN_DABRX,r6
+
/*
 * Reload DEC. HDEC interrupts were disabled when
 * we reloaded the host's LPCR value.
@@ -1363,7 +1403,12 @@ bounce_ext_interrupt:
_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
-	mtspr	SPRN_DABR,r4
+	/* Work around P7 bug where DABR can get corrupted on mtspr */
+1:	mtspr	SPRN_DABR,r4
mfspr r5, SPRN_DABR
cmpd r4, r5
bne 1b
isync
	li	r3,0
	blr
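
kvmppc_h_set_dabr() now spins until the DABR write sticks, working around the P7 corruption bug noted above. The same loop in C (a sketch; mtspr()/mfspr() stand in for the inline-asm accessors):

/* Sketch of the DABR write-verify workaround. */
static void set_dabr_checked(unsigned long dabr)
{
	do {
		mtspr(SPRN_DABR, dabr);
	} while (mfspr(SPRN_DABR) != dabr);	/* retry until the write sticks */
}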
@@ -1445,8 +1490,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 * Take a nap until a decrementer or external interrupt occurs,
 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
 */
-	li	r0,0x80
-	stb	r0,PACAPROCSTART(r13)
+	li	r0,1
+	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
@@ -1463,26 +1508,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
kvm_end_cede:
	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)
-	ld	r2, PACATOC(r13)
-
-	/* If we're a secondary thread and we got here by an IPI, ack it */
-	ld	r4,HSTATE_KVM_VCPU(r13)
-	lwz	r3,VCPU_PTID(r4)
-	cmpwi	r3,0
-	beq	27f
-	mfspr	r3,SPRN_SRR1
-	rlwinm	r3,r3,44-31,0x7	/* extract wake reason field */
-	cmpwi	r3,4		/* was it an external interrupt? */
-	bne	27f
-	ld	r5, HSTATE_XICS_PHYS(r13)
-	li	r0,0xff
-	li	r6,XICS_QIRR
-	li	r7,XICS_XIRR
-	lwzcix	r8,r5,r7	/* ack the interrupt */
-	sync
-	stbcix	r0,r5,r6	/* clear it */
-	stwcix	r8,r5,r7	/* EOI it */
-27:

	/* load up FP state */
	bl	kvmppc_load_fp
@@ -1580,12 +1606,17 @@ secondary_nap:
	stwcx.	r3, 0, r4
	bne	51b
kvm_no_guest:
li r0, KVM_HWTHREAD_IN_NAP
stb r0, HSTATE_HWTHREAD_STATE(r13)
li r0, 0
std r0, HSTATE_KVM_VCPU(r13)
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
-	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
@@ -1599,8 +1630,8 @@ secondary_nap:
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
-	mfmsr	r9
-	ori	r8,r9,MSR_FP
+	mfmsr	r5
+	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
@@ -1649,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
-	mtmsrd	r9
+	mtmsrd	r5
	isync
	blr
......
@@ -120,6 +120,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
+			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
@@ -144,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
		}
	}
/*
* When switching from 32 to 64-bit, we may have a stale 32-bit
* magic page around, we need to flush it. Typically 32-bit magic
 * page will be instantiated when calling into RTAS. Note: We
 * assume that such a transition only happens while in kernel mode,
 * i.e., we never transition from user 32-bit to kernel 64-bit with
* a 32-bit magic page around.
*/
if (vcpu->arch.magic_page_pa &&
!(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
/* going from RTAS to normal kernel code */
kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
~0xFFFUL);
}
/* Preload FPU if it's enabled */ /* Preload FPU if it's enabled */
if (vcpu->arch.shared->msr & MSR_FP) if (vcpu->arch.shared->msr & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
...@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) ...@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{ {
ulong mp_pa = vcpu->arch.magic_page_pa; ulong mp_pa = vcpu->arch.magic_page_pa;
if (!(vcpu->arch.shared->msr & MSR_SF))
mp_pa = (uint32_t)mp_pa;
if (unlikely(mp_pa) && if (unlikely(mp_pa) &&
unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
return 1; return 1;
...@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* MMIO */ /* MMIO */
vcpu->stat.mmio_exits++; vcpu->stat.mmio_exits++;
vcpu->arch.paddr_accessed = pte.raddr; vcpu->arch.paddr_accessed = pte.raddr;
vcpu->arch.vaddr_accessed = pte.eaddr;
r = kvmppc_emulate_mmio(run, vcpu); r = kvmppc_emulate_mmio(run, vcpu);
if ( r == RESUME_HOST_NV ) if ( r == RESUME_HOST_NV )
r = RESUME_HOST; r = RESUME_HOST;
...@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->exit_reason = KVM_EXIT_UNKNOWN; run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1; run->ready_for_interrupt_injection = 1;
/* We get here with MSR.EE=0, so enable it to be a nice citizen */
__hard_irq_enable();
trace_kvm_book3s_exit(exit_nr, vcpu); trace_kvm_book3s_exit(exit_nr, vcpu);
preempt_enable(); preempt_enable();
kvm_resched(vcpu); kvm_resched(vcpu);
...@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break; break;
/* We're good on these - the host merely wanted to get our attention */ /* We're good on these - the host merely wanted to get our attention */
case BOOK3S_INTERRUPT_DECREMENTER: case BOOK3S_INTERRUPT_DECREMENTER:
case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++; vcpu->stat.dec_exits++;
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
case BOOK3S_INTERRUPT_EXTERNAL: case BOOK3S_INTERRUPT_EXTERNAL:
case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
case BOOK3S_INTERRUPT_EXTERNAL_HV:
vcpu->stat.ext_intr_exits++; vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
...@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
case BOOK3S_INTERRUPT_PROGRAM: case BOOK3S_INTERRUPT_PROGRAM:
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
{ {
enum emulation_result er; enum emulation_result er;
struct kvmppc_book3s_shadow_vcpu *svcpu; struct kvmppc_book3s_shadow_vcpu *svcpu;
...@@ -1131,6 +1158,31 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, ...@@ -1131,6 +1158,31 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
return r; return r;
} }
#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
/* No flags */
info->flags = 0;
/* SLB is always 64 entries */
info->slb_size = 64;
/* Standard 4k base page size segment */
info->sps[0].page_shift = 12;
info->sps[0].slb_enc = 0;
info->sps[0].enc[0].page_shift = 12;
info->sps[0].enc[0].pte_enc = 0;
/* Standard 16M large page size segment */
info->sps[1].page_shift = 24;
info->sps[1].slb_enc = SLB_VSID_L;
info->sps[1].enc[0].page_shift = 24;
info->sps[1].enc[0].pte_enc = 0;
return 0;
}
#endif /* CONFIG_PPC64 */
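For context, a minimal userspace sketch of how the new KVM_PPC_GET_SMMU_INFO ioctl might be consumed; the helper name, vm_fd parameter and the printing are assumptions, while the struct layout follows the hunk above.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: query and print the segment page sizes that
 * kvm_vm_ioctl_get_smmu_info() reports. Assumes vm_fd is an open
 * KVM VM file descriptor. */
static int print_smmu_info(int vm_fd)
{
	struct kvm_ppc_smmu_info info;
	int i;

	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
		return -1;

	printf("SLB size: %u entries\n", info.slb_size);
	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
		if (!info.sps[i].page_shift)
			continue;
		printf("  base page shift %u, slb_enc 0x%x\n",
		       info.sps[i].page_shift, info.sps[i].slb_enc);
	}
	return 0;
}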
int kvmppc_core_prepare_memory_region(struct kvm *kvm, int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem) struct kvm_userspace_memory_region *mem)
{ {
...@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, ...@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
int kvmppc_core_init_vm(struct kvm *kvm) int kvmppc_core_init_vm(struct kvm *kvm)
{ {
#ifdef CONFIG_PPC64
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
#endif
return 0; return 0;
} }
void kvmppc_core_destroy_vm(struct kvm *kvm) void kvmppc_core_destroy_vm(struct kvm *kvm)
{ {
#ifdef CONFIG_PPC64
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
} }
static int kvmppc_book3s_init(void) static int kvmppc_book3s_init(void)
......
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/anon_inodes.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/kvm_ppc.h> #include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h> #include <asm/kvm_book3s.h>
...@@ -98,6 +100,83 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) ...@@ -98,6 +100,83 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
return EMULATE_DONE; return EMULATE_DONE;
} }
/* Request defs for kvmppc_h_pr_bulk_remove() */
#define H_BULK_REMOVE_TYPE 0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
#define H_BULK_REMOVE_END 0xc000000000000000ULL
#define H_BULK_REMOVE_CODE 0x3000000000000000ULL
#define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL
#define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL
#define H_BULK_REMOVE_PARM 0x2000000000000000ULL
#define H_BULK_REMOVE_HW 0x3000000000000000ULL
#define H_BULK_REMOVE_RC 0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL
#define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL
#define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL
#define H_BULK_REMOVE_AVPN 0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH 4
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
int i;
int paramnr = 4;
int ret = H_SUCCESS;
for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
unsigned long pteg, rb, flags;
unsigned long pte[2];
unsigned long v = 0;
if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
break; /* Exit success */
} else if ((tsh & H_BULK_REMOVE_TYPE) !=
H_BULK_REMOVE_REQUEST) {
ret = H_PARAMETER;
break; /* Exit fail */
}
tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
tsh |= H_BULK_REMOVE_RESPONSE;
if ((tsh & H_BULK_REMOVE_ANDCOND) &&
(tsh & H_BULK_REMOVE_AVPN)) {
tsh |= H_BULK_REMOVE_PARM;
kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
ret = H_PARAMETER;
break; /* Exit fail */
}
pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
/* tsl = AVPN */
flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
if ((pte[0] & HPTE_V_VALID) == 0 ||
((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
tsh |= H_BULK_REMOVE_NOT_FOUND;
} else {
/* Splat the pteg in (userland) hpt */
copy_to_user((void __user *)pteg, &v, sizeof(v));
rb = compute_tlbie_rb(pte[0], pte[1],
tsh & H_BULK_REMOVE_PTEX);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
tsh |= H_BULK_REMOVE_SUCCESS;
tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
}
kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
}
kvmppc_set_gpr(vcpu, 3, ret);
return EMULATE_DONE;
}
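As a rough illustration of the request layout the H_BULK_REMOVE_* defines describe, a hedged guest-side sketch that encodes one parameter pair; the function name is an assumption and the actual hcall invocation is omitted.

/* Hypothetical encoding of one H_BULK_REMOVE slot: the type field
 * in the top two bits, the AVPN flag below it, and the PTE index
 * in the low bits. tsl carries the AVPN value to match. */
static void encode_bulk_remove_slot(unsigned long *tsh, unsigned long *tsl,
				    unsigned long ptex, unsigned long avpn)
{
	*tsh = H_BULK_REMOVE_REQUEST |
	       H_BULK_REMOVE_AVPN |
	       (ptex & H_BULK_REMOVE_PTEX);
	*tsl = avpn;
}

/* A batch shorter than H_BULK_REMOVE_MAX_BATCH is terminated by a
 * slot whose type field is H_BULK_REMOVE_END. */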
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{ {
unsigned long flags = kvmppc_get_gpr(vcpu, 4); unsigned long flags = kvmppc_get_gpr(vcpu, 4);
...@@ -134,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) ...@@ -134,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
return EMULATE_DONE; return EMULATE_DONE;
} }
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
unsigned long tce = kvmppc_get_gpr(vcpu, 6);
long rc;
rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
return EMULATE_DONE;
}
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{ {
switch (cmd) { switch (cmd) {
...@@ -144,12 +237,12 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) ...@@ -144,12 +237,12 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_PROTECT: case H_PROTECT:
return kvmppc_h_pr_protect(vcpu); return kvmppc_h_pr_protect(vcpu);
case H_BULK_REMOVE: case H_BULK_REMOVE:
/* We just flush all PTEs, so user space can return kvmppc_h_pr_bulk_remove(vcpu);
handle the HPT modifications */ case H_PUT_TCE:
kvmppc_mmu_pte_flush(vcpu, 0, 0); return kvmppc_h_pr_put_tce(vcpu);
break;
case H_CEDE: case H_CEDE:
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++; vcpu->stat.halt_wakeup++;
return EMULATE_DONE; return EMULATE_DONE;
} }
......
...@@ -128,24 +128,25 @@ no_dcbz32_on: ...@@ -128,24 +128,25 @@ no_dcbz32_on:
/* First clear RI in our current MSR value */ /* First clear RI in our current MSR value */
li r0, MSR_RI li r0, MSR_RI
andc r6, r6, r0 andc r6, r6, r0
MTMSR_EERI(r6)
mtsrr0 r9
mtsrr1 r4
PPC_LL r0, SVCPU_R0(r3) PPC_LL r0, SVCPU_R0(r3)
PPC_LL r1, SVCPU_R1(r3) PPC_LL r1, SVCPU_R1(r3)
PPC_LL r2, SVCPU_R2(r3) PPC_LL r2, SVCPU_R2(r3)
PPC_LL r4, SVCPU_R4(r3)
PPC_LL r5, SVCPU_R5(r3) PPC_LL r5, SVCPU_R5(r3)
PPC_LL r6, SVCPU_R6(r3)
PPC_LL r7, SVCPU_R7(r3) PPC_LL r7, SVCPU_R7(r3)
PPC_LL r8, SVCPU_R8(r3) PPC_LL r8, SVCPU_R8(r3)
PPC_LL r9, SVCPU_R9(r3)
PPC_LL r10, SVCPU_R10(r3) PPC_LL r10, SVCPU_R10(r3)
PPC_LL r11, SVCPU_R11(r3) PPC_LL r11, SVCPU_R11(r3)
PPC_LL r12, SVCPU_R12(r3) PPC_LL r12, SVCPU_R12(r3)
PPC_LL r13, SVCPU_R13(r3) PPC_LL r13, SVCPU_R13(r3)
MTMSR_EERI(r6)
mtsrr0 r9
mtsrr1 r4
PPC_LL r4, SVCPU_R4(r3)
PPC_LL r6, SVCPU_R6(r3)
PPC_LL r9, SVCPU_R9(r3)
PPC_LL r3, (SVCPU_R3)(r3) PPC_LL r3, (SVCPU_R3)(r3)
RFI RFI
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <asm/kvm_ppc.h> #include <asm/kvm_ppc.h>
#include <asm/switch_to.h>
#include "timing.h" #include "timing.h"
/* interrupt priority ordering */ /* interrupt priority ordering */
...@@ -48,7 +49,20 @@ ...@@ -48,7 +49,20 @@
#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
/* Internal pseudo-irqprio for level triggered externals */ /* Internal pseudo-irqprio for level triggered externals */
#define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20
#define BOOKE_IRQPRIO_MAX 20 #define BOOKE_IRQPRIO_DBELL 21
#define BOOKE_IRQPRIO_DBELL_CRIT 22
#define BOOKE_IRQPRIO_MAX 23
#define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \
(1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \
(1 << BOOKE_IRQPRIO_DBELL) | \
(1 << BOOKE_IRQPRIO_DECREMENTER) | \
(1 << BOOKE_IRQPRIO_FIT) | \
(1 << BOOKE_IRQPRIO_EXTERNAL))
#define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \
(1 << BOOKE_IRQPRIO_WATCHDOG) | \
(1 << BOOKE_IRQPRIO_CRITICAL))
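The two masks group interrupt priorities by the MSR bit that gates their delivery. A hedged sketch of the kind of deliverability test they enable (the function and parameter names are assumptions):

/* Illustrative only: a pending priority is deliverable when its
 * bit is set and the MSR bit gating its class (EE or CE) is on. */
static inline bool irqprio_deliverable(unsigned long pending,
				       unsigned int prio,
				       unsigned long msr)
{
	unsigned long bit = 1UL << prio;

	if (!(pending & bit))
		return false;
	if ((bit & BOOKE_IRQMASK_EE) && !(msr & MSR_EE))
		return false;
	if ((bit & BOOKE_IRQMASK_CE) && !(msr & MSR_CE))
		return false;
	return true;
}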
extern unsigned long kvmppc_booke_handlers; extern unsigned long kvmppc_booke_handlers;
...@@ -61,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); ...@@ -61,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance); unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
/* low-level asm code to transfer guest state */ /* low-level asm code to transfer guest state */
void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
...@@ -71,4 +85,46 @@ void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu); ...@@ -71,4 +85,46 @@ void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
/* high-level function, manages flags, host state */ /* high-level function, manages flags, host state */
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu);
enum int_class {
INT_CLASS_NONCRIT,
INT_CLASS_CRIT,
INT_CLASS_MC,
INT_CLASS_DBG,
};
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
/*
 * Load up the guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows
 * we're holding the FPU, and the host can then help save the
 * guest vcpu FP state if other threads need the FPU.
 * This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
*/
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
load_up_fpu();
current->thread.regs->msr |= MSR_FP;
}
#endif
}
/*
 * Save the guest vcpu FP state into the thread struct.
 * Must be called with preemption disabled.
*/
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
giveup_fpu(current);
#endif
}
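A hedged sketch of the calling pattern both helpers expect; the surrounding run-loop function is an assumption:

/* Illustrative call pattern: both helpers above must run with
 * preemption disabled across guest entry and exit. */
static void run_vcpu_once(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvmppc_load_guest_fp(vcpu);
	/* ... low-level guest entry/exit would go here ... */
	kvmppc_save_guest_fp(vcpu);
	preempt_enable();
}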
#endif /* __KVM_BOOKE_H__ */ #endif /* __KVM_BOOKE_H__ */
...@@ -419,13 +419,13 @@ lightweight_exit: ...@@ -419,13 +419,13 @@ lightweight_exit:
* written directly to the shared area, so we * written directly to the shared area, so we
* need to reload them here with the guest's values. * need to reload them here with the guest's values.
*/ */
lwz r3, VCPU_SHARED_SPRG4(r5) PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
mtspr SPRN_SPRG4W, r3 mtspr SPRN_SPRG4W, r3
lwz r3, VCPU_SHARED_SPRG5(r5) PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
mtspr SPRN_SPRG5W, r3 mtspr SPRN_SPRG5W, r3
lwz r3, VCPU_SHARED_SPRG6(r5) PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
mtspr SPRN_SPRG6W, r3 mtspr SPRN_SPRG6W, r3
lwz r3, VCPU_SHARED_SPRG7(r5) PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
mtspr SPRN_SPRG7W, r3 mtspr SPRN_SPRG7W, r3
#ifdef CONFIG_KVM_EXIT_TIMING #ifdef CONFIG_KVM_EXIT_TIMING
......
...@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) ...@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
case SIGNAL_EXITS: case SIGNAL_EXITS:
vcpu->stat.signal_exits++; vcpu->stat.signal_exits++;
break; break;
case DBELL_EXITS:
vcpu->stat.dbell_exits++;
break;
case GDBELL_EXITS:
vcpu->stat.gdbell_exits++;
break;
} }
} }
......
...@@ -52,4 +52,9 @@ struct kvm_sync_regs { ...@@ -52,4 +52,9 @@ struct kvm_sync_regs {
__u32 acrs[16]; /* access registers */ __u32 acrs[16]; /* access registers */
__u64 crs[16]; /* control registers */ __u64 crs[16]; /* control registers */
}; };
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
#define KVM_REG_S390_EPOCHDIFF (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2)
#define KVM_REG_S390_CPU_TIMER (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3)
#define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4)
#endif #endif
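A minimal userspace sketch of driving one of the new s390 onereg IDs through the generic KVM_SET_ONE_REG ioctl; the vcpu_fd parameter and helper name are assumptions.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical example: set the guest TOD programmable field via
 * KVM_REG_S390_TODPR. vcpu_fd is an open KVM vcpu descriptor. */
static int set_todpr(int vcpu_fd, uint32_t todpr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_TODPR,
		.addr = (uint64_t)(unsigned long)&todpr,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}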
...@@ -148,6 +148,7 @@ struct kvm_vcpu_stat { ...@@ -148,6 +148,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_restart; u32 instruction_sigp_restart;
u32 diagnose_10; u32 diagnose_10;
u32 diagnose_44; u32 diagnose_44;
u32 diagnose_9c;
}; };
struct kvm_s390_io_info { struct kvm_s390_io_info {
......
...@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void) ...@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0; return 0;
} }
static inline bool kvm_check_and_clear_guest_paused(void)
{
return false;
}
#endif #endif
#endif /* __S390_KVM_PARA_H */ #endif /* __S390_KVM_PARA_H */
...@@ -48,6 +48,7 @@ int sclp_cpu_deconfigure(u8 cpu); ...@@ -48,6 +48,7 @@ int sclp_cpu_deconfigure(u8 cpu);
void sclp_facilities_detect(void); void sclp_facilities_detect(void);
unsigned long long sclp_get_rnmax(void); unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void); unsigned long long sclp_get_rzm(void);
u8 sclp_get_fac85(void);
int sclp_sdias_blk_count(void); int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid); int sclp_chp_configure(struct chp_id chpid);
......
...@@ -47,9 +47,30 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu) ...@@ -47,9 +47,30 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{ {
VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
vcpu->stat.diagnose_44++; vcpu->stat.diagnose_44++;
vcpu_put(vcpu); kvm_vcpu_on_spin(vcpu);
yield(); return 0;
vcpu_load(vcpu); }
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tcpu;
int tid;
int i;
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
vcpu->stat.diagnose_9c++;
VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
if (tid == vcpu->vcpu_id)
return 0;
kvm_for_each_vcpu(i, tcpu, kvm)
if (tcpu->vcpu_id == tid) {
kvm_vcpu_yield_to(tcpu);
break;
}
return 0; return 0;
} }
...@@ -89,6 +110,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) ...@@ -89,6 +110,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
return diag_release_pages(vcpu); return diag_release_pages(vcpu);
case 0x44: case 0x44:
return __diag_time_slice_end(vcpu); return __diag_time_slice_end(vcpu);
case 0x9c:
return __diag_time_slice_end_directed(vcpu);
case 0x308: case 0x308:
return __diag_ipl_functions(vcpu); return __diag_ipl_functions(vcpu);
default: default:
......
...@@ -101,6 +101,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu) ...@@ -101,6 +101,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
} }
static intercept_handler_t instruction_handlers[256] = { static intercept_handler_t instruction_handlers[256] = {
[0x01] = kvm_s390_handle_01,
[0x83] = kvm_s390_handle_diag, [0x83] = kvm_s390_handle_diag,
[0xae] = kvm_s390_handle_sigp, [0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2, [0xb2] = kvm_s390_handle_b2,
......
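Dispatch over the instruction_handlers table above is keyed on the top byte of the intercepted instruction, so the new 0x01 entry routes to kvm_s390_handle_01. A hedged sketch of that dispatch (the exact surrounding code is an assumption):

/* Illustrative dispatch: the first opcode byte selects the handler. */
static int dispatch_instruction(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}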
...@@ -79,6 +79,7 @@ int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); ...@@ -79,6 +79,7 @@ int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
/* implemented in priv.c */ /* implemented in priv.c */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
/* implemented in sigp.c */ /* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
......
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
#include <asm-generic/kvm_para.h>
...@@ -40,5 +40,6 @@ struct pvclock_wall_clock { ...@@ -40,5 +40,6 @@ struct pvclock_wall_clock {
} __attribute__((__packed__)); } __attribute__((__packed__));
#define PVCLOCK_TSC_STABLE_BIT (1 << 0) #define PVCLOCK_TSC_STABLE_BIT (1 << 0)
#define PVCLOCK_GUEST_STOPPED (1 << 1)
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PVCLOCK_ABI_H */ #endif /* _ASM_X86_PVCLOCK_ABI_H */
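The new PVCLOCK_GUEST_STOPPED flag is what lets a guest's watchdog notice that the host had it stopped. A hedged guest-side sketch in the spirit of kvm_check_and_clear_guest_paused(); the function and parameter names are assumptions, only the flag and struct come from the patch.

/* Hypothetical check: if the host marked this vcpu as stopped in
 * its pvclock page, clear the bit and report it so the watchdog
 * can skip a spurious stall warning. */
static bool check_and_clear_guest_paused(struct pvclock_vcpu_time_info *src)
{
	if (!(src->flags & PVCLOCK_GUEST_STOPPED))
		return false;
	src->flags &= ~PVCLOCK_GUEST_STOPPED;
	return true;
}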
...@@ -36,6 +36,7 @@ config KVM ...@@ -36,6 +36,7 @@ config KVM
select TASKSTATS select TASKSTATS
select TASK_DELAY_ACCT select TASK_DELAY_ACCT
select PERF_EVENTS select PERF_EVENTS
select HAVE_KVM_MSI
---help--- ---help---
Support hosting fully virtualized guest machines using hardware Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent virtualization extensions. You will need a fairly recent
......