Commit 95f328d3 authored by Gleb Natapov

Merge branch 'kvm-ppc-queue' of git://github.com/agraf/linux-2.6 into queue

Conflicts:
	arch/powerpc/include/asm/processor.h
parents daf72722 a78b55d1
@@ -1810,6 +1810,50 @@ registers, find a list below:
PPC | KVM_REG_PPC_TLB3PS | 32
PPC | KVM_REG_PPC_EPTCFG | 32
PPC | KVM_REG_PPC_ICP_STATE | 64
PPC | KVM_REG_PPC_TB_OFFSET | 64
PPC | KVM_REG_PPC_SPMC1 | 32
PPC | KVM_REG_PPC_SPMC2 | 32
PPC | KVM_REG_PPC_IAMR | 64
PPC | KVM_REG_PPC_TFHAR | 64
PPC | KVM_REG_PPC_TFIAR | 64
PPC | KVM_REG_PPC_TEXASR | 64
PPC | KVM_REG_PPC_FSCR | 64
PPC | KVM_REG_PPC_PSPB | 32
PPC | KVM_REG_PPC_EBBHR | 64
PPC | KVM_REG_PPC_EBBRR | 64
PPC | KVM_REG_PPC_BESCR | 64
PPC | KVM_REG_PPC_TAR | 64
PPC | KVM_REG_PPC_DPDES | 64
PPC | KVM_REG_PPC_DAWR | 64
PPC | KVM_REG_PPC_DAWRX | 64
PPC | KVM_REG_PPC_CIABR | 64
PPC | KVM_REG_PPC_IC | 64
PPC | KVM_REG_PPC_VTB | 64
PPC | KVM_REG_PPC_CSIGR | 64
PPC | KVM_REG_PPC_TACR | 64
PPC | KVM_REG_PPC_TCSCR | 64
PPC | KVM_REG_PPC_PID | 64
PPC | KVM_REG_PPC_ACOP | 64
PPC | KVM_REG_PPC_VRSAVE | 32
PPC | KVM_REG_PPC_LPCR | 64
PPC | KVM_REG_PPC_PPR | 64
PPC | KVM_REG_PPC_ARCH_COMPAT | 32
PPC | KVM_REG_PPC_TM_GPR0 | 64
...
PPC | KVM_REG_PPC_TM_GPR31 | 64
PPC | KVM_REG_PPC_TM_VSR0 | 128
...
PPC | KVM_REG_PPC_TM_VSR63 | 128
PPC | KVM_REG_PPC_TM_CR | 64
PPC | KVM_REG_PPC_TM_LR | 64
PPC | KVM_REG_PPC_TM_CTR | 64
PPC | KVM_REG_PPC_TM_FPSCR | 64
PPC | KVM_REG_PPC_TM_AMR | 64
PPC | KVM_REG_PPC_TM_PPR | 64
PPC | KVM_REG_PPC_TM_VRSAVE | 64
PPC | KVM_REG_PPC_TM_VSCR | 32
PPC | KVM_REG_PPC_TM_DSCR | 64
PPC | KVM_REG_PPC_TM_TAR | 64
ARM registers are mapped using the lower 32 bits. The upper 16 of that
is the register group type, or coprocessor number:
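Every row above is an ID for the ONE_REG interface, i.e. it is read and written with the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls rather than a dedicated ioctl. As a rough userspace sketch (the vcpu file descriptor is assumed to already exist, error handling is trimmed, and the register chosen is only an example):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Sketch: read the guest timebase offset via the ONE_REG interface. */
static int read_tb_offset(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TB_OFFSET,  /* 64-bit ID from the table above */
		.addr = (uintptr_t)val,         /* kernel copies the value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}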
......
@@ -152,12 +152,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
......
@@ -1550,12 +1550,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
......
@@ -198,12 +198,13 @@ kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return -ENOIOCTLCMD;
}
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages)
{
return 0;
}
......
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
return inst & 0xffff;
}
static inline unsigned int get_oc(u32 inst)
{
return (inst >> 11) & 0x7fff;
}
#endif /* __ASM_PPC_DISASSEMBLE_H__ */
@@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
cmpwi r10,0; \
bne do_kvm_##n
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
* If hv is possible, interrupts come into to the hv version
* of the kvmppc_interrupt code, which then jumps to the PR handler,
* kvmppc_interrupt_pr, if the guest is a PR guest.
*/
#define kvmppc_interrupt kvmppc_interrupt_hv
#else
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
#define __KVM_HANDLER(area, h, n) \
do_kvm_##n: \
BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \
std r10,HSTATE_CFAR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \
BEGIN_FTR_SECTION_NESTED(948) \
ld r10,area+EX_PPR(r13); \
std r10,HSTATE_PPR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
ld r10,area+EX_R10(r13); \
stw r9,HSTATE_SCRATCH1(r13); \
ld r9,area+EX_R9(r13); \
@@ -217,6 +232,10 @@ do_kvm_##n: \
ld r10,area+EX_R10(r13); \
beq 89f; \
stw r9,HSTATE_SCRATCH1(r13); \
BEGIN_FTR_SECTION_NESTED(948) \
ld r9,area+EX_PPR(r13); \
std r9,HSTATE_PPR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \
ld r9,area+EX_R9(r13); \
std r12,HSTATE_SCRATCH0(r13); \
li r12,n; \
@@ -236,7 +255,7 @@ do_kvm_##n: \
#define KVM_HANDLER_SKIP(area, h, n)
#endif
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
#define KVMTEST_PR(n) __KVMTEST(n)
#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n)
#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
......
@@ -123,6 +123,8 @@
#define BOOK3S_HFLAG_SLB 0x2
#define BOOK3S_HFLAG_PAIRED_SINGLE 0x4
#define BOOK3S_HFLAG_NATIVE_PS 0x8
#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10
#define BOOK3S_HFLAG_NEW_TLBIE 0x20
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
@@ -136,6 +138,8 @@
#define KVM_GUEST_MODE_NONE 0
#define KVM_GUEST_MODE_GUEST 1
#define KVM_GUEST_MODE_SKIP 2
#define KVM_GUEST_MODE_GUEST_HV 3
#define KVM_GUEST_MODE_HOST_HV 4
#define KVM_INST_FETCH_FAILED -1
......
@@ -58,16 +58,18 @@ struct hpte_cache {
struct hlist_node list_pte_long;
struct hlist_node list_vpte;
struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
struct hlist_node list_vpte_64k;
#endif
struct rcu_head rcu_head;
u64 host_vpn;
u64 pfn;
ulong slot;
struct kvmppc_pte pte;
int pagesize;
};
struct kvmppc_vcpu_book3s {
struct kvm_vcpu vcpu;
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
struct {
u64 esid;
@@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {
struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
int hpte_cache_count;
spinlock_t mmu_lock;
};
@@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {
#define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2
-#define VSID_REAL 0x0fffffffffc00000ULL
+#define VSID_REAL 0x07ffffffffc00000ULL
-#define VSID_BAT 0x0fffffffffb00000ULL
+#define VSID_BAT 0x07ffffffffb00000ULL
#define VSID_64K 0x0800000000000000ULL
#define VSID_1T 0x1000000000000000ULL
#define VSID_REAL_DR 0x2000000000000000ULL
#define VSID_REAL_IR 0x4000000000000000ULL
@@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+ bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+ bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
@@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
unsigned long mask);
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
@@ -184,11 +195,9 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
-return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+return vcpu->arch.book3s;
}
extern void kvm_return_point(void);
/* Also add subarch specific defines */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -198,203 +207,6 @@ extern void kvm_return_point(void);
#include <asm/kvm_book3s_64.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_PR
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
return to_book3s(vcpu)->hior;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
unsigned long pending_now, unsigned long old_pending)
{
if (pending_now)
vcpu->arch.shared->int_pending = 1;
else if (old_pending)
vcpu->arch.shared->int_pending = 0;
}
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
if ( num < 14 ) {
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->gpr[num] = val;
svcpu_put(svcpu);
to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
} else
vcpu->arch.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
if ( num < 14 ) {
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r = svcpu->gpr[num];
svcpu_put(svcpu);
return r;
} else
return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->cr = val;
svcpu_put(svcpu);
to_book3s(vcpu)->shadow_vcpu->cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 r;
r = svcpu->cr;
svcpu_put(svcpu);
return r;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->xer = val;
to_book3s(vcpu)->shadow_vcpu->xer = val;
svcpu_put(svcpu);
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 r;
r = svcpu->xer;
svcpu_put(svcpu);
return r;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->ctr = val;
svcpu_put(svcpu);
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r;
r = svcpu->ctr;
svcpu_put(svcpu);
return r;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->lr = val;
svcpu_put(svcpu);
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r;
r = svcpu->lr;
svcpu_put(svcpu);
return r;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->pc = val;
svcpu_put(svcpu);
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r;
r = svcpu->pc;
svcpu_put(svcpu);
return r;
}
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
ulong pc = kvmppc_get_pc(vcpu);
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 r;
/* Load the instruction manually if it failed to do so in the
* exit path */
if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
r = svcpu->last_inst;
svcpu_put(svcpu);
return r;
}
/*
* Like kvmppc_get_last_inst(), but for fetching a sc instruction.
* Because the sc instruction sets SRR0 to point to the following
* instruction, we have to fetch from pc - 4.
*/
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
ulong pc = kvmppc_get_pc(vcpu) - 4;
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 r;
/* Load the instruction manually if it failed to do so in the
* exit path */
if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
r = svcpu->last_inst;
svcpu_put(svcpu);
return r;
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r;
r = svcpu->fault_dar;
svcpu_put(svcpu);
return r;
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
ulong crit_raw = vcpu->arch.shared->critical;
ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
bool crit;
/* Truncate crit indicators in 32 bit mode */
if (!(vcpu->arch.shared->msr & MSR_SF)) {
crit_raw &= 0xffffffff;
crit_r1 &= 0xffffffff;
}
/* Critical section when crit == r1 */
crit = (crit_raw == crit_r1);
/* ... and we're in supervisor mode */
crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
return crit;
}
#else /* CONFIG_KVM_BOOK3S_PR */
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
return 0;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
unsigned long pending_now, unsigned long old_pending)
{
}
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
@@ -489,12 +301,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
return false;
}
#endif
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
......
@@ -22,7 +22,7 @@
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
-return to_book3s(vcpu)->shadow_vcpu;
+return vcpu->arch.shadow_vcpu;
}
static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
......
@@ -20,7 +20,7 @@
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
preempt_disable();
@@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
#define SPAPR_TCE_SHIFT 12
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
extern unsigned long kvm_rma_pages;
#endif
@@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm,
if (atomic_read(&kvm->arch.hpte_mod_interest))
rev->guest_rpte |= HPTE_GR_MODIFIED;
}
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
@@ -83,7 +83,7 @@ struct kvmppc_host_state {
u8 restore_hid5;
u8 napping;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
u8 hwthread_req;
u8 hwthread_state;
u8 host_ipi;
@@ -101,6 +101,7 @@ struct kvmppc_host_state {
#endif
#ifdef CONFIG_PPC_BOOK3S_64
u64 cfar;
u64 ppr;
#endif
};
@@ -108,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu {
ulong gpr[14];
u32 cr;
u32 xer;
u32 fault_dsisr;
u32 last_inst;
ulong ctr;
ulong lr;
ulong pc;
ulong shadow_srr1;
ulong fault_dar;
u32 fault_dsisr;
u32 last_inst;
#ifdef CONFIG_PPC_BOOK3S_32
u32 sr[16]; /* Guest SRs */
......
@@ -27,6 +27,11 @@
#define KVMPPC_NR_LPIDS 64
#define KVMPPC_INST_EHPRIV 0x7c00021c
#define EHPRIV_OC_SHIFT 11
/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
#define EHPRIV_OC_DEBUG 1
#define KVMPPC_INST_EHPRIV_DEBUG (KVMPPC_INST_EHPRIV | \
(EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
......
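The ehpriv-based debug trap defined above is simply the base KVMPPC_INST_EHPRIV opcode with the OC field inserted at bit 11, which is exactly what the new get_oc() helper in asm/disassemble.h extracts again. A small self-contained sanity check, with the constants copied locally for illustration rather than pulled from the kernel headers:

#include <assert.h>

#define KVMPPC_INST_EHPRIV 0x7c00021c
#define EHPRIV_OC_SHIFT 11
#define EHPRIV_OC_DEBUG 1
#define KVMPPC_INST_EHPRIV_DEBUG (KVMPPC_INST_EHPRIV | (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))

/* Mirrors get_oc() from asm/disassemble.h: the OC field sits in bits 11..25. */
static unsigned int get_oc(unsigned int inst)
{
	return (inst >> 11) & 0x7fff;
}

int main(void)
{
	assert(KVMPPC_INST_EHPRIV_DEBUG == 0x7c000a1c);
	assert(get_oc(KVMPPC_INST_EHPRIV_DEBUG) == EHPRIV_OC_DEBUG);
	return 0;
}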
@@ -68,10 +68,12 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
#define HPTEG_HASH_BITS_PTE_LONG 12
#define HPTEG_HASH_BITS_VPTE 13
#define HPTEG_HASH_BITS_VPTE_LONG 5
#define HPTEG_HASH_BITS_VPTE_64K 11
#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
#define HPTEG_HASH_NUM_VPTE_64K (1 << HPTEG_HASH_BITS_VPTE_64K)
/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM 0x0fffffffffffffffULL
@@ -84,6 +86,9 @@ struct lppaca;
struct slb_shadow;
struct dtl_entry;
struct kvmppc_vcpu_book3s;
struct kvmppc_book3s_shadow_vcpu;
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
@@ -219,15 +224,15 @@ struct revmap_entry {
#define KVMPPC_GOT_PAGE 0x80
struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long *rmap;
unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};
struct kvm_arch {
unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long hpt_virt;
struct revmap_entry *revmap;
unsigned int host_lpid;
@@ -251,7 +256,10 @@ struct kvm_arch {
cpumask_t need_tlb_flush;
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
struct mutex hpt_mutex;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
struct list_head rtas_tokens;
@@ -262,6 +270,7 @@ struct kvm_arch {
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
#endif
struct kvmppc_ops *kvm_ops;
};
/*
@@ -289,6 +298,10 @@ struct kvmppc_vcore {
u64 stolen_tb;
u64 preempt_tb;
struct kvm_vcpu *runner;
u64 tb_offset; /* guest timebase - host timebase */
ulong lpcr;
u32 arch_compat;
ulong pcr;
};
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -323,6 +336,7 @@ struct kvmppc_pte {
bool may_read : 1;
bool may_write : 1;
bool may_execute : 1;
u8 page_size; /* MMU_PAGE_xxx */
};
struct kvmppc_mmu {
@@ -335,7 +349,8 @@ struct kvmppc_mmu {
/* book3s */
void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *pte, bool data, bool iswrite);
void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -355,6 +370,7 @@ struct kvmppc_slb {
bool large : 1; /* PTEs are 16MB */
bool tb : 1; /* 1TB segment */
bool class : 1;
u8 base_page_size; /* MMU_PAGE_xxx */
};
# ifdef CONFIG_PPC_FSL_BOOK3E
@@ -372,17 +388,6 @@ struct kvmppc_slb {
#define KVMPPC_EPR_USER 1 /* exit to userspace to fill EPR */
#define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */
struct kvmppc_booke_debug_reg {
u32 dbcr0;
u32 dbcr1;
u32 dbcr2;
#ifdef CONFIG_KVM_E500MC
u32 dbcr4;
#endif
u64 iac[KVMPPC_BOOKE_MAX_IAC];
u64 dac[KVMPPC_BOOKE_MAX_DAC];
};
#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2
@@ -397,6 +402,10 @@ struct kvm_vcpu_arch {
int slb_max; /* 1 + index of last valid entry in slb[] */
int slb_nr; /* total number of entries in SLB */
struct kvmppc_mmu mmu;
struct kvmppc_vcpu_book3s *book3s;
#endif
#ifdef CONFIG_PPC_BOOK3S_32
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif
ulong gpr[32];
@@ -458,6 +467,8 @@ struct kvm_vcpu_arch {
u32 ctrl;
ulong dabr;
ulong cfar;
ulong ppr;
ulong shadow_srr1;
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
@@ -493,6 +504,8 @@ struct kvm_vcpu_arch {
u64 mmcr[3];
u32 pmc[8];
u64 siar;
u64 sdar;
#ifdef CONFIG_KVM_EXIT_TIMING
struct mutex exit_timing_lock;
@@ -526,7 +539,10 @@ struct kvm_vcpu_arch {
u32 eptcfg;
u32 epr;
u32 crit_save;
-struct kvmppc_booke_debug_reg dbg_reg;
+/* guest debug registers*/
struct debug_reg dbg_reg;
/* hardware visible debug registers when in guest state */
struct debug_reg shadow_dbg_reg;
#endif
gpa_t paddr_accessed;
gva_t vaddr_accessed;
@@ -577,7 +593,7 @@ struct kvm_vcpu_arch {
struct kvmppc_icp *icp; /* XICS presentation controller */
#endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
struct kvm_vcpu_arch_shared shregs;
unsigned long pgfault_addr;
......
@@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int op, int *advance);
extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
ulong val);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
ulong *val);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void);
@@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
struct kvm_allocate_rma *rma);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
union kvmppc_one_reg {
u32 wval;
u64 dval;
vector128 vval;
u64 vsxval[2];
struct {
u64 addr;
u64 length;
} vpaval;
};
struct kvmppc_ops {
struct module *owner;
int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val);
int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val);
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);
void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
int (*check_requests)(struct kvm_vcpu *vcpu);
int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
int (*prepare_memory_region)(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem);
void (*commit_memory_region)(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old);
int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
unsigned long end);
int (*age_hva)(struct kvm *kvm, unsigned long hva);
int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
void (*mmu_destroy)(struct kvm_vcpu *vcpu);
void (*free_memslot)(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int (*create_memslot)(struct kvm_memory_slot *slot,
unsigned long npages);
int (*init_vm)(struct kvm *kvm);
void (*destroy_vm)(struct kvm *kvm);
int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
unsigned long arg);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
return kvm->arch.kvm_ops == kvmppc_hv_ops;
}
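The kvmppc_ops table above is the heart of this series: HV and PR support can now be built into the same kernel, and generic PowerPC KVM code reaches the active backend through kvm->arch.kvm_ops instead of linking against a single implementation. A minimal sketch of that dispatch pattern (the wrapper name here is illustrative, not necessarily a helper added by this merge):

/* Generic code calls a thin wrapper; whichever backend registered its
 * kvmppc_ops for this VM (HV or PR) provides the implementation. */
static inline void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

is_kvmppc_hv_enabled() above is the matching predicate for the few call sites that still need to know which backend owns a VM.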
/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
return r;
}
union kvmppc_one_reg {
u32 wval;
u64 dval;
vector128 vval;
u64 vsxval[2];
struct {
u64 addr;
u64 length;
} vpaval;
};
#define one_reg_size(id) \
(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -245,10 +293,10 @@ union kvmppc_one_reg {
__v; \
})
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
struct openpic;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
@@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
static inline u32 kvmppc_get_xics_latch(void)
{
-u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+u32 xirr;
+xirr = get_paca()->kvm_hstate.saved_xirr;
get_paca()->kvm_hstate.saved_xirr = 0;
return xirr;
}
@@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
paca[cpu].kvm_hstate.host_ipi = host_ipi;
}
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}
#else
static inline void __init kvm_cma_reserve(void)
......
@@ -166,7 +166,7 @@ struct paca_struct {
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
/* We use this to store guest state in */
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
......
@@ -147,21 +147,7 @@ typedef struct {
#define TS_FPR(i) fpr[i][TS_FPROFFSET]
#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
-struct thread_struct {
+struct debug_reg {
unsigned long ksp; /* Kernel stack pointer */
#ifdef CONFIG_PPC64
unsigned long ksp_vsid;
#endif
struct pt_regs *regs; /* Pointer to saved register state */
mm_segment_t fs; /* for get_fs() validation */
#ifdef CONFIG_BOOKE
/* BookE base exception scratch space; align on cacheline */
unsigned long normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * The following help to manage the use of Debug Control Registers
@@ -198,6 +184,27 @@ struct thread_struct {
unsigned long dvc2;
#endif
#endif
};
struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
#ifdef CONFIG_PPC64
unsigned long ksp_vsid;
#endif
struct pt_regs *regs; /* Pointer to saved register state */
mm_segment_t fs; /* for get_fs() validation */
#ifdef CONFIG_BOOKE
/* BookE base exception scratch space; align on cacheline */
unsigned long normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
#endif
/* Debug Registers */
struct debug_reg debug;
/* FP and VSX 0-31 register set */
double fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
struct {
......
@@ -40,7 +40,7 @@
#define _PAGE_U1 0x010000
#define _PAGE_U0 0x020000
#define _PAGE_ACCESSED 0x040000
-#define _PAGE_LENDIAN 0x080000
+#define _PAGE_ENDIAN 0x080000
#define _PAGE_GUARDED 0x100000
#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
......
@@ -243,6 +243,7 @@
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */
#define SPRN_SPURR 0x134 /* Scaled PURR */
#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */
#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */
@@ -283,6 +284,7 @@
#define LPCR_ISL (1ul << (63-2))
#define LPCR_VC_SH (63-2)
#define LPCR_DPFD_SH (63-11)
#define LPCR_DPFD (7ul << LPCR_DPFD_SH)
#define LPCR_VRMASD (0x1ful << (63-16))
#define LPCR_VRMA_L (1ul << (63-12))
#define LPCR_VRMA_LP0 (1ul << (63-15))
@@ -299,6 +301,7 @@
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
#define LPCR_MER 0x00000800 /* Mediated External Exception */
#define LPCR_MER_SH 11
#define LPCR_TC 0x00000200 /* Translation control */
#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
@@ -311,6 +314,10 @@
#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hardware m? error recovery */
#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
#define SPRN_PCR 0x152 /* Processor compatibility register */
#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
@@ -420,6 +427,7 @@
#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
#define HID4_RMOR (0xFFFFul << HID4_RMOR_SH)
#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
@@ -1102,6 +1110,13 @@
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
/* "Logical" PVR values defined in PAPR, representing architecture levels */
#define PVR_ARCH_204 0x0f000001
#define PVR_ARCH_205 0x0f000002
#define PVR_ARCH_206 0x0f000003
#define PVR_ARCH_206p 0x0f100003
#define PVR_ARCH_207 0x0f000004
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned long rval; \
......
@@ -381,7 +381,7 @@
#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */
#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
-#define dbcr_iac_range(task) ((task)->thread.dbcr0)
+#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0)
#define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */
#define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */
#define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */
@@ -395,7 +395,7 @@
#define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */
#define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */
-#define dbcr_dac(task) ((task)->thread.dbcr1)
+#define dbcr_dac(task) ((task)->thread.debug.dbcr1)
#define DBCR_DAC1R DBCR1_DAC1R
#define DBCR_DAC1W DBCR1_DAC1W
#define DBCR_DAC2R DBCR1_DAC2R
@@ -441,7 +441,7 @@
#define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */
#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */
-#define dbcr_dac(task) ((task)->thread.dbcr0)
+#define dbcr_dac(task) ((task)->thread.debug.dbcr0)
#define DBCR_DAC1R DBCR0_DAC1R
#define DBCR_DAC1W DBCR0_DAC1W
#define DBCR_DAC2R DBCR0_DAC2R
@@ -475,7 +475,7 @@
#define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */
#define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */
-#define dbcr_iac_range(task) ((task)->thread.dbcr1)
+#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1)
#define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */
#define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */
#define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */
......
@@ -35,6 +35,7 @@ extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern void switch_booke_debug_regs(struct thread_struct *new_thread);
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
......
@@ -27,6 +27,7 @@
#define __KVM_HAVE_PPC_SMT
#define __KVM_HAVE_IRQCHIP
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_GUEST_DEBUG
struct kvm_regs {
__u64 pc;
@@ -269,7 +270,24 @@ struct kvm_fpu {
__u64 fpr[32];
};
/*
* Defines for h/w breakpoint, watchpoint (read, write or both) and
* software breakpoint.
* These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
* for KVM_DEBUG_EXIT.
*/
#define KVMPPC_DEBUG_NONE 0x0
#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
struct kvm_debug_exit_arch {
__u64 address;
/*
* exiting to userspace because of h/w breakpoint, watchpoint
* (read, write or both) and software breakpoint.
*/
__u32 status;
__u32 reserved;
};
/* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +299,6 @@ struct kvm_guest_debug_arch {
 * Type denotes h/w breakpoint, read watchpoint, write
 * watchpoint or watchpoint (both read and write).
 */
#define KVMPPC_DEBUG_NONE 0x0
#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1)
#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2)
#define KVMPPC_DEBUG_WATCH_READ (1UL << 3)
__u32 type;
__u32 reserved;
} bp[16];
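The KVMPPC_DEBUG_* values above are used both as the "type" of a breakpoint slot handed in with KVM_SET_GUEST_DEBUG and as the "status" reported back in kvm_debug_exit_arch on a KVM_EXIT_DEBUG exit. As a hedged userspace sketch of turning on software-breakpoint reporting (generic KVM_GUESTDBG_* flags from <linux/kvm.h>, the vcpu fd assumed to exist already):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Sketch: ask KVM to exit to userspace on guest software breakpoints. */
static int enable_sw_breakpoints(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}

On the resulting KVM_EXIT_DEBUG, run->debug.arch.status then carries one of the KVMPPC_DEBUG_* values and run->debug.arch.address the address that triggered the exit.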
@@ -429,6 +443,11 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
#define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
#define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
#define KVM_REG_PPC_MMCR2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
#define KVM_REG_PPC_MMCRS (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
#define KVM_REG_PPC_SIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
#define KVM_REG_PPC_SDAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
#define KVM_REG_PPC_SIER (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
#define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
#define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
@@ -499,6 +518,65 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
#define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
/* Timebase offset */
#define KVM_REG_PPC_TB_OFFSET (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
/* POWER8 registers */
#define KVM_REG_PPC_SPMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
#define KVM_REG_PPC_SPMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
#define KVM_REG_PPC_IAMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
#define KVM_REG_PPC_TFHAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
#define KVM_REG_PPC_TFIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
#define KVM_REG_PPC_TEXASR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
#define KVM_REG_PPC_FSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
#define KVM_REG_PPC_PSPB (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
#define KVM_REG_PPC_EBBHR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
#define KVM_REG_PPC_EBBRR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
#define KVM_REG_PPC_BESCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
#define KVM_REG_PPC_TAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
#define KVM_REG_PPC_DPDES (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
#define KVM_REG_PPC_DAWR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
#define KVM_REG_PPC_DAWRX (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
#define KVM_REG_PPC_CIABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
#define KVM_REG_PPC_IC (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
#define KVM_REG_PPC_VTB (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
#define KVM_REG_PPC_CSIGR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
#define KVM_REG_PPC_TACR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
/* Architecture compatibility level */
#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
#define KVM_REG_PPC_TM (KVM_REG_PPC | 0x80000000)
/* TM GPRs */
#define KVM_REG_PPC_TM_GPR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_PPC_TM_GPR(n) (KVM_REG_PPC_TM_GPR0 + (n))
#define KVM_REG_PPC_TM_GPR31 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
/* TM VSX */
#define KVM_REG_PPC_TM_VSR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
#define KVM_REG_PPC_TM_VSR(n) (KVM_REG_PPC_TM_VSR0 + (n))
#define KVM_REG_PPC_TM_VSR63 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
/* TM SPRS */
#define KVM_REG_PPC_TM_CR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
#define KVM_REG_PPC_TM_LR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
#define KVM_REG_PPC_TM_CTR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
#define KVM_REG_PPC_TM_FPSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
#define KVM_REG_PPC_TM_AMR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
#define KVM_REG_PPC_TM_PPR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
#define KVM_REG_PPC_TM_VRSAVE (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
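The TM_* IDs above follow the same ONE_REG scheme as the rest of the table, so existing userspace can read the checkpointed state with the ioctls it already uses. A minimal sketch of such an access; vcpu_fd is assumed to be an already-created KVM vcpu descriptor and the helper name and error handling are illustrative (it only works with kernel headers that carry the definitions added here):

#include <err.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read one checkpointed (transactional) GPR from a vcpu. */
static uint64_t get_tm_gpr(int vcpu_fd, int n)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TM_GPR(n),	/* 64-bit register ID defined above */
		.addr = (uintptr_t)&val,	/* kernel copies the value here */
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		err(1, "KVM_GET_ONE_REG");
	return val;
}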
/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
......
@@ -114,7 +114,7 @@ int main(void)
#endif /* CONFIG_SPE */ #endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */ #endif /* CONFIG_PPC64 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0));
#endif #endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
...@@ -446,7 +446,7 @@ int main(void) ...@@ -446,7 +446,7 @@ int main(void)
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
...@@ -477,7 +477,7 @@ int main(void) ...@@ -477,7 +477,7 @@ int main(void)
DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
/* book3s */ /* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
...@@ -509,6 +509,8 @@ int main(void) ...@@ -509,6 +509,8 @@ int main(void)
DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
...@@ -518,18 +520,22 @@ int main(void) ...@@ -518,18 +520,22 @@ int main(void)
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else #else
# define SVCPU_FIELD(x, f) # define SVCPU_FIELD(x, f)
...@@ -581,7 +587,7 @@ int main(void) ...@@ -581,7 +587,7 @@ int main(void)
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping); HSTATE_FIELD(HSTATE_NAPPING, napping);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req); HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state); HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
...@@ -597,10 +603,11 @@ int main(void) ...@@ -597,10 +603,11 @@ int main(void)
HSTATE_FIELD(HSTATE_DABR, dabr); HSTATE_FIELD(HSTATE_DABR, dabr);
HSTATE_FIELD(HSTATE_DECEXP, dec_expires); HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
DEFINE(IPI_PRIORITY, IPI_PRIORITY); DEFINE(IPI_PRIORITY, IPI_PRIORITY);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_BOOK3S_64
HSTATE_FIELD(HSTATE_CFAR, cfar); HSTATE_FIELD(HSTATE_CFAR, cfar);
HSTATE_FIELD(HSTATE_PPR, ppr);
#endif /* CONFIG_PPC_BOOK3S_64 */ #endif /* CONFIG_PPC_BOOK3S_64 */
#else /* CONFIG_PPC_BOOK3S */ #else /* CONFIG_PPC_BOOK3S */
......
@@ -126,7 +126,7 @@ BEGIN_FTR_SECTION
bgt cr1,.
GET_PACA(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
li r0,KVM_HWTHREAD_IN_KERNEL li r0,KVM_HWTHREAD_IN_KERNEL
stb r0,HSTATE_HWTHREAD_STATE(r13) stb r0,HSTATE_HWTHREAD_STATE(r13)
/* Order setting hwthread_state vs. testing hwthread_req */ /* Order setting hwthread_state vs. testing hwthread_req */
...@@ -425,7 +425,7 @@ data_access_check_stab: ...@@ -425,7 +425,7 @@ data_access_check_stab:
mfspr r9,SPRN_DSISR mfspr r9,SPRN_DSISR
srdi r10,r10,60 srdi r10,r10,60
rlwimi r10,r9,16,0x20 rlwimi r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
lbz r9,HSTATE_IN_GUEST(r13) lbz r9,HSTATE_IN_GUEST(r13)
rlwimi r10,r9,8,0x300 rlwimi r10,r9,8,0x300
#endif #endif
...@@ -650,6 +650,32 @@ slb_miss_user_pseries: ...@@ -650,6 +650,32 @@ slb_miss_user_pseries:
b . /* prevent spec. execution */ b . /* prevent spec. execution */
#endif /* __DISABLED__ */ #endif /* __DISABLED__ */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvmppc_skip_interrupt:
/*
* Here all GPRs are unchanged from when the interrupt happened
* except for r13, which is saved in SPRG_SCRATCH0.
*/
mfspr r13, SPRN_SRR0
addi r13, r13, 4
mtspr SPRN_SRR0, r13
GET_SCRATCH0(r13)
rfid
b .
kvmppc_skip_Hinterrupt:
/*
* Here all GPRs are unchanged from when the interrupt happened
* except for r13, which is saved in SPRG_SCRATCH0.
*/
mfspr r13, SPRN_HSRR0
addi r13, r13, 4
mtspr SPRN_HSRR0, r13
GET_SCRATCH0(r13)
hrfid
b .
#endif
/* /*
* Code from here down to __end_handlers is invoked from the * Code from here down to __end_handlers is invoked from the
* exception prologs above. Because the prologs assemble the * exception prologs above. Because the prologs assemble the
......
...@@ -84,7 +84,7 @@ _GLOBAL(power7_nap) ...@@ -84,7 +84,7 @@ _GLOBAL(power7_nap)
std r9,_MSR(r1) std r9,_MSR(r1)
std r1,PACAR1(r13) std r1,PACAR1(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're napping */ /* Tell KVM we're napping */
li r4,KVM_HWTHREAD_IN_NAP li r4,KVM_HWTHREAD_IN_NAP
stb r4,HSTATE_HWTHREAD_STATE(r13) stb r4,HSTATE_HWTHREAD_STATE(r13)
......
@@ -314,28 +314,28 @@ static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			      DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}
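The pattern in this hunk (thread->dbcr0 becoming thread->debug.dbcr0 and so on) comes from grouping the Book-E/4xx debug state into one structure inside thread_struct, so the whole block can later be shared with the KVM debug-emulation work in this series. A rough sketch of that grouping, not the exact kernel definition (the real one lives in arch/powerpc/include/asm/processor.h behind the same CONFIG_PPC_ADV_DEBUG_* options, with more fields than shown):

/* Simplified sketch only; field set abridged and config guards omitted. */
struct debug_reg {
	unsigned long dbcr0, dbcr1, dbcr2;	/* debug control */
	unsigned long iac1, iac2, iac3, iac4;	/* instruction address compare */
	unsigned long dac1, dac2;		/* data address compare */
	unsigned long dvc1, dvc2;		/* data value compare */
	unsigned long dbsr;			/* debug status */
};

struct thread_struct {
	/* ... existing fields ... */
	struct debug_reg debug;	/* replaces the individual dbcr/iac/dac members */
};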
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, thread->debug.iac1);
	mtspr(SPRN_IAC2, thread->debug.iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->debug.iac3);
	mtspr(SPRN_IAC4, thread->debug.iac4);
#endif
	mtspr(SPRN_DAC1, thread->debug.dac1);
	mtspr(SPRN_DAC2, thread->debug.dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->debug.dvc1);
	mtspr(SPRN_DVC2, thread->debug.dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
#endif
}
/*
@@ -371,12 +371,13 @@ static void prime_debug_regs(struct thread_struct *thread)
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_thread);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */ #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT #ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread) static void set_debug_reg_defaults(struct thread_struct *thread)
......
...@@ -269,7 +269,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, ...@@ -269,7 +269,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr > 0) if (addr > 0)
break; break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS #ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.debug.dac1, (u32 __user *)data);
#else #else
dabr_fake = ( dabr_fake = (
(child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | (child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
......
...@@ -1309,7 +1309,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, ...@@ -1309,7 +1309,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
unsigned char tmp; unsigned char tmp;
unsigned long new_msr = regs->msr; unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS #ifdef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif #endif
for (i=0; i<ndbg; i++) { for (i=0; i<ndbg; i++) {
...@@ -1324,7 +1324,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, ...@@ -1324,7 +1324,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
} else { } else {
new_dbcr0 &= ~DBCR0_IC; new_dbcr0 &= ~DBCR0_IC;
if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
			current->thread.debug.dbcr1)) {
new_msr &= ~MSR_DE; new_msr &= ~MSR_DE;
new_dbcr0 &= ~DBCR0_IDM; new_dbcr0 &= ~DBCR0_IDM;
} }
...@@ -1359,7 +1359,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, ...@@ -1359,7 +1359,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
the user is really doing something wrong. */ the user is really doing something wrong. */
regs->msr = new_msr; regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS #ifdef CONFIG_PPC_ADV_DEBUG_REGS
current->thread.debug.dbcr0 = new_dbcr0;
#endif #endif
if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
......
...@@ -351,8 +351,8 @@ static inline int check_io_access(struct pt_regs *regs) ...@@ -351,8 +351,8 @@ static inline int check_io_access(struct pt_regs *regs)
#define REASON_TRAP ESR_PTR #define REASON_TRAP ESR_PTR
/* single-step stuff */ /* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#else #else
/* On non-4xx, the reason for the machine check or program /* On non-4xx, the reason for the machine check or program
...@@ -1486,7 +1486,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status) ...@@ -1486,7 +1486,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
current->thread.dbcr2 &= ~DBCR2_DAC12MODE; current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif #endif
do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
5); 5);
...@@ -1497,24 +1497,24 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status) ...@@ -1497,24 +1497,24 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
6); 6);
changed |= 0x01; changed |= 0x01;
} else if (debug_status & DBSR_IAC1) { } else if (debug_status & DBSR_IAC1) {
current->thread.dbcr0 &= ~DBCR0_IAC1; current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
dbcr_iac_range(current) &= ~DBCR_IAC12MODE; dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
1); 1);
changed |= 0x01; changed |= 0x01;
} else if (debug_status & DBSR_IAC2) { } else if (debug_status & DBSR_IAC2) {
current->thread.dbcr0 &= ~DBCR0_IAC2; current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
2); 2);
changed |= 0x01; changed |= 0x01;
} else if (debug_status & DBSR_IAC3) { } else if (debug_status & DBSR_IAC3) {
current->thread.dbcr0 &= ~DBCR0_IAC3; current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
dbcr_iac_range(current) &= ~DBCR_IAC34MODE; dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
3); 3);
changed |= 0x01; changed |= 0x01;
} else if (debug_status & DBSR_IAC4) { } else if (debug_status & DBSR_IAC4) {
current->thread.dbcr0 &= ~DBCR0_IAC4; current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
4); 4);
changed |= 0x01; changed |= 0x01;
...@@ -1524,19 +1524,20 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status) ...@@ -1524,19 +1524,20 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
* Check all other debug flags and see if that bit needs to be turned * Check all other debug flags and see if that bit needs to be turned
* back on or not. * back on or not.
*/ */
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			current->thread.debug.dbcr1))
	regs->msr |= MSR_DE;
else
	/* Make sure the IDM flag is off */
	current->thread.debug.dbcr0 &= ~DBCR0_IDM;

if (changed & 0x01)
	mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
} }
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{ {
current->thread.debug.dbsr = debug_status;
/* Hack alert: On BookE, Branch Taken stops on the branch itself, while /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
* on server, it stops on the target of the branch. In order to simulate * on server, it stops on the target of the branch. In order to simulate
...@@ -1553,8 +1554,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) ...@@ -1553,8 +1554,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
/* Do the single step trick only when coming from userspace */ /* Do the single step trick only when coming from userspace */
if (user_mode(regs)) { if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_BT;
current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE; regs->msr |= MSR_DE;
return; return;
} }
...@@ -1582,13 +1583,13 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) ...@@ -1582,13 +1583,13 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
return; return;
if (user_mode(regs)) { if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_IC;
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			current->thread.debug.dbcr1))
	regs->msr |= MSR_DE;
else
	/* Make sure the IDM bit is off */
	current->thread.debug.dbcr0 &= ~DBCR0_IDM;
} }
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
......
...@@ -31,13 +31,13 @@ ...@@ -31,13 +31,13 @@
#include "44x_tlb.h" #include "44x_tlb.h"
#include "booke.h" #include "booke.h"
static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_booke_vcpu_load(vcpu, cpu);
	kvmppc_44x_tlb_load(vcpu);
}

static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
{
	kvmppc_44x_tlb_put(vcpu);
	kvmppc_booke_vcpu_put(vcpu);
...@@ -114,29 +114,32 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, ...@@ -114,29 +114,32 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
return 0; return 0;
} }
static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
				     struct kvm_sregs *sregs)
{
	return kvmppc_get_sregs_ivor(vcpu, sregs);
}

static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
				     struct kvm_sregs *sregs)
{
	return kvmppc_set_sregs_ivor(vcpu, sregs);
}

static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
				  union kvmppc_one_reg *val)
{
	return -EINVAL;
}

static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
				  union kvmppc_one_reg *val)
{
	return -EINVAL;
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
unsigned int id)
{ {
struct kvmppc_vcpu_44x *vcpu_44x; struct kvmppc_vcpu_44x *vcpu_44x;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
...@@ -167,7 +170,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) ...@@ -167,7 +170,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
return ERR_PTR(err); return ERR_PTR(err);
} }
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
...@@ -176,28 +179,53 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) ...@@ -176,28 +179,53 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_44x); kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
} }
int kvmppc_core_init_vm(struct kvm *kvm) static int kvmppc_core_init_vm_44x(struct kvm *kvm)
{ {
return 0; return 0;
} }
void kvmppc_core_destroy_vm(struct kvm *kvm) static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
{ {
} }
static struct kvmppc_ops kvm_ops_44x = {
.get_sregs = kvmppc_core_get_sregs_44x,
.set_sregs = kvmppc_core_set_sregs_44x,
.get_one_reg = kvmppc_get_one_reg_44x,
.set_one_reg = kvmppc_set_one_reg_44x,
.vcpu_load = kvmppc_core_vcpu_load_44x,
.vcpu_put = kvmppc_core_vcpu_put_44x,
.vcpu_create = kvmppc_core_vcpu_create_44x,
.vcpu_free = kvmppc_core_vcpu_free_44x,
.mmu_destroy = kvmppc_mmu_destroy_44x,
.init_vm = kvmppc_core_init_vm_44x,
.destroy_vm = kvmppc_core_destroy_vm_44x,
.emulate_op = kvmppc_core_emulate_op_44x,
.emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
.emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
};
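The ops table above is how the 44x backend plugs into the new indirection layer: generic code no longer calls global kvmppc_core_* symbols but goes through a per-VM ops pointer that kvmppc_44x_init() publishes via kvmppc_pr_ops below. A hedged sketch of what such a call site looks like; the kvm_ops field name is taken from this series, but the wrapper placement is illustrative:

/* Illustrative wrappers: dispatch through the per-VM ops pointer
 * instead of referencing a backend symbol directly. */
static inline void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

static inline int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}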
static int __init kvmppc_44x_init(void)
{
	int r;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_44x.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_44x;

err_out:
	return r;
}

static void __exit kvmppc_44x_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}
......
...@@ -91,7 +91,7 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn) ...@@ -91,7 +91,7 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
return EMULATE_DONE; return EMULATE_DONE;
} }
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance) unsigned int inst, int *advance)
{ {
int emulated = EMULATE_DONE; int emulated = EMULATE_DONE;
...@@ -152,7 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -152,7 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated; return emulated;
} }
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{ {
int emulated = EMULATE_DONE; int emulated = EMULATE_DONE;
...@@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) ...@@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
return emulated; return emulated;
} }
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{ {
int emulated = EMULATE_DONE; int emulated = EMULATE_DONE;
......
...@@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, ...@@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
trace_kvm_stlb_inval(stlb_index); trace_kvm_stlb_inval(stlb_index);
} }
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i; int i;
......
@@ -34,17 +34,20 @@ config KVM_BOOK3S_64_HANDLER
	bool
	select KVM_BOOK3S_HANDLER

config KVM_BOOK3S_PR_POSSIBLE
	bool
	select KVM_MMIO
	select MMU_NOTIFIER

config KVM_BOOK3S_HV_POSSIBLE
	bool

config KVM_BOOK3S_32
	tristate "KVM support for PowerPC book3s_32 processors"
	depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
	select KVM
	select KVM_BOOK3S_32_HANDLER
	select KVM_BOOK3S_PR_POSSIBLE
---help--- ---help---
Support running unmodified book3s_32 guest kernels Support running unmodified book3s_32 guest kernels
in virtual machines on book3s_32 host processors. in virtual machines on book3s_32 host processors.
@@ -59,6 +62,7 @@ config KVM_BOOK3S_64
	depends on PPC_BOOK3S_64
	select KVM_BOOK3S_64_HANDLER
	select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
---help--- ---help---
Support running unmodified book3s_64 and book3s_32 guest kernels Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors. in virtual machines on book3s_64 host processors.
...@@ -69,8 +73,9 @@ config KVM_BOOK3S_64 ...@@ -69,8 +73,9 @@ config KVM_BOOK3S_64
If unsure, say N. If unsure, say N.
config KVM_BOOK3S_64_HV
	tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
	depends on KVM_BOOK3S_64
	select KVM_BOOK3S_HV_POSSIBLE
select MMU_NOTIFIER select MMU_NOTIFIER
select CMA select CMA
---help--- ---help---
...@@ -89,9 +94,20 @@ config KVM_BOOK3S_64_HV ...@@ -89,9 +94,20 @@ config KVM_BOOK3S_64_HV
If unsure, say N. If unsure, say N.
config KVM_BOOK3S_64_PR
	tristate "KVM support without using hypervisor mode in host"
	depends on KVM_BOOK3S_64
	select KVM_BOOK3S_PR_POSSIBLE
---help---
Support running guest kernels in virtual machines on processors
without using hypervisor mode in the host, by running the
guest in user mode (problem state) and emulating all
privileged instructions and registers.
This is not as fast as using hypervisor mode, but works on
machines where hypervisor mode is not available or not usable,
and can emulate processors that are different from the host
processor, including emulating 32-bit processors on a 64-bit
host.
config KVM_BOOKE_HV
	bool
......
@@ -53,41 +53,51 @@ kvm-e500mc-objs := \
	e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)

kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
	book3s_64_vio_hv.o

kvm-pr-y := \
	fpu.o \
	book3s_paired_singles.o \
	book3s_pr.o \
	book3s_pr_papr.o \
	book3s_emulate.o \
	book3s_interrupts.o \
	book3s_mmu_hpte.o \
	book3s_64_mmu_host.o \
	book3s_64_mmu.o \
	book3s_32_mmu.o

ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
kvm-book3s_64-module-objs := \
	$(KVM)/coalesced_mmio.o

kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
	book3s_rmhandlers.o
endif

kvm-hv-y += \
	book3s_hv.o \
	book3s_hv_interrupts.o \
	book3s_64_mmu_hv.o

kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
	book3s_hv_rm_xics.o

ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
	book3s_hv_rmhandlers.o \
	book3s_hv_rm_mmu.o \
	book3s_hv_ras.o \
	book3s_hv_builtin.o \
	book3s_hv_cma.o \
	$(kvm-book3s_64-builtin-xics-objs-y)
endif

kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
	book3s_xics.o

kvm-book3s_64-module-objs += \
	$(KVM)/kvm_main.o \
	$(KVM)/eventfd.o \
	powerpc.o \
@@ -123,4 +133,7 @@ obj-$(CONFIG_KVM_E500MC) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o

obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o

obj-y += $(kvm-book3s_64-builtin-objs-y)
/*
* Copyright IBM Corporation, 2013
* Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License or (at your optional) any later version of the license.
*
*/
#ifndef __POWERPC_KVM_BOOK3S_H__
#define __POWERPC_KVM_BOOK3S_H__
extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
struct kvm_memory_slot *memslot);
extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
unsigned long end);
extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
int sprn, ulong spr_val);
extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
int sprn, ulong *spr_val);
extern int kvmppc_book3s_init_pr(void);
extern void kvmppc_book3s_exit_pr(void);
#endif
...@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw) ...@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw)
} }
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data); struct kvmppc_pte *pte, bool data,
bool iswrite);
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid); u64 *vsid);
...@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
u64 vsid; u64 vsid;
struct kvmppc_pte pte; struct kvmppc_pte pte;
if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data)) if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
return pte.vpage; return pte.vpage;
kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
...@@ -111,10 +112,11 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) ...@@ -111,10 +112,11 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, 0); kvmppc_set_msr(vcpu, 0);
} }
static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s, static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
u32 sre, gva_t eaddr, u32 sre, gva_t eaddr,
bool primary) bool primary)
{ {
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u32 page, hash, pteg, htabmask; u32 page, hash, pteg, htabmask;
hva_t r; hva_t r;
...@@ -132,7 +134,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3 ...@@ -132,7 +134,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg, kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
sr_vsid(sre)); sr_vsid(sre));
r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
if (kvm_is_error_hva(r)) if (kvm_is_error_hva(r))
return r; return r;
return r | (pteg & ~PAGE_MASK); return r | (pteg & ~PAGE_MASK);
...@@ -145,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary) ...@@ -145,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
} }
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data) struct kvmppc_pte *pte, bool data,
bool iswrite)
{ {
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_bat *bat; struct kvmppc_bat *bat;
...@@ -186,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -186,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
printk(KERN_INFO "BAT is not readable!\n"); printk(KERN_INFO "BAT is not readable!\n");
continue; continue;
} }
if (iswrite && !pte->may_write) {
dprintk_pte("BAT is read-only!\n"); dprintk_pte("BAT is read-only!\n");
continue; continue;
} }
...@@ -201,9 +203,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -201,9 +203,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data, struct kvmppc_pte *pte, bool data,
bool primary) bool iswrite, bool primary)
{ {
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u32 sre; u32 sre;
hva_t ptegp; hva_t ptegp;
u32 pteg[16]; u32 pteg[16];
...@@ -218,7 +219,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -218,7 +219,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary); ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
if (kvm_is_error_hva(ptegp)) { if (kvm_is_error_hva(ptegp)) {
printk(KERN_INFO "KVM: Invalid PTEG!\n"); printk(KERN_INFO "KVM: Invalid PTEG!\n");
goto no_page_found; goto no_page_found;
...@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
break; break;
} }
if ( !pte->may_read )
continue;
dprintk_pte("MMU: Found PTE -> %x %x - %x\n", dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
pteg[i], pteg[i+1], pp); pteg[i], pteg[i+1], pp);
found = 1; found = 1;
...@@ -271,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -271,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Update PTE C and A bits, so the guest's swapper knows we used the /* Update PTE C and A bits, so the guest's swapper knows we used the
page */ page */
if (found) { if (found) {
u32 oldpte = pteg[i+1]; u32 pte_r = pteg[i+1];
char __user *addr = (char __user *) &pteg[i+1];
if (pte->may_read)
pteg[i+1] |= PTEG_FLAG_ACCESSED;
if (pte->may_write)
pteg[i+1] |= PTEG_FLAG_DIRTY;
else
dprintk_pte("KVM: Mapping read-only page!\n");
/* Write back into the PTEG */
if (pteg[i+1] != oldpte)
copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
/*
* Use single-byte writes to update the HPTE, to
* conform to what real hardware does.
*/
if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
pte_r |= PTEG_FLAG_ACCESSED;
put_user(pte_r >> 8, addr + 2);
}
if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
pte_r |= PTEG_FLAG_DIRTY;
put_user(pte_r, addr + 3);
}
if (!pte->may_read || (iswrite && !pte->may_write))
return -EPERM;
return 0; return 0;
} }
...@@ -302,12 +304,14 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -302,12 +304,14 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
} }
static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data) struct kvmppc_pte *pte, bool data,
bool iswrite)
{ {
int r; int r;
ulong mp_ea = vcpu->arch.magic_page_ea; ulong mp_ea = vcpu->arch.magic_page_ea;
pte->eaddr = eaddr; pte->eaddr = eaddr;
pte->page_size = MMU_PAGE_4K;
/* Magic page override */ /* Magic page override */
if (unlikely(mp_ea) && if (unlikely(mp_ea) &&
...@@ -323,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -323,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return 0; return 0;
} }
r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
if (r < 0) if (r < 0)
r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true); r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
data, iswrite, true);
if (r < 0) if (r < 0)
r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false); r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
data, iswrite, false);
return r; return r;
} }
...@@ -347,7 +353,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, ...@@ -347,7 +353,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
{ {
kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000); int i;
struct kvm_vcpu *v;
/* flush this VA on all cpus */
kvm_for_each_vcpu(i, v, vcpu->kvm)
kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
} }
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
......
...@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, ...@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
extern char etext[]; extern char etext[];
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
bool iswrite)
{ {
pfn_t hpaddr; pfn_t hpaddr;
u64 vpn; u64 vpn;
...@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) ...@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
bool evict = false; bool evict = false;
struct hpte_cache *pte; struct hpte_cache *pte;
int r = 0; int r = 0;
bool writable;
/* Get host physical address for gpa */ /* Get host physical address for gpa */
hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
iswrite, &writable);
if (is_error_noslot_pfn(hpaddr)) { if (is_error_noslot_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
orig_pte->eaddr); orig_pte->eaddr);
...@@ -204,7 +207,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) ...@@ -204,7 +207,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
(primary ? 0 : PTE_SEC); (primary ? 0 : PTE_SEC);
pteg1 = hpaddr | PTE_M | PTE_R | PTE_C; pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
if (orig_pte->may_write) { if (orig_pte->may_write && writable) {
pteg1 |= PP_RWRW; pteg1 |= PP_RWRW;
mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
} else { } else {
...@@ -259,6 +262,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) ...@@ -259,6 +262,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
return r; return r;
} }
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{ {
struct kvmppc_sid_map *map; struct kvmppc_sid_map *map;
...@@ -341,7 +349,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) ...@@ -341,7 +349,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
svcpu_put(svcpu); svcpu_put(svcpu);
} }
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{ {
int i; int i;
......
...@@ -107,9 +107,20 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -107,9 +107,20 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
return kvmppc_slb_calc_vpn(slb, eaddr); return kvmppc_slb_calc_vpn(slb, eaddr);
} }
static int mmu_pagesize(int mmu_pg)
{
switch (mmu_pg) {
case MMU_PAGE_64K:
return 16;
case MMU_PAGE_16M:
return 24;
}
return 12;
}
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe) static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{ {
return mmu_pagesize(slbe->base_page_size);
} }
static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
...@@ -119,11 +130,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) ...@@ -119,11 +130,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p); return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
} }
static hva_t kvmppc_mmu_book3s_64_get_pteg( static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
struct kvmppc_vcpu_book3s *vcpu_book3s,
struct kvmppc_slb *slbe, gva_t eaddr, struct kvmppc_slb *slbe, gva_t eaddr,
bool second) bool second)
{ {
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u64 hash, pteg, htabsize; u64 hash, pteg, htabsize;
u32 ssize; u32 ssize;
hva_t r; hva_t r;
...@@ -148,10 +159,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg( ...@@ -148,10 +159,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
/* When running a PAPR guest, SDR1 contains a HVA address instead /* When running a PAPR guest, SDR1 contains a HVA address instead
of a GPA */ of a GPA */
if (vcpu_book3s->vcpu.arch.papr_enabled) if (vcpu->arch.papr_enabled)
r = pteg; r = pteg;
else else
r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
if (kvm_is_error_hva(r)) if (kvm_is_error_hva(r))
return r; return r;
...@@ -166,18 +177,38 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr) ...@@ -166,18 +177,38 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p); avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
if (p < 16)
	avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
else
	avpn <<= p - 16;
return avpn; return avpn;
} }
/*
* Return page size encoded in the second word of a HPTE, or
* -1 for an invalid encoding for the base page size indicated by
* the SLB entry. This doesn't handle mixed pagesize segments yet.
*/
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
switch (slbe->base_page_size) {
case MMU_PAGE_64K:
if ((r & 0xf000) == 0x1000)
return MMU_PAGE_64K;
break;
case MMU_PAGE_16M:
if ((r & 0xff000) == 0)
return MMU_PAGE_16M;
break;
}
return -1;
}
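Two invented example values to make the encoding above concrete (purely illustrative, not taken from a real guest HPTE):

/* Invented values, only to exercise decode_pagesize() above. */
struct kvmppc_slb slbe_64k = { .base_page_size = MMU_PAGE_64K };
int ok  = decode_pagesize(&slbe_64k, 0x1190);	/* (r & 0xf000) == 0x1000 -> MMU_PAGE_64K */
int bad = decode_pagesize(&slbe_64k, 0x0196);	/* LP bits don't match    -> -1 */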
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, bool data) struct kvmppc_pte *gpte, bool data,
bool iswrite)
{ {
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe; struct kvmppc_slb *slbe;
hva_t ptegp; hva_t ptegp;
u64 pteg[16]; u64 pteg[16];
...@@ -189,6 +220,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -189,6 +220,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
u8 pp, key = 0; u8 pp, key = 0;
bool found = false; bool found = false;
bool second = false; bool second = false;
int pgsize;
ulong mp_ea = vcpu->arch.magic_page_ea; ulong mp_ea = vcpu->arch.magic_page_ea;
/* Magic page override */ /* Magic page override */
...@@ -202,6 +234,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -202,6 +234,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_execute = true; gpte->may_execute = true;
gpte->may_read = true; gpte->may_read = true;
gpte->may_write = true; gpte->may_write = true;
gpte->page_size = MMU_PAGE_4K;
return 0; return 0;
} }
...@@ -222,8 +255,12 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -222,8 +255,12 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID | v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
HPTE_V_SECONDARY; HPTE_V_SECONDARY;
pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
do_second: do_second:
ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second); ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
if (kvm_is_error_hva(ptegp)) if (kvm_is_error_hva(ptegp))
goto no_page_found; goto no_page_found;
...@@ -240,6 +277,13 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -240,6 +277,13 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
for (i=0; i<16; i+=2) { for (i=0; i<16; i+=2) {
/* Check all relevant fields of 1st dword */ /* Check all relevant fields of 1st dword */
if ((pteg[i] & v_mask) == v_val) { if ((pteg[i] & v_mask) == v_val) {
/* If large page bit is set, check pgsize encoding */
if (slbe->large &&
(vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
pgsize = decode_pagesize(slbe, pteg[i+1]);
if (pgsize < 0)
continue;
}
found = true; found = true;
break; break;
} }
...@@ -256,13 +300,15 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -256,13 +300,15 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
v = pteg[i]; v = pteg[i];
r = pteg[i+1]; r = pteg[i+1];
pp = (r & HPTE_R_PP) | key;
if (r & HPTE_R_PP0)
	pp |= 8;

gpte->eaddr = eaddr;
gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
gpte->page_size = pgsize;
gpte->may_execute = ((r & HPTE_R_N) ? false : true);
gpte->may_read = false;
gpte->may_write = false;
...@@ -277,6 +323,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -277,6 +323,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
case 3: case 3:
case 5: case 5:
case 7: case 7:
case 10:
gpte->may_read = true; gpte->may_read = true;
break; break;
} }
...@@ -287,30 +334,37 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -287,30 +334,37 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Update PTE R and C bits, so the guest's swapper knows we used the
 * page */
if (gpte->may_read && !(r & HPTE_R_R)) {
	/*
	 * Set the accessed flag.
	 * We have to write this back with a single byte write
	 * because another vcpu may be accessing this on
	 * non-PAPR platforms such as mac99, and this is
	 * what real hardware does.
	 */
	char __user *addr = (char __user *) &pteg[i+1];
	r |= HPTE_R_R;
	put_user(r >> 8, addr + 6);
}
if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
	/* Set the dirty flag */
	/* Use a single byte write */
	char __user *addr = (char __user *) &pteg[i+1];
	r |= HPTE_R_C;
	put_user(r, addr + 7);
}

mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

if (!gpte->may_read || (iswrite && !gpte->may_write))
	return -EPERM;
return 0;
no_page_found: no_page_found:
mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
return -ENOENT; return -ENOENT;
no_seg_found: no_seg_found:
dprintk("KVM MMU: Trigger segment fault\n"); dprintk("KVM MMU: Trigger segment fault\n");
return -EINVAL; return -EINVAL;
} }
...@@ -345,6 +399,21 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) ...@@ -345,6 +399,21 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; slbe->nx = (rs & SLB_VSID_N) ? 1 : 0;
slbe->class = (rs & SLB_VSID_C) ? 1 : 0; slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
slbe->base_page_size = MMU_PAGE_4K;
if (slbe->large) {
if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
switch (rs & SLB_VSID_LP) {
case SLB_VSID_LP_00:
slbe->base_page_size = MMU_PAGE_16M;
break;
case SLB_VSID_LP_01:
slbe->base_page_size = MMU_PAGE_64K;
break;
}
} else
slbe->base_page_size = MMU_PAGE_16M;
}
slbe->orige = rb & (ESID_MASK | SLB_ESID_V); slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
slbe->origv = rs; slbe->origv = rs;
...@@ -460,14 +529,45 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, ...@@ -460,14 +529,45 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
bool large) bool large)
{ {
u64 mask = 0xFFFFFFFFFULL; u64 mask = 0xFFFFFFFFFULL;
long i;
struct kvm_vcpu *v;
dprintk("KVM MMU: tlbie(0x%lx)\n", va); dprintk("KVM MMU: tlbie(0x%lx)\n", va);
/*
* The tlbie instruction changed behaviour starting with
* POWER6. POWER6 and later don't have the large page flag
* in the instruction but in the RB value, along with bits
* indicating page and segment sizes.
*/
if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
/* POWER6 or later */
if (va & 1) { /* L bit */
if ((va & 0xf000) == 0x1000)
mask = 0xFFFFFFFF0ULL; /* 64k page */
else
mask = 0xFFFFFF000ULL; /* 16M page */
}
} else {
/* older processors, e.g. PPC970 */
	if (large)
		mask = 0xFFFFFF000ULL;
}

/* flush this VA on all vcpus */
kvm_for_each_vcpu(i, v, vcpu->kvm)
	kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}
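The masks chosen above encode the page size as a run of 4k virtual page numbers to invalidate; each cleared low bit doubles the number of vpages flushed. A standalone, illustrative check (uses the GCC/Clang ctz builtin; not kernel code):

#include <stdio.h>

int main(void)
{
	/* 0 cleared bits -> 4k, 4 -> 64k, 12 -> 16M. */
	unsigned long long masks[] = { 0xFFFFFFFFFULL, 0xFFFFFFFF0ULL, 0xFFFFFF000ULL };

	for (int i = 0; i < 3; i++)
		printf("%#llx covers %llu vpage(s)\n",
		       masks[i], 1ULL << __builtin_ctzll(masks[i]));
	return 0;
}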
#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
ulong mp_ea = vcpu->arch.magic_page_ea;
return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
(mp_ea >> SID_SHIFT) == esid;
}
#endif
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
@@ -475,11 +575,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;
+	int pagesize = MMU_PAGE_64K;

	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
+			pagesize = slb->base_page_size;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
@@ -490,28 +592,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
-		*vsid = VSID_REAL | esid;
+		gvsid = VSID_REAL | esid;
		break;
	case MSR_IR:
-		*vsid = VSID_REAL_IR | gvsid;
+		gvsid |= VSID_REAL_IR;
		break;
	case MSR_DR:
-		*vsid = VSID_REAL_DR | gvsid;
+		gvsid |= VSID_REAL_DR;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;
-		*vsid = gvsid;
		break;
	default:
		BUG();
		break;
	}

+#ifdef CONFIG_PPC_64K_PAGES
+	/*
+	 * Mark this as a 64k segment if the host is using
+	 * 64k pages, the host MMU supports 64k pages and
+	 * the guest segment page size is >= 64k,
+	 * but not if this segment contains the magic page.
+	 */
+	if (pagesize >= MMU_PAGE_64K &&
+	    mmu_psize_defs[MMU_PAGE_64K].shift &&
+	    !segment_contains_magic_page(vcpu, esid))
+		gvsid |= VSID_64K;
+#endif
+
	if (vcpu->arch.shared->msr & MSR_PR)
-		*vsid |= VSID_PR;
+		gvsid |= VSID_PR;

+	*vsid = gvsid;
	return 0;

no_slb:
...
@@ -27,14 +27,14 @@
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
-#include "trace.h"
+#include "trace_pr.h"

#define PTE_SIZE 12

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
-			       MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
+			       pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
			       false);
}
@@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
	return NULL;
}

-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
{
	unsigned long vpn;
	pfn_t hpaddr;
@@ -90,16 +91,26 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
+	int hpsize = MMU_PAGE_4K;
+	bool writable;
+	unsigned long mmu_seq;
+	struct kvm *kvm = vcpu->kvm;
+	struct hpte_cache *cpte;
+	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
+	unsigned long pfn;
+
+	/* used to check for invalidations in progress */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();

	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
-	if (is_error_noslot_pfn(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
+	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+	if (is_error_noslot_pfn(pfn)) {
+		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
		r = -EINVAL;
		goto out;
	}
-	hpaddr <<= PAGE_SHIFT;
-	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+	hpaddr = pfn << PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -117,20 +128,39 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
		goto out;
	}

-	vsid = map->host_vsid;
-	vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

-	if (!orig_pte->may_write)
-		rflags |= HPTE_R_PP;
-	else
-		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	kvm_set_pfn_accessed(pfn);
+	if (!orig_pte->may_write || !writable)
+		rflags |= PP_RXRX;
+	else {
+		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_set_pfn_dirty(pfn);
+	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
-		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+		kvmppc_mmu_flush_icache(pfn);

+	/*
+	 * Use 64K pages if possible; otherwise, on 64K page kernels,
+	 * we need to transfer 4 more bits from guest real to host real addr.
+	 */
+	if (vsid & VSID_64K)
+		hpsize = MMU_PAGE_64K;
+	else
+		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
+
+	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

-	hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
+	cpte = kvmppc_mmu_hpte_cache_next(vcpu);
+
+	spin_lock(&kvm->mmu_lock);
+	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+		r = -EAGAIN;
+		goto out_unlock;
+	}

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -139,11 +169,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0) {
			r = -1;
-			goto out;
+			goto out_unlock;
		}

	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
-				 MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+				 hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
@@ -152,8 +182,6 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
		attempt++;
		goto map_again;
	} else {
-		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
-
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);
@@ -164,19 +192,37 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

-		pte->slot = hpteg + (ret & 7);
-		pte->host_vpn = vpn;
-		pte->pte = *orig_pte;
-		pte->pfn = hpaddr >> PAGE_SHIFT;
+		cpte->slot = hpteg + (ret & 7);
+		cpte->host_vpn = vpn;
+		cpte->pte = *orig_pte;
+		cpte->pfn = pfn;
+		cpte->pagesize = hpsize;

-		kvmppc_mmu_hpte_cache_map(vcpu, pte);
+		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
+		cpte = NULL;
	}

-	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+out_unlock:
+	spin_unlock(&kvm->mmu_lock);
+	kvm_release_pfn_clean(pfn);
+	if (cpte)
+		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}
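The reworked kvmppc_mmu_map_page() above follows the usual KVM MMU-notifier ordering: sample kvm->mmu_notifier_seq, do the possibly-sleeping gfn-to-pfn translation outside the lock, then re-check under kvm->mmu_lock before the HPTE is inserted and recorded. A schematic, self-contained sketch of that ordering, with stubbed types and hypothetical names rather than the real kernel API:

#include <stdbool.h>

/* Stand-ins for the kernel objects used above (illustrative only). */
struct fake_kvm { unsigned long mmu_notifier_seq; };

static bool fake_mmu_notifier_retry(struct fake_kvm *kvm, unsigned long seq)
{
	/* The real helper also checks for invalidations still in progress. */
	return kvm->mmu_notifier_seq != seq;
}

static int map_one_page(struct fake_kvm *kvm)
{
	unsigned long mmu_seq = kvm->mmu_notifier_seq;	/* 1. sample, then smp_rmb() */
	/* 2. gfn_to_pfn() happens here, outside the lock and possibly sleeping */
	/* 3. take kvm->mmu_lock, then re-check before touching the HPT */
	if (fake_mmu_notifier_retry(kvm, mmu_seq))
		return -1;	/* -EAGAIN in the real code: release the pfn and retry */
	/* 4. insert the HPTE and record it in the hpte_cache while still locked */
	return 0;
}

int main(void)
{
	struct fake_kvm kvm = { .mmu_notifier_seq = 0 };
	return map_one_page(&kvm);
}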
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
u64 mask = 0xfffffffffULL;
u64 vsid;
vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
if (vsid & VSID_64K)
mask = 0xffffffff0ULL;
kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
@@ -291,6 +337,12 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

+#ifdef CONFIG_PPC_64K_PAGES
+	/* Set host segment base page size to 64K if possible */
+	if (gvsid & VSID_64K)
+		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
+#endif
+
	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;
@@ -326,7 +378,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
	svcpu_put(svcpu);
}

-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
...
@@ -260,10 +260,6 @@ int kvmppc_mmu_hv_init(void)
	return 0;
}

-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
-}
-
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
@@ -451,7 +447,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-			struct kvmppc_pte *gpte, bool data)
+			struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
@@ -906,21 +902,22 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
	return 0;
}

-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
	return 0;
}

-void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+				  struct kvm_memory_slot *memslot)
{
	unsigned long *rmapp;
	unsigned long gfn;
@@ -994,7 +991,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
	return ret;
}

-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
@@ -1032,14 +1029,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
	return ret;
}

-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
@@ -1512,9 +1509,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
				(VRMA_VSID << SLB_VSID_SHIFT_1T);
-			lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
-			lpcr |= senc << (LPCR_VRMASD_SH - 4);
-			kvm->arch.lpcr = lpcr;
+			lpcr = senc << (LPCR_VRMASD_SH - 4);
+			kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
			rma_setup = 1;
		}
		++i;
...
@@ -74,3 +74,4 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
	/* Didn't find the liobn, punt it to userspace */
	return H_TOO_HARD;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
@@ -86,7 +86,7 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
	return true;
}

-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
@@ -172,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
		vcpu->arch.mmu.tlbie(vcpu, addr, large);
		break;
	}
-#ifdef CONFIG_KVM_BOOK3S_64_PR
+#ifdef CONFIG_PPC_BOOK3S_64
	case OP_31_XOP_FAKE_SC1:
	{
		/* SC 1 papr hypercalls */
@@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
		r = kvmppc_st(vcpu, &addr, 32, zeros, true);
		if ((r == -ENOENT) || (r == -EPERM)) {
-			struct kvmppc_book3s_shadow_vcpu *svcpu;
-
-			svcpu = svcpu_get(vcpu);
			*advance = 0;
			vcpu->arch.shared->dar = vaddr;
-			svcpu->fault_dar = vaddr;
+			vcpu->arch.fault_dar = vaddr;

			dsisr = DSISR_ISSTORE;
			if (r == -ENOENT)
@@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
				dsisr |= DSISR_PROTFAULT;

			vcpu->arch.shared->dsisr = dsisr;
-			svcpu->fault_dsisr = dsisr;
-			svcpu_put(svcpu);
+			vcpu->arch.fault_dsisr = dsisr;

			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -349,7 +345,7 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
	return bat;
}

-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;
@@ -472,7 +468,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
	return emulated;
}

-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;
...
@@ -20,9 +20,10 @@
#include <linux/export.h>
#include <asm/kvm_book3s.h>

-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
-#else
+#endif
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
...
@@ -158,9 +158,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	 * Interrupts are enabled again at this point.
	 */

-.global kvmppc_handler_highmem
-kvmppc_handler_highmem:
-
	/*
	 * Register usage at this point:
	 *
...
@@ -26,8 +26,12 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define FUNC(name)		GLUE(.,name)
+#define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU
#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)		name
+#define GET_SHADOW_VCPU(reg)	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)
#endif /* CONFIG_PPC_BOOK3S_XX */

#define VCPU_LOAD_NVGPRS(vcpu) \
@@ -87,8 +91,14 @@ kvm_start_entry:
	VCPU_LOAD_NVGPRS(r4)

kvm_start_lightweight:
+	/* Copy registers into shadow vcpu so we can access them in real mode */
+	GET_SHADOW_VCPU(r3)
+	bl	FUNC(kvmppc_copy_to_svcpu)
+	nop
+	REST_GPR(4, r1)

#ifdef CONFIG_PPC_BOOK3S_64
+	/* Get the dcbz32 flag */
	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
	stb	r3, HSTATE_RESTORE_HID5(r13)
@@ -111,9 +121,6 @@ kvm_start_lightweight:
	 *
	 */

-.global kvmppc_handler_highmem
-kvmppc_handler_highmem:
-
	/*
	 * Register usage at this point:
	 *
@@ -125,18 +132,31 @@ kvmppc_handler_highmem:
	 *
	 */

-	/* R7 = vcpu */
-	PPC_LL	r7, GPR4(r1)
+	/* Transfer reg values from shadow vcpu back to vcpu struct */
+	/* On 64-bit, interrupts are still off at this point */
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+	GET_SHADOW_VCPU(r4)
+	bl	FUNC(kvmppc_copy_from_svcpu)
+	nop

#ifdef CONFIG_PPC_BOOK3S_64
+	/* Re-enable interrupts */
+	ld	r3, HSTATE_HOST_MSR(r13)
+	ori	r3, r3, MSR_EE
+	MTMSR_EERI(r3)
+
	/*
	 * Reload kernel SPRG3 value.
	 * No need to save guest value as usermode can't modify SPRG3.
	 */
	ld	r3, PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3, r3
#endif /* CONFIG_PPC_BOOK3S_64 */

+	/* R7 = vcpu */
+	PPC_LL	r7, GPR4(r1)
+
	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
	PPC_STL	r16, VCPU_GPR(R16)(r7)
@@ -161,7 +181,7 @@ kvmppc_handler_highmem:
	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
-	bl	FUNC(kvmppc_handle_exit)
+	bl	FUNC(kvmppc_handle_exit_pr)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
...
@@ -260,6 +260,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
	 */
	return rc;
}
+EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);

void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
...
@@ -161,8 +161,8 @@ kvmppc_handler_trampoline_enter_end:
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

-.global kvmppc_interrupt
-kvmppc_interrupt:
+.global kvmppc_interrupt_pr
+kvmppc_interrupt_pr:

	/* Register usage at this point:
	 *
...
@@ -117,7 +117,7 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
-	  (MAS2_X0 | MAS2_X1)
+	  (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	  | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
...