Commit 3c536423 authored by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge some powerpc KVM patches from our topic branch.

In particular this brings in Nick's big series rewriting parts of the
guest entry/exit path in C.

Conflicts:
	arch/powerpc/kernel/security.c
	arch/powerpc/kvm/book3s_hv_rmhandlers.S
parents 07d8ad6f fae5c9f3
@@ -120,6 +120,7 @@ extern s32 patch__call_flush_branch_caches3;
 extern s32 patch__flush_count_cache_return;
 extern s32 patch__flush_link_stack_return;
 extern s32 patch__call_kvm_flush_link_stack;
+extern s32 patch__call_kvm_flush_link_stack_p9;
 extern s32 patch__memset_nocache, patch__memcpy_nocache;
 
 extern long flush_branch_caches;
@@ -140,7 +141,7 @@ void kvmhv_load_host_pmu(void);
 void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
 void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
 
-int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
 
 long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
 long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
...
@@ -35,6 +35,19 @@
 /* PACA save area size in u64 units (exgen, exmc, etc) */
 #define EX_SIZE 10
 
+/* PACA save area offsets */
+#define EX_R9 0
+#define EX_R10 8
+#define EX_R11 16
+#define EX_R12 24
+#define EX_R13 32
+#define EX_DAR 40
+#define EX_DSISR 48
+#define EX_CCR 52
+#define EX_CFAR 56
+#define EX_PPR 64
+#define EX_CTR 72
+
 /*
  * maximum recursive depth of MCE exceptions
  */
...
@@ -147,6 +147,7 @@
 #define KVM_GUEST_MODE_SKIP 2
 #define KVM_GUEST_MODE_GUEST_HV 3
 #define KVM_GUEST_MODE_HOST_HV 4
+#define KVM_GUEST_MODE_HV_P9 5 /* ISA >= v3.0 path */
 
 #define KVM_INST_FETCH_FAILED -1
...
@@ -153,9 +153,17 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
         return radix;
 }
 
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
+
 #define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
 #endif
 
+/*
+ * Invalid HDSISR value which is used to indicate when HW has not set the reg.
+ * Used to work around an errata.
+ */
+#define HDSISR_CANARY 0x7fff
+
 /*
  * We use a lock bit in HPTE dword 0 to synchronize updates and
  * accesses to each HPTE, and another bit to indicate non-present
...
@@ -297,7 +297,6 @@ struct kvm_arch {
         u8 fwnmi_enabled;
         u8 secure_guest;
         u8 svm_enabled;
-        bool threads_indep;
         bool nested_enable;
         bool dawr1_enabled;
         pgd_t *pgtable;
@@ -683,7 +682,12 @@ struct kvm_vcpu_arch {
         ulong fault_dar;
         u32 fault_dsisr;
         unsigned long intr_msr;
-        ulong fault_gpa; /* guest real address of page fault (POWER9) */
+        /*
+         * POWER9 and later: fault_gpa contains the guest real address of page
+         * fault for a radix guest, or segment descriptor (equivalent to result
+         * from slbmfev of SLB entry that translated the EA) for hash guests.
+         */
+        ulong fault_gpa;
 #endif
 
 #ifdef CONFIG_BOOKE
...
@@ -129,6 +129,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -606,6 +607,7 @@ extern void kvmppc_free_pimap(struct kvm *kvm);
 extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
 extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
 extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
 extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
 extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
 extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
@@ -638,6 +640,8 @@ static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
 static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
 static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
         { return 0; }
+static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+        { return 0; }
 #endif
 
 #ifdef CONFIG_KVM_XIVE
@@ -655,8 +659,6 @@ extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                 u32 *priority);
 extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
-extern void kvmppc_xive_init_module(void);
-extern void kvmppc_xive_exit_module(void);
 
 extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                                     struct kvm_vcpu *vcpu, u32 cpu);
@@ -671,6 +673,8 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
 extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                                int level, bool line_status);
 extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
 
 static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
 {
@@ -680,8 +684,6 @@ static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
 extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
                                            struct kvm_vcpu *vcpu, u32 cpu);
 extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_native_init_module(void);
-extern void kvmppc_xive_native_exit_module(void);
 extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
                                      union kvmppc_one_reg *val);
 extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
@@ -695,8 +697,6 @@ static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                        u32 *priority) { return -1; }
 static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
 static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
-static inline void kvmppc_xive_init_module(void) { }
-static inline void kvmppc_xive_exit_module(void) { }
 
 static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                                 struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
@@ -711,14 +711,14 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
 static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                                       int level, bool line_status) { return -ENODEV; }
 static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
 
 static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
         { return 0; }
 
 static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
                           struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
 static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_native_init_module(void) { }
-static inline void kvmppc_xive_native_exit_module(void) { }
 static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
                                             union kvmppc_one_reg *val)
         { return 0; }
@@ -754,7 +754,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                            unsigned long tce_value, unsigned long npages);
 long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                             unsigned int yield_count);
-long kvmppc_h_random(struct kvm_vcpu *vcpu);
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
 void kvmhv_commence_exit(int trap);
 void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
 void kvmppc_subcore_enter_guest(void);
...
@@ -121,12 +121,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
 }
 #endif
 
-#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
-#else
-static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
-#endif
-
 extern void switch_cop(struct mm_struct *next);
 extern int use_cop(unsigned long acop, struct mm_struct *mm);
 extern void drop_cop(unsigned long acop, struct mm_struct *mm);
...
@@ -97,6 +97,18 @@ extern void div128_by_32(u64 dividend_high, u64 dividend_low,
 extern void secondary_cpu_time_init(void);
 extern void __init time_init(void);
 
+#ifdef CONFIG_PPC64
+static inline unsigned long test_irq_work_pending(void)
+{
+        unsigned long x;
+
+        asm volatile("lbz %0,%1(13)"
+                : "=r" (x)
+                : "i" (offsetof(struct paca_struct, irq_work_pending)));
+        return x;
+}
+#endif
+
 DECLARE_PER_CPU(u64, decrementers_next_tb);
 
 /* Convert timebase ticks to nanoseconds */
...
@@ -473,7 +473,6 @@ int main(void)
         OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
         OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
         OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
-        OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
         OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
         OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
         OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
...
This diff is collapsed.
@@ -432,16 +432,19 @@ device_initcall(stf_barrier_debugfs_init);
 
 static void update_branch_cache_flush(void)
 {
-        u32 *site;
+        u32 *site, __maybe_unused *site2;
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
         site = &patch__call_kvm_flush_link_stack;
+        site2 = &patch__call_kvm_flush_link_stack_p9;
         // This controls the branch from guest_exit_cont to kvm_flush_link_stack
         if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
                 patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
+                patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
         } else {
                 // Could use HW flush, but that could also flush count cache
                 patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+                patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
         }
 #endif
...
@@ -508,16 +508,6 @@ EXPORT_SYMBOL(profile_pc);
  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
  */
 #ifdef CONFIG_PPC64
-static inline unsigned long test_irq_work_pending(void)
-{
-        unsigned long x;
-
-        asm volatile("lbz %0,%1(13)"
-                : "=r" (x)
-                : "i" (offsetof(struct paca_struct, irq_work_pending)));
-        return x;
-}
-
 static inline void set_irq_work_pending_flag(void)
 {
         asm volatile("stb %0,%1(13)" : :
...
@@ -57,6 +57,7 @@ kvm-pr-y := \
         book3s_32_mmu.o
 
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+        book3s_64_entry.o \
         tm.o
 
 ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -86,6 +87,7 @@ kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
 ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
         book3s_hv_hmi.o \
+        book3s_hv_p9_entry.o \
         book3s_hv_rmhandlers.o \
         book3s_hv_rm_mmu.o \
         book3s_hv_ras.o \
...
@@ -171,6 +171,12 @@ void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
 
+void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
+{
+        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
+}
+EXPORT_SYMBOL(kvmppc_core_queue_syscall);
+
 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
         /* might as well deliver this straight away */
@@ -1044,13 +1050,10 @@ static int kvmppc_book3s_init(void)
 #ifdef CONFIG_KVM_XICS
 #ifdef CONFIG_KVM_XIVE
         if (xics_on_xive()) {
-                kvmppc_xive_init_module();
                 kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
-                if (kvmppc_xive_native_supported()) {
-                        kvmppc_xive_native_init_module();
+                if (kvmppc_xive_native_supported())
                         kvm_register_device_ops(&kvm_xive_native_ops,
                                                 KVM_DEV_TYPE_XIVE);
-                }
         } else
 #endif
                 kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
@@ -1060,12 +1063,6 @@ static int kvmppc_book3s_init(void)
 
 static void kvmppc_book3s_exit(void)
 {
-#ifdef CONFIG_KVM_XICS
-        if (xics_on_xive()) {
-                kvmppc_xive_exit_module();
-                kvmppc_xive_native_exit_module();
-        }
-#endif
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
         kvmppc_book3s_exit_pr();
 #endif
...
This diff is collapsed.
@@ -391,10 +391,6 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
         /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
         /*          liobn, ioba, tce); */
 
-        /* For radix, we might be in virtual mode, so punt */
-        if (kvm_is_radix(vcpu->kvm))
-                return H_TOO_HARD;
-
         stt = kvmppc_find_table(vcpu->kvm, liobn);
         if (!stt)
                 return H_TOO_HARD;
@@ -489,10 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
         bool prereg = false;
         struct kvmppc_spapr_tce_iommu_table *stit;
 
-        /* For radix, we might be in virtual mode, so punt */
-        if (kvm_is_radix(vcpu->kvm))
-                return H_TOO_HARD;
-
         /*
          * used to check for invalidations in progress
          */
@@ -602,10 +594,6 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
         long i, ret;
         struct kvmppc_spapr_tce_iommu_table *stit;
 
-        /* For radix, we might be in virtual mode, so punt */
-        if (kvm_is_radix(vcpu->kvm))
-                return H_TOO_HARD;
-
         stt = kvmppc_find_table(vcpu->kvm, liobn);
         if (!stt)
                 return H_TOO_HARD;
...
This diff is collapsed.
@@ -34,21 +34,6 @@
 #include "book3s_xics.h"
 #include "book3s_xive.h"
 
-/*
- * The XIVE module will populate these when it loads
- */
-unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
-                       unsigned long mfrr);
-int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
-EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
-
 /*
  * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
  * should be power of 2.
@@ -196,16 +181,9 @@ int kvmppc_hwrng_present(void)
 }
 EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
 
-long kvmppc_h_random(struct kvm_vcpu *vcpu)
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
 {
-        int r;
-
-        /* Only need to do the expensive mfmsr() on radix */
-        if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
-                r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
-        else
-                r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
-        if (r)
+        if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
                 return H_SUCCESS;
 
         return H_HARDWARE;
@@ -221,15 +199,6 @@ void kvmhv_rm_send_ipi(int cpu)
         void __iomem *xics_phys;
         unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 
-        /* For a nested hypervisor, use the XICS via hcall */
-        if (kvmhv_on_pseries()) {
-                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
-                                IPI_PRIORITY);
-                return;
-        }
-
         /* On POWER9 we can use msgsnd for any destination cpu. */
         if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                 msg |= get_hard_smp_processor_id(cpu);
@@ -442,19 +411,12 @@ static long kvmppc_read_one_intr(bool *again)
                 return 1;
 
         /* Now read the interrupt from the ICP */
-        if (kvmhv_on_pseries()) {
-                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
-                xirr = cpu_to_be32(retbuf[0]);
-        } else {
-                xics_phys = local_paca->kvm_hstate.xics_phys;
-                rc = 0;
-                if (!xics_phys)
-                        rc = opal_int_get_xirr(&xirr, false);
-                else
-                        xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
-        }
+        xics_phys = local_paca->kvm_hstate.xics_phys;
+        rc = 0;
+        if (!xics_phys)
+                rc = opal_int_get_xirr(&xirr, false);
+        else
+                xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
         if (rc < 0)
                 return 1;
@@ -483,13 +445,7 @@ static long kvmppc_read_one_intr(bool *again)
          */
         if (xisr == XICS_IPI) {
                 rc = 0;
-                if (kvmhv_on_pseries()) {
-                        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                        plpar_hcall_raw(H_IPI, retbuf,
-                                        hard_smp_processor_id(), 0xff);
-                        plpar_hcall_raw(H_EOI, retbuf, h_xirr);
-                } else if (xics_phys) {
+                if (xics_phys) {
                         __raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
                         __raw_rm_writel(xirr, xics_phys + XICS_XIRR);
                 } else {
@@ -515,13 +471,7 @@ static long kvmppc_read_one_intr(bool *again)
                         /* We raced with the host,
                          * we need to resend that IPI, bummer
                          */
-                        if (kvmhv_on_pseries()) {
-                                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                                plpar_hcall_raw(H_IPI, retbuf,
-                                                hard_smp_processor_id(),
-                                                IPI_PRIORITY);
-                        } else if (xics_phys)
+                        if (xics_phys)
                                 __raw_rm_writeb(IPI_PRIORITY,
                                                 xics_phys + XICS_MFRR);
                         else
@@ -541,22 +491,13 @@ static long kvmppc_read_one_intr(bool *again)
 }
 
 #ifdef CONFIG_KVM_XICS
-static inline bool is_rm(void)
-{
-        return !(mfmsr() & MSR_DR);
-}
-
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 {
         if (!kvmppc_xics_enabled(vcpu))
                 return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_xirr(vcpu);
-                if (unlikely(!__xive_vm_h_xirr))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_xirr(vcpu);
-        } else
+        if (xics_on_xive())
+                return xive_rm_h_xirr(vcpu);
+        else
                 return xics_rm_h_xirr(vcpu);
 }
@@ -565,13 +506,9 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
         if (!kvmppc_xics_enabled(vcpu))
                 return H_TOO_HARD;
         vcpu->arch.regs.gpr[5] = get_tb();
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_xirr(vcpu);
-                if (unlikely(!__xive_vm_h_xirr))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_xirr(vcpu);
-        } else
+        if (xics_on_xive())
+                return xive_rm_h_xirr(vcpu);
+        else
                 return xics_rm_h_xirr(vcpu);
 }
@@ -579,13 +516,9 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 {
         if (!kvmppc_xics_enabled(vcpu))
                 return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_ipoll(vcpu, server);
-                if (unlikely(!__xive_vm_h_ipoll))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_ipoll(vcpu, server);
-        } else
+        if (xics_on_xive())
+                return xive_rm_h_ipoll(vcpu, server);
+        else
                 return H_TOO_HARD;
 }
@@ -594,13 +527,9 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 {
         if (!kvmppc_xics_enabled(vcpu))
                 return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_ipi(vcpu, server, mfrr);
-                if (unlikely(!__xive_vm_h_ipi))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_ipi(vcpu, server, mfrr);
-        } else
+        if (xics_on_xive())
+                return xive_rm_h_ipi(vcpu, server, mfrr);
+        else
                 return xics_rm_h_ipi(vcpu, server, mfrr);
 }
@@ -608,13 +537,9 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
         if (!kvmppc_xics_enabled(vcpu))
                 return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_cppr(vcpu, cppr);
-                if (unlikely(!__xive_vm_h_cppr))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_cppr(vcpu, cppr);
-        } else
+        if (xics_on_xive())
+                return xive_rm_h_cppr(vcpu, cppr);
+        else
                 return xics_rm_h_cppr(vcpu, cppr);
 }
@@ -622,13 +547,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 {
         if (!kvmppc_xics_enabled(vcpu))
                 return H_TOO_HARD;
-        if (xics_on_xive()) {
-                if (is_rm())
-                        return xive_rm_h_eoi(vcpu, xirr);
-                if (unlikely(!__xive_vm_h_eoi))
-                        return H_NOT_AVAILABLE;
-                return __xive_vm_h_eoi(vcpu, xirr);
-        } else
+        if (xics_on_xive())
+                return xive_rm_h_eoi(vcpu, xirr);
+        else
                 return xics_rm_h_eoi(vcpu, xirr);
 }
 #endif /* CONFIG_KVM_XICS */
...
@@ -58,7 +58,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
         /*
          * Put whatever is in the decrementer into the
          * hypervisor decrementer.
-         * Because of a hardware deviation in P8 and P9,
+         * Because of a hardware deviation in P8,
          * we need to set LPCR[HDICE] before writing HDEC.
          */
         ld      r5, HSTATE_KVM_VCORE(r13)
@@ -67,15 +67,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
         ori     r8, r9, LPCR_HDICE
         mtspr   SPRN_LPCR, r8
         isync
-        andis.  r0, r9, LPCR_LD@h
         mfspr   r8,SPRN_DEC
         mftb    r7
-BEGIN_FTR_SECTION
-        /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
-        bne     32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
         extsw   r8,r8
-32:     mtspr   SPRN_HDEC,r8
+        mtspr   SPRN_HDEC,r8
         add     r8,r8,r7
         std     r8,HSTATE_DECEXP(r13)
...
This diff is collapsed.
@@ -46,6 +46,10 @@ static int global_invalidates(struct kvm *kvm)
         else
                 global = 1;
 
+        /* LPID has been switched to host if in virt mode so can't do local */
+        if (!global && (mfmsr() & (MSR_IR|MSR_DR)))
+                global = 1;
+
         if (!global) {
                 /* any other core might now have stale TLB entries... */
                 smp_wmb();
@@ -398,6 +402,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                                  vcpu->arch.pgdir, true,
                                  &vcpu->arch.regs.gpr[4]);
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_enter);
 
 #ifdef __BIG_ENDIAN__
 #define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
@@ -542,6 +547,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
         return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
                                   &vcpu->arch.regs.gpr[4]);
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_remove);
 
 long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 {
@@ -660,6 +666,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
         return ret;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);
 
 long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                       unsigned long pte_index, unsigned long avpn)
@@ -730,6 +737,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
         return H_SUCCESS;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_protect);
 
 long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                    unsigned long pte_index)
@@ -770,6 +778,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
         }
         return H_SUCCESS;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_read);
 
 long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                         unsigned long pte_index)
@@ -818,6 +827,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
         unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
         return ret;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);
 
 long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                         unsigned long pte_index)
@@ -865,6 +875,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
         unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
         return ret;
 }
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);
 
 static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
                           unsigned long gpa, int writing, unsigned long *hpa,
@@ -1283,3 +1294,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
         return -1;              /* send fault up to host kernel mode */
 }
+EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
@@ -141,13 +141,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
                 return;
         }
 
-        if (xive_enabled() && kvmhv_on_pseries()) {
-                /* No XICS access or hypercalls available, too hard */
-                this_icp->rm_action |= XICS_RM_KICK_VCPU;
-                this_icp->rm_kick_target = vcpu;
-                return;
-        }
-
         /*
          * Check if the core is loaded,
          * if not, find an available host core to post to wake the VCPU,
@@ -771,14 +764,6 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
         void __iomem *xics_phys;
         int64_t rc;
 
-        if (kvmhv_on_pseries()) {
-                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
-                iosync();
-                plpar_hcall_raw(H_EOI, retbuf, hwirq);
-                return;
-        }
-
         rc = pnv_opal_pci_msi_eoi(c, hwirq);
 
         if (rc)
...
This diff is collapsed.
@@ -164,12 +164,15 @@ kvmppc_interrupt_pr:
         /* 64-bit entry. Register usage at this point:
          *
          * SPRG_SCRATCH0   = guest R13
+         * R9              = HSTATE_IN_GUEST
          * R12             = (guest CR << 32) | exit handler id
          * R13             = PACA
          * HSTATE.SCRATCH0 = guest R12
+         * HSTATE.SCRATCH2 = guest R9
          */
 #ifdef CONFIG_PPC64
         /* Match 32-bit entry */
+        ld      r9,HSTATE_SCRATCH2(r13)
         rotldi  r12, r12, 32              /* Flip R12 halves for stw */
         stw     r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
         srdi    r12, r12, 32              /* shift trap into low half */
...
@@ -127,6 +127,71 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
 
+/*
+ * Pull a vcpu's context from the XIVE on guest exit.
+ * This assumes we are in virtual mode (MMU on)
+ */
+void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
+{
+        void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
+
+        if (!vcpu->arch.xive_pushed)
+                return;
+
+        /*
+         * Should not have been pushed if there is no tima
+         */
+        if (WARN_ON(!tima))
+                return;
+
+        eieio();
+        /* First load to pull the context, we ignore the value */
+        __raw_readl(tima + TM_SPC_PULL_OS_CTX);
+        /* Second load to recover the context state (Words 0 and 1) */
+        vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);
+
+        /* Fixup some of the state for the next load */
+        vcpu->arch.xive_saved_state.lsmfb = 0;
+        vcpu->arch.xive_saved_state.ack = 0xff;
+        vcpu->arch.xive_pushed = 0;
+        eieio();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
+
+void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+{
+        void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+
+        if (!esc_vaddr)
+                return;
+
+        /* we are using XIVE with single escalation */
+
+        if (vcpu->arch.xive_esc_on) {
+                /*
+                 * If we still have a pending escalation, abort the cede,
+                 * and we must set PQ to 10 rather than 00 so that we don't
+                 * potentially end up with two entries for the escalation
+                 * interrupt in the XIVE interrupt queue.  In that case
+                 * we also don't want to set xive_esc_on to 1 here in
+                 * case we race with xive_esc_irq().
+                 */
+                vcpu->arch.ceded = 0;
+                /*
+                 * The escalation interrupts are special as we don't EOI them.
+                 * There is no need to use the load-after-store ordering offset
+                 * to set PQ to 10 as we won't use StoreEOI.
+                 */
+                __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
+        } else {
+                vcpu->arch.xive_esc_on = true;
+                mb();
+                __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
+        }
+        mb();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
+
 /*
  * This is a simple trigger for a generic XIVE IRQ. This must
  * only be called for interrupts that support a trigger page
@@ -2075,6 +2140,36 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
         return 0;
 }
 
+int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+{
+        struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+        /* The VM should have configured XICS mode before doing XICS hcalls. */
+        if (!kvmppc_xics_enabled(vcpu))
+                return H_TOO_HARD;
+
+        switch (req) {
+        case H_XIRR:
+                return xive_vm_h_xirr(vcpu);
+        case H_CPPR:
+                return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
+        case H_EOI:
+                return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
+        case H_IPI:
+                return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                          kvmppc_get_gpr(vcpu, 5));
+        case H_IPOLL:
+                return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+        case H_XIRR_X:
+                xive_vm_h_xirr(vcpu);
+                kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
+                return H_SUCCESS;
+        }
+
+        return H_UNSUPPORTED;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
+
 int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
 {
         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -2257,21 +2352,3 @@ struct kvm_device_ops kvm_xive_ops = {
         .get_attr = xive_get_attr,
         .has_attr = xive_has_attr,
 };
-
-void kvmppc_xive_init_module(void)
-{
-        __xive_vm_h_xirr = xive_vm_h_xirr;
-        __xive_vm_h_ipoll = xive_vm_h_ipoll;
-        __xive_vm_h_ipi = xive_vm_h_ipi;
-        __xive_vm_h_cppr = xive_vm_h_cppr;
-        __xive_vm_h_eoi = xive_vm_h_eoi;
-}
-
-void kvmppc_xive_exit_module(void)
-{
-        __xive_vm_h_xirr = NULL;
-        __xive_vm_h_ipoll = NULL;
-        __xive_vm_h_ipi = NULL;
-        __xive_vm_h_cppr = NULL;
-        __xive_vm_h_eoi = NULL;
-}
@@ -289,13 +289,6 @@ extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
 extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
 
-extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
-                              unsigned long mfrr);
-extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-
 /*
  * Common Xive routines for XICS-over-XIVE and XIVE native
  */
...
@@ -1281,13 +1281,3 @@ struct kvm_device_ops kvm_xive_native_ops = {
         .has_attr = kvmppc_xive_native_has_attr,
         .mmap = kvmppc_xive_native_mmap,
 };
-
-void kvmppc_xive_native_init_module(void)
-{
-        ;
-}
-
-void kvmppc_xive_native_exit_module(void)
-{
-        ;
-}
@@ -357,30 +357,19 @@ static void __init radix_init_pgtable(void)
         }
 
         /* Find out how many PID bits are supported */
-        if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
-                if (!mmu_pid_bits)
-                        mmu_pid_bits = 20;
-                mmu_base_pid = 1;
-        } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
-                if (!mmu_pid_bits)
-                        mmu_pid_bits = 20;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+        if (!cpu_has_feature(CPU_FTR_HVMODE) &&
+            cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
                 /*
-                 * When KVM is possible, we only use the top half of the
-                 * PID space to avoid collisions between host and guest PIDs
-                 * which can cause problems due to prefetch when exiting the
-                 * guest with AIL=3
+                 * Older versions of KVM on these machines perfer if the
+                 * guest only uses the low 19 PID bits.
                  */
-                mmu_base_pid = 1 << (mmu_pid_bits - 1);
-#else
-                mmu_base_pid = 1;
-#endif
-        } else {
-                /* The guest uses the bottom half of the PID space */
                 if (!mmu_pid_bits)
                         mmu_pid_bits = 19;
-                mmu_base_pid = 1;
+        } else {
+                if (!mmu_pid_bits)
+                        mmu_pid_bits = 20;
         }
+        mmu_base_pid = 1;
 
         /*
          * Allocate Partition table and process table for the
...
@@ -1344,49 +1344,3 @@ void radix__flush_tlb_all(void)
                      : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
         asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
-{
-        unsigned long pid = mm->context.id;
-
-        if (unlikely(pid == MMU_NO_CONTEXT))
-                return;
-
-        if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
-                return;
-
-        /*
-         * If this context hasn't run on that CPU before and KVM is
-         * around, there's a slim chance that the guest on another
-         * CPU just brought in obsolete translation into the TLB of
-         * this CPU due to a bad prefetch using the guest PID on
-         * the way into the hypervisor.
-         *
-         * We work around this here. If KVM is possible, we check if
-         * any sibling thread is in KVM. If it is, the window may exist
-         * and thus we flush that PID from the core.
-         *
-         * A potential future improvement would be to mark which PIDs
-         * have never been used on the system and avoid it if the PID
-         * is new and the process has no other cpumask bit set.
-         */
-        if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
-                int cpu = smp_processor_id();
-                int sib = cpu_first_thread_sibling(cpu);
-                bool flush = false;
-
-                for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
-                        if (sib == cpu)
-                                continue;
-                        if (!cpu_possible(sib))
-                                continue;
-                        if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
-                                flush = true;
-                }
-                if (flush)
-                        _tlbiel_pid(pid, RIC_FLUSH_ALL);
-        }
-}
-EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
-#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
@@ -83,9 +83,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         if (cpu_has_feature(CPU_FTR_ALTIVEC))
                 asm volatile ("dssall");
 
-        if (new_on_cpu)
-                radix_kvm_prefetch_workaround(next);
-        else
+        if (!new_on_cpu)
                 membarrier_arch_switch_mm(prev, next, tsk);
 
         /*
...
@@ -604,7 +604,7 @@ struct p9_sprs {
         u64 uamor;
 };
 
-static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power9_idle_stop(unsigned long psscr)
 {
         int cpu = raw_smp_processor_id();
         int first = cpu_first_thread_sibling(cpu);
@@ -620,8 +620,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
         if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
                 /* EC=ESL=0 case */
 
-                BUG_ON(!mmu_on);
-
                 /*
                  * Wake synchronously. SRESET via xscom may still cause
                  * a 0x100 powersave wakeup with SRR1 reason!
@@ -803,8 +801,7 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
                 __slb_restore_bolted_realmode();
 
 out:
-        if (mmu_on)
-                mtmsr(MSR_KERNEL);
+        mtmsr(MSR_KERNEL);
 
         return srr1;
 }
@@ -895,7 +892,7 @@ struct p10_sprs {
          */
 };
 
-static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power10_idle_stop(unsigned long psscr)
 {
         int cpu = raw_smp_processor_id();
         int first = cpu_first_thread_sibling(cpu);
@@ -909,8 +906,6 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
         if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
                 /* EC=ESL=0 case */
 
-                BUG_ON(!mmu_on);
-
                 /*
                  * Wake synchronously. SRESET via xscom may still cause
                  * a 0x100 powersave wakeup with SRR1 reason!
@@ -991,8 +986,7 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
                 __slb_restore_bolted_realmode();
 
 out:
-        if (mmu_on)
-                mtmsr(MSR_KERNEL);
+        mtmsr(MSR_KERNEL);
 
         return srr1;
 }
@@ -1002,40 +996,10 @@ static unsigned long arch300_offline_stop(unsigned long psscr)
 {
         unsigned long srr1;
 
-#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-        __ppc64_runlatch_off();
         if (cpu_has_feature(CPU_FTR_ARCH_31))
-                srr1 = power10_idle_stop(psscr, true);
+                srr1 = power10_idle_stop(psscr);
         else
-                srr1 = power9_idle_stop(psscr, true);
-        __ppc64_runlatch_on();
-#else
-        /*
-         * Tell KVM we're entering idle.
-         * This does not have to be done in real mode because the P9 MMU
-         * is independent per-thread. Some steppings share radix/hash mode
-         * between threads, but in that case KVM has a barrier sync in real
-         * mode before and after switching between radix and hash.
-         *
-         * kvm_start_guest must still be called in real mode though, hence
-         * the false argument.
-         */
-        local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
-
-        __ppc64_runlatch_off();
-        if (cpu_has_feature(CPU_FTR_ARCH_31))
-                srr1 = power10_idle_stop(psscr, false);
-        else
-                srr1 = power9_idle_stop(psscr, false);
-        __ppc64_runlatch_on();
-
-        local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
-        /* Order setting hwthread_state vs. testing hwthread_req */
-        smp_mb();
-        if (local_paca->kvm_hstate.hwthread_req)
-                srr1 = idle_kvm_start_guest(srr1);
-        mtmsr(MSR_KERNEL);
-#endif
+                srr1 = power9_idle_stop(psscr);
 
         return srr1;
 }
@@ -1055,9 +1019,9 @@ void arch300_idle_type(unsigned long stop_psscr_val,
 
         __ppc64_runlatch_off();
         if (cpu_has_feature(CPU_FTR_ARCH_31))
-                srr1 = power10_idle_stop(psscr, true);
+                srr1 = power10_idle_stop(psscr);
         else
-                srr1 = power9_idle_stop(psscr, true);
+                srr1 = power9_idle_stop(psscr);
         __ppc64_runlatch_on();
         fini_irq_for_idle_irqsoff();
...