Commit b104e41c authored by Michael Ellerman

Merge branch 'topic/ppc-kvm' into next

Merge our KVM topic branch.
parents a5fc286f ad55bae7
...@@ -51,13 +51,11 @@ struct iommu_table_ops { ...@@ -51,13 +51,11 @@ struct iommu_table_ops {
int (*xchg_no_kill)(struct iommu_table *tbl, int (*xchg_no_kill)(struct iommu_table *tbl,
long index, long index,
unsigned long *hpa, unsigned long *hpa,
enum dma_data_direction *direction, enum dma_data_direction *direction);
bool realmode);
void (*tce_kill)(struct iommu_table *tbl, void (*tce_kill)(struct iommu_table *tbl,
unsigned long index, unsigned long index,
unsigned long pages, unsigned long pages);
bool realmode);
__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc); __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif #endif
......
...@@ -14,9 +14,6 @@ ...@@ -14,9 +14,6 @@
#define XICS_MFRR 0xc #define XICS_MFRR 0xc
#define XICS_IPI 2 /* interrupt source # for IPIs */ #define XICS_IPI 2 /* interrupt source # for IPIs */
/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
/* Maximum number of threads per physical core */ /* Maximum number of threads per physical core */
#define MAX_SMT_THREADS 8 #define MAX_SMT_THREADS 8
......
...@@ -36,7 +36,12 @@ ...@@ -36,7 +36,12 @@
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */ #include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */
#define KVM_MAX_VCPU_IDS (MAX_SMT_THREADS * KVM_MAX_VCORES) #define KVM_MAX_VCPU_IDS (MAX_SMT_THREADS * KVM_MAX_VCORES)
#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS
/*
* Limit the nested partition table to 4096 entries (because that's what
* hardware supports). Both guest and host use this value.
*/
#define KVM_MAX_NESTED_GUESTS_SHIFT 12
#else #else
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS #define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
...@@ -327,8 +332,7 @@ struct kvm_arch { ...@@ -327,8 +332,7 @@ struct kvm_arch {
struct list_head uvmem_pfns; struct list_head uvmem_pfns;
struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */ struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
u64 l1_ptcr; u64 l1_ptcr;
int max_nested_lpid; struct idr kvm_nested_guest_idr;
struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
/* This array can grow quite large, keep it at the end */ /* This array can grow quite large, keep it at the end */
struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
#endif #endif
......
...@@ -177,8 +177,6 @@ extern void kvmppc_setup_partition_table(struct kvm *kvm); ...@@ -177,8 +177,6 @@ extern void kvmppc_setup_partition_table(struct kvm *kvm);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args); struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages) \ #define kvmppc_ioba_validate(stt, ioba, npages) \
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
(stt)->size, (ioba), (npages)) ? \ (stt)->size, (ioba), (npages)) ? \
...@@ -685,7 +683,7 @@ extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, ...@@ -685,7 +683,7 @@ extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status); int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu); extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu); extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu); extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu) static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{ {
...@@ -723,7 +721,7 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir ...@@ -723,7 +721,7 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir
int level, bool line_status) { return -ENODEV; } int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { } static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { } static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { } static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu) static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{ return 0; } { return 0; }
...@@ -789,13 +787,6 @@ long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, ...@@ -789,13 +787,6 @@ long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long dest, unsigned long src); unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
unsigned long slb_v, unsigned int status, bool data); unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu); void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
/* /*
...@@ -877,7 +868,6 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, ...@@ -877,7 +868,6 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
struct kvm_dirty_tlb *cfg); struct kvm_dirty_tlb *cfg);
long kvmppc_alloc_lpid(void); long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid); void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids); void kvmppc_init_lpid(unsigned long nr_lpids);
......
...@@ -34,15 +34,10 @@ extern void mm_iommu_init(struct mm_struct *mm); ...@@ -34,15 +34,10 @@ extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm); extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
unsigned long ua, unsigned long size); unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries); unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa); unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
unsigned int pageshift, unsigned long *size); unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
......
...@@ -417,7 +417,6 @@ ...@@ -417,7 +417,6 @@
#define FSCR_DSCR __MASK(FSCR_DSCR_LG) #define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */ #define FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
#define HFSCR_PREFIX __MASK(FSCR_PREFIX_LG)
#define HFSCR_MSGP __MASK(FSCR_MSGP_LG) #define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG) #define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG) #define HFSCR_EBB __MASK(FSCR_EBB_LG)
...@@ -474,8 +473,6 @@ ...@@ -474,8 +473,6 @@
#ifndef SPRN_LPID #ifndef SPRN_LPID
#define SPRN_LPID 0x13F /* Logical Partition Identifier */ #define SPRN_LPID 0x13F /* Logical Partition Identifier */
#endif #endif
#define LPID_RSVD_POWER7 0x3ff /* Reserved LPID for partn switching */
#define LPID_RSVD 0xfff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hypervisor maintenance exception reg */ #define SPRN_HMER 0x150 /* Hypervisor maintenance exception reg */
#define HMER_DEBUG_TRIG (1ul << (63 - 17)) /* Debug trigger */ #define HMER_DEBUG_TRIG (1ul << (63 - 17)) /* Debug trigger */
#define SPRN_HMEER 0x151 /* Hyp maintenance exception enable reg */ #define SPRN_HMEER 0x151 /* Hyp maintenance exception enable reg */
......
...@@ -1064,7 +1064,7 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm, ...@@ -1064,7 +1064,7 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
long ret; long ret;
unsigned long size = 0; unsigned long size = 0;
ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false); ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
if (!ret && ((*direction == DMA_FROM_DEVICE) || if (!ret && ((*direction == DMA_FROM_DEVICE) ||
(*direction == DMA_BIDIRECTIONAL)) && (*direction == DMA_BIDIRECTIONAL)) &&
!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
...@@ -1079,7 +1079,7 @@ void iommu_tce_kill(struct iommu_table *tbl, ...@@ -1079,7 +1079,7 @@ void iommu_tce_kill(struct iommu_table *tbl,
unsigned long entry, unsigned long pages) unsigned long entry, unsigned long pages)
{ {
if (tbl->it_ops->tce_kill) if (tbl->it_ops->tce_kill)
tbl->it_ops->tce_kill(tbl, entry, pages, false); tbl->it_ops->tce_kill(tbl, entry, pages);
} }
EXPORT_SYMBOL_GPL(iommu_tce_kill); EXPORT_SYMBOL_GPL(iommu_tce_kill);
......
...@@ -37,9 +37,6 @@ kvm-e500mc-objs := \ ...@@ -37,9 +37,6 @@ kvm-e500mc-objs := \
e500_emulate.o e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \
book3s_64_vio_hv.o
kvm-pr-y := \ kvm-pr-y := \
fpu.o \ fpu.o \
emulate.o \ emulate.o \
...@@ -76,7 +73,7 @@ kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ ...@@ -76,7 +73,7 @@ kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm.o book3s_hv_tm.o
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \ kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
book3s_hv_rm_xics.o book3s_hv_rm_xive.o book3s_hv_rm_xics.o
kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm_builtin.o book3s_hv_tm_builtin.o
......
...@@ -256,26 +256,34 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, ...@@ -256,26 +256,34 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
int kvmppc_mmu_hv_init(void) int kvmppc_mmu_hv_init(void)
{ {
unsigned long host_lpid, rsvd_lpid; unsigned long nr_lpids;
if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
return -EINVAL; return -EINVAL;
host_lpid = 0; if (cpu_has_feature(CPU_FTR_HVMODE)) {
if (cpu_has_feature(CPU_FTR_HVMODE)) if (WARN_ON(mfspr(SPRN_LPID) != 0))
host_lpid = mfspr(SPRN_LPID); return -EINVAL;
nr_lpids = 1UL << mmu_lpid_bits;
} else {
nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT;
}
/* POWER8 and above have 12-bit LPIDs (10-bit in POWER7) */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
if (cpu_has_feature(CPU_FTR_ARCH_207S)) /* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */
rsvd_lpid = LPID_RSVD; if (cpu_has_feature(CPU_FTR_ARCH_207S))
else WARN_ON(nr_lpids != 1UL << 12);
rsvd_lpid = LPID_RSVD_POWER7; else
WARN_ON(nr_lpids != 1UL << 10);
kvmppc_init_lpid(rsvd_lpid + 1); /*
* Reserve the last implemented LPID use in partition
* switching for POWER7 and POWER8.
*/
nr_lpids -= 1;
}
kvmppc_claim_lpid(host_lpid); kvmppc_init_lpid(nr_lpids);
/* rsvd_lpid is reserved for use in partition switching */
kvmppc_claim_lpid(rsvd_lpid);
return 0; return 0;
} }
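Aside (not part of the diff): a worked example of the LPID counts the new kvmppc_mmu_hv_init() arrives at, assuming the usual implemented LPID widths (values other than those checked by the WARN_ONs above are assumptions).

/*
 * POWER9+ HV host:   nr_lpids = 1 << mmu_lpid_bits (typically 1 << 12 = 4096),
 *                    nothing reserved
 * POWER8 HV host:    1 << 12 = 4096, minus 1 reserved for partition
 *                    switching = 4095
 * POWER7 HV host:    1 << 10 = 1024, minus 1 = 1023
 * pseries / nested:  1 << KVM_MAX_NESTED_GUESTS_SHIFT = 4096
 *
 * LPID 0 is reserved for the host (the WARN_ON above checks it in HV
 * mode), so the IDA-based allocator later in this diff hands out guest
 * LPIDs starting at 1.
 */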
...@@ -879,7 +887,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, ...@@ -879,7 +887,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
struct revmap_entry *rev = kvm->arch.hpt.rev; struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j; unsigned long head, i, j;
__be64 *hptep; __be64 *hptep;
int ret = 0; bool ret = false;
unsigned long *rmapp; unsigned long *rmapp;
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
...@@ -887,7 +895,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, ...@@ -887,7 +895,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
lock_rmap(rmapp); lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_REFERENCED) { if (*rmapp & KVMPPC_RMAP_REFERENCED) {
*rmapp &= ~KVMPPC_RMAP_REFERENCED; *rmapp &= ~KVMPPC_RMAP_REFERENCED;
ret = 1; ret = true;
} }
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp); unlock_rmap(rmapp);
...@@ -919,7 +927,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, ...@@ -919,7 +927,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
rev[i].guest_rpte |= HPTE_R_R; rev[i].guest_rpte |= HPTE_R_R;
note_hpte_modification(kvm, &rev[i]); note_hpte_modification(kvm, &rev[i]);
} }
ret = 1; ret = true;
} }
__unlock_hpte(hptep, be64_to_cpu(hptep[0])); __unlock_hpte(hptep, be64_to_cpu(hptep[0]));
} while ((i = j) != head); } while ((i = j) != head);
......
...@@ -32,6 +32,18 @@ ...@@ -32,6 +32,18 @@
#include <asm/tce.h> #include <asm/tce.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
unsigned long liobn)
{
struct kvmppc_spapr_tce_table *stt;
list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
if (stt->liobn == liobn)
return stt;
return NULL;
}
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages) static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{ {
return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
...@@ -753,3 +765,34 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, ...@@ -753,3 +765,34 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce); EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba)
{
struct kvmppc_spapr_tce_table *stt;
long ret;
unsigned long idx;
struct page *page;
u64 *tbl;
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
ret = kvmppc_ioba_validate(stt, ioba, 1);
if (ret != H_SUCCESS)
return ret;
idx = (ioba >> stt->page_shift) - stt->offset;
page = stt->pages[idx / TCES_PER_PAGE];
if (!page) {
vcpu->arch.regs.gpr[4] = 0;
return H_SUCCESS;
}
tbl = (u64 *)page_address(page);
vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
...@@ -1327,6 +1327,12 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd) ...@@ -1327,6 +1327,12 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_CONFER: case H_CONFER:
case H_REGISTER_VPA: case H_REGISTER_VPA:
case H_SET_MODE: case H_SET_MODE:
#ifdef CONFIG_SPAPR_TCE_IOMMU
case H_GET_TCE:
case H_PUT_TCE:
case H_PUT_TCE_INDIRECT:
case H_STUFF_TCE:
#endif
case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_LOAD:
case H_LOGICAL_CI_STORE: case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS #ifdef CONFIG_KVM_XICS
...@@ -2835,7 +2841,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) ...@@ -2835,7 +2841,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
* to trap and then we emulate them. * to trap and then we emulate them.
*/ */
vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX; HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
if (cpu_has_feature(CPU_FTR_HVMODE)) { if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
...@@ -3968,6 +3974,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns ...@@ -3968,6 +3974,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
kvmhv_save_hv_regs(vcpu, &hvregs); kvmhv_save_hv_regs(vcpu, &hvregs);
hvregs.lpcr = lpcr; hvregs.lpcr = lpcr;
hvregs.amor = ~0;
vcpu->arch.regs.msr = vcpu->arch.shregs.msr; vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
hvregs.version = HV_GUEST_STATE_VERSION; hvregs.version = HV_GUEST_STATE_VERSION;
if (vcpu->arch.nested) { if (vcpu->arch.nested) {
...@@ -4030,6 +4037,8 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns ...@@ -4030,6 +4037,8 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr, u64 *tb) unsigned long lpcr, u64 *tb)
{ {
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
u64 next_timer; u64 next_timer;
int trap; int trap;
...@@ -4049,34 +4058,61 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, ...@@ -4049,34 +4058,61 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
/* H_CEDE has to be handled now, not later */ /* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested &&
kvmppc_get_gpr(vcpu, 3) == H_CEDE) { kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
kvmppc_cede(vcpu); kvmppc_cede(vcpu);
kvmppc_set_gpr(vcpu, 3, 0); kvmppc_set_gpr(vcpu, 3, 0);
trap = 0; trap = 0;
} }
} else { } else if (nested) {
struct kvm *kvm = vcpu->kvm; __this_cpu_write(cpu_in_guest, kvm);
trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
__this_cpu_write(cpu_in_guest, NULL);
} else {
kvmppc_xive_push_vcpu(vcpu); kvmppc_xive_push_vcpu(vcpu);
__this_cpu_write(cpu_in_guest, kvm); __this_cpu_write(cpu_in_guest, kvm);
trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
__this_cpu_write(cpu_in_guest, NULL); __this_cpu_write(cpu_in_guest, NULL);
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && if (trap == BOOK3S_INTERRUPT_SYSCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) { !(vcpu->arch.shregs.msr & MSR_PR)) {
unsigned long req = kvmppc_get_gpr(vcpu, 3); unsigned long req = kvmppc_get_gpr(vcpu, 3);
/* H_CEDE has to be handled now, not later */ /*
* XIVE rearm and XICS hcalls must be handled
* before xive context is pulled (is this
* true?)
*/
if (req == H_CEDE) { if (req == H_CEDE) {
/* H_CEDE has to be handled now */
kvmppc_cede(vcpu); kvmppc_cede(vcpu);
kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */ if (!kvmppc_xive_rearm_escalation(vcpu)) {
/*
* Pending escalation so abort
* the cede.
*/
vcpu->arch.ceded = 0;
}
kvmppc_set_gpr(vcpu, 3, 0); kvmppc_set_gpr(vcpu, 3, 0);
trap = 0; trap = 0;
/* XICS hcalls must be handled before xive is pulled */ } else if (req == H_ENTER_NESTED) {
/*
* L2 should not run with the L1
* context so rearm and pull it.
*/
if (!kvmppc_xive_rearm_escalation(vcpu)) {
/*
* Pending escalation so abort
* H_ENTER_NESTED.
*/
kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
} else if (hcall_is_xics(req)) { } else if (hcall_is_xics(req)) {
int ret; int ret;
...@@ -4234,13 +4270,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) ...@@ -4234,13 +4270,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
start_wait = ktime_get(); start_wait = ktime_get();
vc->vcore_state = VCORE_SLEEPING; vc->vcore_state = VCORE_SLEEPING;
trace_kvmppc_vcore_blocked(vc, 0); trace_kvmppc_vcore_blocked(vc->runner, 0);
spin_unlock(&vc->lock); spin_unlock(&vc->lock);
schedule(); schedule();
finish_rcuwait(&vc->wait); finish_rcuwait(&vc->wait);
spin_lock(&vc->lock); spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE; vc->vcore_state = VCORE_INACTIVE;
trace_kvmppc_vcore_blocked(vc, 1); trace_kvmppc_vcore_blocked(vc->runner, 1);
++vc->runner->stat.halt_successful_wait; ++vc->runner->stat.halt_successful_wait;
cur = ktime_get(); cur = ktime_get();
...@@ -4520,9 +4556,14 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, ...@@ -4520,9 +4556,14 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
if (!nested) { if (!nested) {
kvmppc_core_prepare_to_enter(vcpu); kvmppc_core_prepare_to_enter(vcpu);
if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, if (vcpu->arch.shregs.msr & MSR_EE) {
&vcpu->arch.pending_exceptions)) if (xive_interrupt_pending(vcpu))
kvmppc_inject_interrupt_hv(vcpu,
BOOK3S_INTERRUPT_EXTERNAL, 0);
} else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
&vcpu->arch.pending_exceptions)) {
lpcr |= LPCR_MER; lpcr |= LPCR_MER;
}
} else if (vcpu->arch.pending_exceptions || } else if (vcpu->arch.pending_exceptions ||
vcpu->arch.doorbell_request || vcpu->arch.doorbell_request ||
xive_interrupt_pending(vcpu)) { xive_interrupt_pending(vcpu)) {
...@@ -4620,9 +4661,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, ...@@ -4620,9 +4661,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
if (kvmppc_vcpu_check_block(vcpu)) if (kvmppc_vcpu_check_block(vcpu))
break; break;
trace_kvmppc_vcore_blocked(vc, 0); trace_kvmppc_vcore_blocked(vcpu, 0);
schedule(); schedule();
trace_kvmppc_vcore_blocked(vc, 1); trace_kvmppc_vcore_blocked(vcpu, 1);
} }
finish_rcuwait(wait); finish_rcuwait(wait);
} }
...@@ -5284,6 +5325,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) ...@@ -5284,6 +5325,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
lpcr &= LPCR_PECE | LPCR_LPES; lpcr &= LPCR_PECE | LPCR_LPES;
} else { } else {
/*
* The L2 LPES mode will be set by the L0 according to whether
* or not it needs to take external interrupts in HV mode.
*/
lpcr = 0; lpcr = 0;
} }
lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
......
...@@ -489,70 +489,6 @@ static long kvmppc_read_one_intr(bool *again) ...@@ -489,70 +489,6 @@ static long kvmppc_read_one_intr(bool *again)
return kvmppc_check_passthru(xisr, xirr, again); return kvmppc_check_passthru(xisr, xirr, again);
} }
#ifdef CONFIG_KVM_XICS
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
if (xics_on_xive())
return xive_rm_h_xirr(vcpu);
else
return xics_rm_h_xirr(vcpu);
}
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
vcpu->arch.regs.gpr[5] = get_tb();
if (xics_on_xive())
return xive_rm_h_xirr(vcpu);
else
return xics_rm_h_xirr(vcpu);
}
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
if (xics_on_xive())
return xive_rm_h_ipoll(vcpu, server);
else
return H_TOO_HARD;
}
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
if (xics_on_xive())
return xive_rm_h_ipi(vcpu, server, mfrr);
else
return xics_rm_h_ipi(vcpu, server, mfrr);
}
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
if (xics_on_xive())
return xive_rm_h_cppr(vcpu, cppr);
else
return xics_rm_h_cppr(vcpu, cppr);
}
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
if (xics_on_xive())
return xive_rm_h_eoi(vcpu, xirr);
else
return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */
void kvmppc_bad_interrupt(struct pt_regs *regs) void kvmppc_bad_interrupt(struct pt_regs *regs)
{ {
/* /*
......
...@@ -261,8 +261,7 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu, ...@@ -261,8 +261,7 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
/* /*
* Don't let L1 change LPCR bits for the L2 except these: * Don't let L1 change LPCR bits for the L2 except these:
*/ */
mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;
LPCR_LPES | LPCR_MER;
/* /*
* Additional filtering is required depending on hardware * Additional filtering is required depending on hardware
...@@ -439,10 +438,11 @@ long kvmhv_nested_init(void) ...@@ -439,10 +438,11 @@ long kvmhv_nested_init(void)
if (!radix_enabled()) if (!radix_enabled())
return -ENODEV; return -ENODEV;
/* find log base 2 of KVMPPC_NR_LPIDS, rounding up */ /* Partition table entry is 1<<4 bytes in size, hence the 4. */
ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1; ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
if (ptb_order < 8) /* Minimum partition table size is 1<<12 bytes */
ptb_order = 8; if (ptb_order < 12)
ptb_order = 12;
pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order, pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
GFP_KERNEL); GFP_KERNEL);
if (!pseries_partition_tb) { if (!pseries_partition_tb) {
...@@ -450,7 +450,7 @@ long kvmhv_nested_init(void) ...@@ -450,7 +450,7 @@ long kvmhv_nested_init(void)
return -ENOMEM; return -ENOMEM;
} }
ptcr = __pa(pseries_partition_tb) | (ptb_order - 8); ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr); rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
if (rc != H_SUCCESS) { if (rc != H_SUCCESS) {
pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n", pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
...@@ -521,11 +521,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) ...@@ -521,11 +521,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table); kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
} }
void kvmhv_vm_nested_init(struct kvm *kvm)
{
kvm->arch.max_nested_lpid = -1;
}
/* /*
* Handle the H_SET_PARTITION_TABLE hcall. * Handle the H_SET_PARTITION_TABLE hcall.
* r4 = guest real address of partition table + log_2(size) - 12 * r4 = guest real address of partition table + log_2(size) - 12
...@@ -539,16 +534,14 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu) ...@@ -539,16 +534,14 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
long ret = H_SUCCESS; long ret = H_SUCCESS;
srcu_idx = srcu_read_lock(&kvm->srcu); srcu_idx = srcu_read_lock(&kvm->srcu);
/* /* Check partition size and base address. */
* Limit the partition table to 4096 entries (because that's what if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
* hardware supports), and check the base address.
*/
if ((ptcr & PRTS_MASK) > 12 - 8 ||
!kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT)) !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
ret = H_PARAMETER; ret = H_PARAMETER;
srcu_read_unlock(&kvm->srcu, srcu_idx); srcu_read_unlock(&kvm->srcu, srcu_idx);
if (ret == H_SUCCESS) if (ret == H_SUCCESS)
kvm->arch.l1_ptcr = ptcr; kvm->arch.l1_ptcr = ptcr;
return ret; return ret;
} }
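Aside (not part of the diff): the arithmetic behind the new PRTS check above.

/*
 * Each partition-table entry is 1 << 4 = 16 bytes, so a table with
 * 1 << KVM_MAX_NESTED_GUESTS_SHIFT = 4096 entries occupies
 * 1 << (12 + 4) = 64 KiB.  The PRTS field of the PTCR holds
 * log2(table size in bytes) - 12, i.e. 4 for the maximum size, so
 * (ptcr & PRTS_MASK) + 12 - 4 recovers log2(number of entries), which
 * is what is compared against KVM_MAX_NESTED_GUESTS_SHIFT.
 */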
...@@ -644,7 +637,7 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) ...@@ -644,7 +637,7 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
ret = -EFAULT; ret = -EFAULT;
ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) { if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
int srcu_idx = srcu_read_lock(&kvm->srcu); int srcu_idx = srcu_read_lock(&kvm->srcu);
ret = kvm_read_guest(kvm, ptbl_addr, ret = kvm_read_guest(kvm, ptbl_addr,
&ptbl_entry, sizeof(ptbl_entry)); &ptbl_entry, sizeof(ptbl_entry));
...@@ -660,6 +653,35 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) ...@@ -660,6 +653,35 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
kvmhv_set_nested_ptbl(gp); kvmhv_set_nested_ptbl(gp);
} }
void kvmhv_vm_nested_init(struct kvm *kvm)
{
idr_init(&kvm->arch.kvm_nested_guest_idr);
}
static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
{
return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
}
static bool __prealloc_nested(struct kvm *kvm, int lpid)
{
if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
return false;
return true;
}
static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
{
if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
WARN_ON(1);
}
static void __remove_nested(struct kvm *kvm, int lpid)
{
idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
}
static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{ {
struct kvm_nested_guest *gp; struct kvm_nested_guest *gp;
...@@ -720,13 +742,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp) ...@@ -720,13 +742,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
long ref; long ref;
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
if (gp == kvm->arch.nested_guests[lpid]) { if (gp == __find_nested(kvm, lpid)) {
kvm->arch.nested_guests[lpid] = NULL; __remove_nested(kvm, lpid);
if (lpid == kvm->arch.max_nested_lpid) {
while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
;
kvm->arch.max_nested_lpid = lpid;
}
--gp->refcnt; --gp->refcnt;
} }
ref = gp->refcnt; ref = gp->refcnt;
...@@ -743,24 +760,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp) ...@@ -743,24 +760,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
*/ */
void kvmhv_release_all_nested(struct kvm *kvm) void kvmhv_release_all_nested(struct kvm *kvm)
{ {
int i; int lpid;
struct kvm_nested_guest *gp; struct kvm_nested_guest *gp;
struct kvm_nested_guest *freelist = NULL; struct kvm_nested_guest *freelist = NULL;
struct kvm_memory_slot *memslot; struct kvm_memory_slot *memslot;
int srcu_idx, bkt; int srcu_idx, bkt;
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
gp = kvm->arch.nested_guests[i]; __remove_nested(kvm, lpid);
if (!gp)
continue;
kvm->arch.nested_guests[i] = NULL;
if (--gp->refcnt == 0) { if (--gp->refcnt == 0) {
gp->next = freelist; gp->next = freelist;
freelist = gp; freelist = gp;
} }
} }
kvm->arch.max_nested_lpid = -1; idr_destroy(&kvm->arch.kvm_nested_guest_idr);
/* idr is empty and may be reused at this point */
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
while ((gp = freelist) != NULL) { while ((gp = freelist) != NULL) {
freelist = gp->next; freelist = gp->next;
...@@ -792,12 +807,11 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, ...@@ -792,12 +807,11 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
{ {
struct kvm_nested_guest *gp, *newgp; struct kvm_nested_guest *gp, *newgp;
if (l1_lpid >= KVM_MAX_NESTED_GUESTS || if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
return NULL; return NULL;
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
gp = kvm->arch.nested_guests[l1_lpid]; gp = __find_nested(kvm, l1_lpid);
if (gp) if (gp)
++gp->refcnt; ++gp->refcnt;
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
...@@ -808,17 +822,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, ...@@ -808,17 +822,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
newgp = kvmhv_alloc_nested(kvm, l1_lpid); newgp = kvmhv_alloc_nested(kvm, l1_lpid);
if (!newgp) if (!newgp)
return NULL; return NULL;
if (!__prealloc_nested(kvm, l1_lpid)) {
kvmhv_release_nested(newgp);
return NULL;
}
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
if (kvm->arch.nested_guests[l1_lpid]) { gp = __find_nested(kvm, l1_lpid);
/* someone else beat us to it */ if (!gp) {
gp = kvm->arch.nested_guests[l1_lpid]; __add_nested(kvm, l1_lpid, newgp);
} else {
kvm->arch.nested_guests[l1_lpid] = newgp;
++newgp->refcnt; ++newgp->refcnt;
gp = newgp; gp = newgp;
newgp = NULL; newgp = NULL;
if (l1_lpid > kvm->arch.max_nested_lpid)
kvm->arch.max_nested_lpid = l1_lpid;
} }
++gp->refcnt; ++gp->refcnt;
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
...@@ -841,20 +857,13 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp) ...@@ -841,20 +857,13 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp)
kvmhv_release_nested(gp); kvmhv_release_nested(gp);
} }
static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
{
if (lpid > kvm->arch.max_nested_lpid)
return NULL;
return kvm->arch.nested_guests[lpid];
}
pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
unsigned long ea, unsigned *hshift) unsigned long ea, unsigned *hshift)
{ {
struct kvm_nested_guest *gp; struct kvm_nested_guest *gp;
pte_t *pte; pte_t *pte;
gp = kvmhv_find_nested(kvm, lpid); gp = __find_nested(kvm, lpid);
if (!gp) if (!gp)
return NULL; return NULL;
...@@ -960,7 +969,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap, ...@@ -960,7 +969,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
gpa = n_rmap & RMAP_NESTED_GPA_MASK; gpa = n_rmap & RMAP_NESTED_GPA_MASK;
lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT; lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
gp = kvmhv_find_nested(kvm, lpid); gp = __find_nested(kvm, lpid);
if (!gp) if (!gp)
return; return;
...@@ -1152,16 +1161,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric) ...@@ -1152,16 +1161,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{ {
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *gp; struct kvm_nested_guest *gp;
int i; int lpid;
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
gp = kvm->arch.nested_guests[i]; spin_unlock(&kvm->mmu_lock);
if (gp) { kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
spin_unlock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
spin_lock(&kvm->mmu_lock);
}
} }
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
} }
...@@ -1313,7 +1319,7 @@ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid, ...@@ -1313,7 +1319,7 @@ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
* H_ENTER_NESTED call. Since we can't differentiate this case from * H_ENTER_NESTED call. Since we can't differentiate this case from
* the invalid case, we ignore such flush requests and return success. * the invalid case, we ignore such flush requests and return success.
*/ */
if (!kvmhv_find_nested(vcpu->kvm, lpid)) if (!__find_nested(vcpu->kvm, lpid))
return H_SUCCESS; return H_SUCCESS;
/* /*
...@@ -1657,15 +1663,12 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) ...@@ -1657,15 +1663,12 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid) int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{ {
int ret = -1; int ret = lpid + 1;
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
while (++lpid <= kvm->arch.max_nested_lpid) { if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
if (kvm->arch.nested_guests[lpid]) { ret = -1;
ret = lpid;
break;
}
}
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
return ret; return ret;
} }
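Aside (not part of the diff): a minimal, self-contained sketch of the reserve-then-publish IDR pattern the nested-guest helpers above rely on. The demo_* names are invented for illustration; the point is that the sleeping allocation happens outside the lock, while idr_replace() under the lock never allocates, and a reserved-but-unpublished slot looks like a free one to idr_find() -- exactly how __find_nested() treats a guest that is still being set up.

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_lock);

/* May sleep: reserve slot @id by storing a NULL placeholder. */
static bool demo_prealloc(int id)
{
	return idr_alloc(&demo_idr, NULL, id, id + 1, GFP_KERNEL) == id;
}

/* Publish @obj under the lock; idr_replace() does not allocate memory. */
static void demo_publish(int id, void *obj)
{
	spin_lock(&demo_lock);
	/* Non-NULL return means the slot was missing or already populated. */
	WARN_ON(idr_replace(&demo_idr, obj, id) != NULL);
	spin_unlock(&demo_lock);
}

/* Returns NULL for free slots and for reserved-but-unpublished ones alike. */
static void *demo_find(int id)
{
	return idr_find(&demo_idr, id);
}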
...@@ -539,8 +539,10 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 ...@@ -539,8 +539,10 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
{ {
struct kvm_nested_guest *nested = vcpu->arch.nested; struct kvm_nested_guest *nested = vcpu->arch.nested;
u32 lpid; u32 lpid;
u32 pid;
lpid = nested ? nested->shadow_lpid : kvm->arch.lpid; lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
pid = vcpu->arch.pid;
/* /*
* Prior memory accesses to host PID Q3 must be completed before we * Prior memory accesses to host PID Q3 must be completed before we
...@@ -551,7 +553,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 ...@@ -551,7 +553,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
isync(); isync();
mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPID, lpid);
mtspr(SPRN_LPCR, lpcr); mtspr(SPRN_LPCR, lpcr);
mtspr(SPRN_PID, vcpu->arch.pid); mtspr(SPRN_PID, pid);
/* /*
* isync not required here because we are HRFID'ing to guest before * isync not required here because we are HRFID'ing to guest before
* any guest context access, which is context synchronising. * any guest context access, which is context synchronising.
...@@ -561,9 +563,11 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 ...@@ -561,9 +563,11 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr) static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
{ {
u32 lpid; u32 lpid;
u32 pid;
int i; int i;
lpid = kvm->arch.lpid; lpid = kvm->arch.lpid;
pid = vcpu->arch.pid;
/* /*
* See switch_mmu_to_guest_radix. ptesync should not be required here * See switch_mmu_to_guest_radix. ptesync should not be required here
...@@ -574,7 +578,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 ...@@ -574,7 +578,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
isync(); isync();
mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPID, lpid);
mtspr(SPRN_LPCR, lpcr); mtspr(SPRN_LPCR, lpcr);
mtspr(SPRN_PID, vcpu->arch.pid); mtspr(SPRN_PID, pid);
for (i = 0; i < vcpu->arch.slb_max; i++) for (i = 0; i < vcpu->arch.slb_max; i++)
mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv); mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
...@@ -585,6 +589,9 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 ...@@ -585,6 +589,9 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
static void switch_mmu_to_host(struct kvm *kvm, u32 pid) static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
{ {
u32 lpid = kvm->arch.host_lpid;
u64 lpcr = kvm->arch.host_lpcr;
/* /*
* The guest has exited, so guest MMU context is no longer being * The guest has exited, so guest MMU context is no longer being
* non-speculatively accessed, but a hwsync is needed before the * non-speculatively accessed, but a hwsync is needed before the
...@@ -594,8 +601,8 @@ static void switch_mmu_to_host(struct kvm *kvm, u32 pid) ...@@ -594,8 +601,8 @@ static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
asm volatile("hwsync" ::: "memory"); asm volatile("hwsync" ::: "memory");
isync(); isync();
mtspr(SPRN_PID, pid); mtspr(SPRN_PID, pid);
mtspr(SPRN_LPID, kvm->arch.host_lpid); mtspr(SPRN_LPID, lpid);
mtspr(SPRN_LPCR, kvm->arch.host_lpcr); mtspr(SPRN_LPCR, lpcr);
/* /*
* isync is not required after the switch, because mtmsrd with L=0 * isync is not required after the switch, because mtmsrd with L=0
* is performed after this switch, which is context synchronising. * is performed after this switch, which is context synchronising.
......
...@@ -479,6 +479,11 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, ...@@ -479,6 +479,11 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
} }
} }
unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
vcpu->arch.regs.gpr[5] = get_tb();
return xics_rm_h_xirr(vcpu);
}
unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
{ {
...@@ -883,7 +888,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, ...@@ -883,7 +888,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
/* --- Non-real mode XICS-related built-in routines --- */ /* --- Non-real mode XICS-related built-in routines --- */
/** /*
* Host Operations poked by RM KVM * Host Operations poked by RM KVM
*/ */
static void rm_host_ipi_action(int action, void *data) static void rm_host_ipi_action(int action, void *data)
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/pgtable.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include "book3s_xive.h"
/* XXX */
#include <asm/udbg.h>
//#define DBG(fmt...) udbg_printf(fmt)
#define DBG(fmt...) do { } while(0)
static inline void __iomem *get_tima_phys(void)
{
return local_paca->kvm_hstate.xive_tima_phys;
}
#undef XIVE_RUNTIME_CHECKS
#define X_PFX xive_rm_
#define X_STATIC
#define X_STAT_PFX stat_rm_
#define __x_tima get_tima_phys()
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))
#define __x_writeb __raw_rm_writeb
#define __x_readw __raw_rm_readw
#define __x_readq __raw_rm_readq
#define __x_writeq __raw_rm_writeq
#include "book3s_xive_template.c"
...@@ -50,6 +50,14 @@ ...@@ -50,6 +50,14 @@
#define STACK_SLOT_UAMOR (SFS-88) #define STACK_SLOT_UAMOR (SFS-88)
#define STACK_SLOT_FSCR (SFS-96) #define STACK_SLOT_FSCR (SFS-96)
/*
* Use the last LPID (all implemented LPID bits = 1) for partition switching.
* This is reserved in the LPID allocator. POWER7 only implements 0x3ff, but
* we write 0xfff into the LPID SPR anyway, which seems to work and just
* ignores the top bits.
*/
#define LPID_RSVD 0xfff
/* /*
* Call kvmppc_hv_entry in real mode. * Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled. * Must be called with interrupts hard-disabled.
...@@ -1784,13 +1792,8 @@ hcall_real_table: ...@@ -1784,13 +1792,8 @@ hcall_real_table:
.long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
.long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
.long DOTSYM(kvmppc_h_protect) - hcall_real_table .long DOTSYM(kvmppc_h_protect) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
.long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
#else
.long 0 /* 0x1c */ .long 0 /* 0x1c */
.long 0 /* 0x20 */ .long 0 /* 0x20 */
#endif
.long 0 /* 0x24 - H_SET_SPRG0 */ .long 0 /* 0x24 - H_SET_SPRG0 */
.long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
...@@ -1808,11 +1811,11 @@ hcall_real_table: ...@@ -1808,11 +1811,11 @@ hcall_real_table:
.long 0 /* 0x5c */ .long 0 /* 0x5c */
.long 0 /* 0x60 */ .long 0 /* 0x60 */
#ifdef CONFIG_KVM_XICS #ifdef CONFIG_KVM_XICS
.long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table .long DOTSYM(xics_rm_h_eoi) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table .long DOTSYM(xics_rm_h_cppr) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table .long DOTSYM(xics_rm_h_ipi) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table .long 0 /* 0x70 - H_IPOLL */
.long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table .long DOTSYM(xics_rm_h_xirr) - hcall_real_table
#else #else
.long 0 /* 0x64 - H_EOI */ .long 0 /* 0x64 - H_EOI */
.long 0 /* 0x68 - H_CPPR */ .long 0 /* 0x68 - H_CPPR */
...@@ -1868,13 +1871,8 @@ hcall_real_table: ...@@ -1868,13 +1871,8 @@ hcall_real_table:
.long 0 /* 0x12c */ .long 0 /* 0x12c */
.long 0 /* 0x130 */ .long 0 /* 0x130 */
.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
.long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#else
.long 0 /* 0x138 */ .long 0 /* 0x138 */
.long 0 /* 0x13c */ .long 0 /* 0x13c */
#endif
.long 0 /* 0x140 */ .long 0 /* 0x140 */
.long 0 /* 0x144 */ .long 0 /* 0x144 */
.long 0 /* 0x148 */ .long 0 /* 0x148 */
...@@ -1987,7 +1985,7 @@ hcall_real_table: ...@@ -1987,7 +1985,7 @@ hcall_real_table:
.long 0 /* 0x2f4 */ .long 0 /* 0x2f4 */
.long 0 /* 0x2f8 */ .long 0 /* 0x2f8 */
#ifdef CONFIG_KVM_XICS #ifdef CONFIG_KVM_XICS
.long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table .long DOTSYM(xics_rm_h_xirr_x) - hcall_real_table
#else #else
.long 0 /* 0x2fc - H_XIRR_X*/ .long 0 /* 0x2fc - H_XIRR_X*/
#endif #endif
......
...@@ -361,13 +361,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, ...@@ -361,13 +361,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
struct kvm *kvm, unsigned long *gfn) struct kvm *kvm, unsigned long *gfn)
{ {
struct kvmppc_uvmem_slot *p; struct kvmppc_uvmem_slot *p = NULL, *iter;
bool ret = false; bool ret = false;
unsigned long i; unsigned long i;
list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns) if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
p = iter;
break; break;
}
if (!p) if (!p)
return ret; return ret;
/* /*
......
...@@ -433,9 +433,12 @@ int kvmppc_hcall_impl_pr(unsigned long cmd) ...@@ -433,9 +433,12 @@ int kvmppc_hcall_impl_pr(unsigned long cmd)
case H_REMOVE: case H_REMOVE:
case H_PROTECT: case H_PROTECT:
case H_BULK_REMOVE: case H_BULK_REMOVE:
#ifdef CONFIG_SPAPR_TCE_IOMMU
case H_GET_TCE:
case H_PUT_TCE: case H_PUT_TCE:
case H_PUT_TCE_INDIRECT: case H_PUT_TCE_INDIRECT:
case H_STUFF_TCE: case H_STUFF_TCE:
#endif
case H_CEDE: case H_CEDE:
case H_LOGICAL_CI_LOAD: case H_LOGICAL_CI_LOAD:
case H_LOGICAL_CI_STORE: case H_LOGICAL_CI_STORE:
...@@ -464,7 +467,10 @@ static unsigned int default_hcall_list[] = { ...@@ -464,7 +467,10 @@ static unsigned int default_hcall_list[] = {
H_REMOVE, H_REMOVE,
H_PROTECT, H_PROTECT,
H_BULK_REMOVE, H_BULK_REMOVE,
#ifdef CONFIG_SPAPR_TCE_IOMMU
H_GET_TCE,
H_PUT_TCE, H_PUT_TCE,
#endif
H_CEDE, H_CEDE,
H_SET_MODE, H_SET_MODE,
#ifdef CONFIG_KVM_XICS #ifdef CONFIG_KVM_XICS
......
...@@ -285,13 +285,6 @@ static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle) ...@@ -285,13 +285,6 @@ static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
return cur & 0x7fffffff; return cur & 0x7fffffff;
} }
extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
/* /*
* Common Xive routines for XICS-over-XIVE and XIVE native * Common Xive routines for XICS-over-XIVE and XIVE native
*/ */
......
...@@ -399,7 +399,6 @@ static int __init kvmppc_e500mc_init(void) ...@@ -399,7 +399,6 @@ static int __init kvmppc_e500mc_init(void)
* allocator. * allocator.
*/ */
kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core); kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
kvmppc_claim_lpid(0); /* host */
r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
if (r) if (r)
......
...@@ -2497,41 +2497,37 @@ long kvm_arch_vm_ioctl(struct file *filp, ...@@ -2497,41 +2497,37 @@ long kvm_arch_vm_ioctl(struct file *filp,
return r; return r;
} }
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids; static unsigned long nr_lpids;
long kvmppc_alloc_lpid(void) long kvmppc_alloc_lpid(void)
{ {
long lpid; int lpid;
do { /* The host LPID must always be 0 (allocation starts at 1) */
lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
if (lpid >= nr_lpids) { if (lpid < 0) {
if (lpid == -ENOMEM)
pr_err("%s: Out of memory\n", __func__);
else
pr_err("%s: No LPIDs free\n", __func__); pr_err("%s: No LPIDs free\n", __func__);
return -ENOMEM; return -ENOMEM;
} }
} while (test_and_set_bit(lpid, lpid_inuse));
return lpid; return lpid;
} }
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
void kvmppc_claim_lpid(long lpid)
{
set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
void kvmppc_free_lpid(long lpid) void kvmppc_free_lpid(long lpid)
{ {
clear_bit(lpid, lpid_inuse); ida_free(&lpid_inuse, lpid);
} }
EXPORT_SYMBOL_GPL(kvmppc_free_lpid); EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param) void kvmppc_init_lpid(unsigned long nr_lpids_param)
{ {
nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); nr_lpids = nr_lpids_param;
memset(lpid_inuse, 0, sizeof(lpid_inuse));
} }
EXPORT_SYMBOL_GPL(kvmppc_init_lpid); EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
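Aside (not part of the diff): a standalone sketch of the IDA pattern the new allocator uses, with invented demo_* names. Because ida_alloc_range() starts at 1, ID 0 is never handed out, which is why the explicit kvmppc_claim_lpid() calls for the host LPID disappear elsewhere in this series.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDA(demo_lpids);

/* Allocate a guest LPID in [1, nr_lpids - 1]; 0 stays reserved for the host. */
static long demo_alloc_lpid(unsigned long nr_lpids)
{
	int lpid = ida_alloc_range(&demo_lpids, 1, nr_lpids - 1, GFP_KERNEL);

	return lpid < 0 ? -ENOMEM : lpid;
}

static void demo_free_lpid(long lpid)
{
	ida_free(&demo_lpids, lpid);
}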
......
...@@ -409,9 +409,9 @@ TRACE_EVENT(kvmppc_run_core, ...@@ -409,9 +409,9 @@ TRACE_EVENT(kvmppc_run_core,
); );
TRACE_EVENT(kvmppc_vcore_blocked, TRACE_EVENT(kvmppc_vcore_blocked,
TP_PROTO(struct kvmppc_vcore *vc, int where), TP_PROTO(struct kvm_vcpu *vcpu, int where),
TP_ARGS(vc, where), TP_ARGS(vcpu, where),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, n_runnable) __field(int, n_runnable)
...@@ -421,8 +421,8 @@ TRACE_EVENT(kvmppc_vcore_blocked, ...@@ -421,8 +421,8 @@ TRACE_EVENT(kvmppc_vcore_blocked,
), ),
TP_fast_assign( TP_fast_assign(
__entry->runner_vcpu = vc->runner->vcpu_id; __entry->runner_vcpu = vcpu->vcpu_id;
__entry->n_runnable = vc->n_runnable; __entry->n_runnable = vcpu->arch.vcore->n_runnable;
__entry->where = where; __entry->where = where;
__entry->tgid = current->tgid; __entry->tgid = current->tgid;
), ),
......
...@@ -305,24 +305,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, ...@@ -305,24 +305,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
} }
EXPORT_SYMBOL_GPL(mm_iommu_lookup); EXPORT_SYMBOL_GPL(mm_iommu_lookup);
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
unsigned long ua, unsigned long size)
{
struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
next) {
if ((mem->ua <= ua) &&
(ua + size <= mem->ua +
(mem->entries << PAGE_SHIFT))) {
ret = mem;
break;
}
}
return ret;
}
struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries) unsigned long ua, unsigned long entries)
{ {
...@@ -369,56 +351,6 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, ...@@ -369,56 +351,6 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
} }
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
unsigned long *pa;
if (entry >= mem->entries)
return -EFAULT;
if (pageshift > mem->pageshift)
return -EFAULT;
if (!mem->hpas) {
*hpa = mem->dev_hpa + (ua - mem->ua);
return 0;
}
pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
if (!pa)
return -EFAULT;
*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
return 0;
}
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
struct mm_iommu_table_group_mem_t *mem;
long entry;
void *va;
unsigned long *pa;
mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
if (!mem)
return;
if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
return;
entry = (ua - mem->ua) >> PAGE_SHIFT;
va = &mem->hpas[entry];
pa = (void *) vmalloc_to_phys(va);
if (!pa)
return;
*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}
bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa, bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
unsigned int pageshift, unsigned long *size) unsigned int pageshift, unsigned long *size)
{ {
......
...@@ -372,6 +372,9 @@ void register_page_bootmem_memmap(unsigned long section_nr, ...@@ -372,6 +372,9 @@ void register_page_bootmem_memmap(unsigned long section_nr,
#ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_BOOK3S_64
unsigned int mmu_lpid_bits; unsigned int mmu_lpid_bits;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
EXPORT_SYMBOL_GPL(mmu_lpid_bits);
#endif
unsigned int mmu_pid_bits; unsigned int mmu_pid_bits;
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
......
...@@ -145,8 +145,7 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages, ...@@ -145,8 +145,7 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index, int pnv_tce_xchg(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction, unsigned long *hpa, enum dma_data_direction *direction)
bool alloc)
{ {
u64 proto_tce = iommu_direction_to_tce_perm(*direction); u64 proto_tce = iommu_direction_to_tce_perm(*direction);
unsigned long newtce = *hpa | proto_tce, oldtce; unsigned long newtce = *hpa | proto_tce, oldtce;
...@@ -164,7 +163,7 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index, ...@@ -164,7 +163,7 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
} }
if (!ptce) { if (!ptce) {
ptce = pnv_tce(tbl, false, idx, alloc); ptce = pnv_tce(tbl, false, idx, true);
if (!ptce) if (!ptce)
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -1268,22 +1268,20 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev, ...@@ -1268,22 +1268,20 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
return false; return false;
} }
static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb, static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb)
bool real_mode)
{ {
return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) : return phb->regs + 0x210;
(phb->regs + 0x210);
} }
static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
unsigned long index, unsigned long npages, bool rm) unsigned long index, unsigned long npages)
{ {
struct iommu_table_group_link *tgl = list_first_entry_or_null( struct iommu_table_group_link *tgl = list_first_entry_or_null(
&tbl->it_group_list, struct iommu_table_group_link, &tbl->it_group_list, struct iommu_table_group_link,
next); next);
struct pnv_ioda_pe *pe = container_of(tgl->table_group, struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group); struct pnv_ioda_pe, table_group);
__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long start, end, inc; unsigned long start, end, inc;
start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset); start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
...@@ -1298,11 +1296,7 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, ...@@ -1298,11 +1296,7 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
mb(); /* Ensure above stores are visible */ mb(); /* Ensure above stores are visible */
while (start <= end) { while (start <= end) {
if (rm) __raw_writeq_be(start, invalidate);
__raw_rm_writeq_be(start, invalidate);
else
__raw_writeq_be(start, invalidate);
start += inc; start += inc;
} }
...@@ -1321,7 +1315,7 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, ...@@ -1321,7 +1315,7 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
attrs); attrs);
if (!ret) if (!ret)
pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false); pnv_pci_p7ioc_tce_invalidate(tbl, index, npages);
return ret; return ret;
} }
...@@ -1329,10 +1323,9 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, ...@@ -1329,10 +1323,9 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
/* Common for IODA1 and IODA2 */ /* Common for IODA1 and IODA2 */
static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index, static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction, unsigned long *hpa, enum dma_data_direction *direction)
bool realmode)
{ {
return pnv_tce_xchg(tbl, index, hpa, direction, !realmode); return pnv_tce_xchg(tbl, index, hpa, direction);
} }
#endif #endif
...@@ -1341,7 +1334,7 @@ static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index, ...@@ -1341,7 +1334,7 @@ static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
{ {
pnv_tce_free(tbl, index, npages); pnv_tce_free(tbl, index, npages);
pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false); pnv_pci_p7ioc_tce_invalidate(tbl, index, npages);
} }
static struct iommu_table_ops pnv_ioda1_iommu_ops = { static struct iommu_table_ops pnv_ioda1_iommu_ops = {
...@@ -1362,18 +1355,18 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = { ...@@ -1362,18 +1355,18 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{ {
/* 01xb - invalidate TCEs that match the specified PE# */ /* 01xb - invalidate TCEs that match the specified PE# */
__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false); __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
mb(); /* Ensure above stores are visible */ mb(); /* Ensure above stores are visible */
__raw_writeq_be(val, invalidate); __raw_writeq_be(val, invalidate);
} }
static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe,
unsigned shift, unsigned long index, unsigned shift, unsigned long index,
unsigned long npages) unsigned long npages)
{ {
__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long start, end, inc; unsigned long start, end, inc;
/* We'll invalidate DMA address in PE scope */ /* We'll invalidate DMA address in PE scope */
...@@ -1388,10 +1381,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm, ...@@ -1388,10 +1381,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
mb(); mb();
while (start <= end) { while (start <= end) {
if (rm) __raw_writeq_be(start, invalidate);
__raw_rm_writeq_be(start, invalidate);
else
__raw_writeq_be(start, invalidate);
start += inc; start += inc;
} }
} }
...@@ -1408,7 +1398,7 @@ static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe) ...@@ -1408,7 +1398,7 @@ static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
} }
static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
unsigned long index, unsigned long npages, bool rm) unsigned long index, unsigned long npages)
{ {
struct iommu_table_group_link *tgl; struct iommu_table_group_link *tgl;
...@@ -1419,7 +1409,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, ...@@ -1419,7 +1409,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
unsigned int shift = tbl->it_page_shift; unsigned int shift = tbl->it_page_shift;
if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
pnv_pci_phb3_tce_invalidate(pe, rm, shift, pnv_pci_phb3_tce_invalidate(pe, shift,
index, npages); index, npages);
else else
opal_pci_tce_kill(phb->opal_id, opal_pci_tce_kill(phb->opal_id,
...@@ -1438,7 +1428,7 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, ...@@ -1438,7 +1428,7 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
attrs); attrs);
if (!ret) if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false); pnv_pci_ioda2_tce_invalidate(tbl, index, npages);
return ret; return ret;
} }
...@@ -1448,7 +1438,7 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index, ...@@ -1448,7 +1438,7 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
{ {
pnv_tce_free(tbl, index, npages); pnv_tce_free(tbl, index, npages);
pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false); pnv_pci_ioda2_tce_invalidate(tbl, index, npages);
} }
static struct iommu_table_ops pnv_ioda2_iommu_ops = { static struct iommu_table_ops pnv_ioda2_iommu_ops = {
...@@ -2739,7 +2729,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe) ...@@ -2739,7 +2729,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
if (rc != OPAL_SUCCESS) if (rc != OPAL_SUCCESS)
return; return;
pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false); pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size);
if (pe->table_group.group) { if (pe->table_group.group) {
iommu_group_put(pe->table_group.group); iommu_group_put(pe->table_group.group);
WARN_ON(pe->table_group.group); WARN_ON(pe->table_group.group);
......
...@@ -311,8 +311,7 @@ extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, ...@@ -311,8 +311,7 @@ extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long attrs); unsigned long attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index, extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction, unsigned long *hpa, enum dma_data_direction *direction);
bool alloc);
extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index, extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
bool alloc); bool alloc);
extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index); extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
......
...@@ -666,8 +666,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) ...@@ -666,8 +666,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
long *tce, enum dma_data_direction *direction, long *tce, enum dma_data_direction *direction)
bool realmode)
{ {
long rc; long rc;
unsigned long ioba = (unsigned long) index << tbl->it_page_shift; unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
......
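Taken together, the hunks above leave the TCE exchange and invalidation callbacks with a single, virtual-mode-only calling convention: no rm/realmode flag is threaded through, and the invalidation register is always the cached MMIO mapping. The toy program below sketches the shape of such a callback table and a caller after the flag removal; the toy_* types are made up for illustration and only model the interface shape, not the kernel's struct iommu_table_ops.

#include <stdint.h>
#include <stdio.h>

/* Toy DMA direction, standing in for enum dma_data_direction. */
enum toy_dir { TOY_DMA_NONE, TOY_DMA_TO_DEVICE, TOY_DMA_FROM_DEVICE, TOY_DMA_BIDIRECTIONAL };

struct toy_table;

/* After this series, neither callback takes a real-mode flag: there is
 * exactly one calling convention, and the backend may sleep or allocate. */
struct toy_table_ops {
	int  (*xchg_no_kill)(struct toy_table *tbl, long index,
			     uint64_t *hpa, enum toy_dir *dir);
	void (*tce_kill)(struct toy_table *tbl, unsigned long index,
			 unsigned long pages);
};

struct toy_table {
	const struct toy_table_ops *ops;
	uint64_t entries[64];
};

static int toy_xchg(struct toy_table *tbl, long index, uint64_t *hpa,
		    enum toy_dir *dir)
{
	uint64_t old = tbl->entries[index];

	/* The direction is accepted but ignored in this toy model. */
	(void)dir;
	tbl->entries[index] = *hpa;
	*hpa = old;
	return 0;
}

static void toy_kill(struct toy_table *tbl, unsigned long index,
		     unsigned long pages)
{
	(void)tbl;
	printf("invalidate %lu page(s) from entry %lu\n", pages, index);
}

static const struct toy_table_ops toy_ops = {
	.xchg_no_kill = toy_xchg,
	.tce_kill     = toy_kill,
};

int main(void)
{
	struct toy_table tbl = { .ops = &toy_ops };
	uint64_t hpa = 0x1000;
	enum toy_dir dir = TOY_DMA_BIDIRECTIONAL;

	/* One path for every caller: exchange the entry, then invalidate. */
	tbl.ops->xchg_no_kill(&tbl, 3, &hpa, &dir);
	tbl.ops->tce_kill(&tbl, 3, 1);
	return 0;
}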