Commit ec594c47 authored by Paolo Bonzini

Merge branch 'kvm-ppc-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD
parents 8afd74c2 feafd13c
@@ -16,7 +16,21 @@ Groups:
 KVM_DEV_VFIO_GROUP attributes:
   KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking
+    kvm_device_attr.addr points to an int32_t file descriptor
+    for the VFIO group.
   KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking
+    kvm_device_attr.addr points to an int32_t file descriptor
+    for the VFIO group.
+  KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: attaches a guest visible TCE table
+    allocated by sPAPR KVM.
+    kvm_device_attr.addr points to a struct:
-For each, kvm_device_attr.addr points to an int32_t file descriptor
-for the VFIO group.
+    struct kvm_vfio_spapr_tce {
+        __s32 groupfd;
+        __s32 tablefd;
+    };
+    where
+    @groupfd is a file descriptor for a VFIO group;
+    @tablefd is a file descriptor for a TCE table allocated via
+        KVM_CREATE_SPAPR_TCE.
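For orientation, a minimal userspace sketch (not part of this merge) of how the new attribute is programmed; it assumes vfio_dev_fd came from KVM_CREATE_DEVICE with KVM_DEV_TYPE_VFIO and table_fd from KVM_CREATE_SPAPR_TCE_64 on the VM fd:

/* Hypothetical helper -- the fd names are illustrative only. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int kvm_vfio_attach_tce_table(int vfio_dev_fd, int group_fd,
                                     int table_fd)
{
    struct kvm_vfio_spapr_tce param = {
        .groupfd = group_fd,
        .tablefd = table_fd,
    };
    struct kvm_device_attr attr = {
        .group = KVM_DEV_VFIO_GROUP,
        .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
        .addr = (uint64_t)(uintptr_t)&param,
    };

    /* 0 on success; -1 with errno set otherwise, e.g. ENOENT if the
     * group was never added with KVM_DEV_VFIO_GROUP_ADD. */
    return ioctl(vfio_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
}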
...
@@ -87,6 +87,11 @@ static inline unsigned int get_oc(u32 inst)
     return (inst >> 11) & 0x7fff;
 }

+static inline unsigned int get_tx_or_sx(u32 inst)
+{
+    return (inst) & 0x1;
+}
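The new helper extracts the instruction's low bit, which X-form VSX loads and stores use as the high bit of the target register number. An illustration (get_rt() already exists in this header; vsx_xt() below is not part of the patch):

/* Illustrative only: XT = (TX << 5) | T addresses all 64 VSX registers,
 * where 0-31 overlay the FPRs and 32-63 overlay the Altivec VRs. */
static inline unsigned int vsx_xt(u32 inst)
{
    return (get_tx_or_sx(inst) << 5) | get_rt(inst);
}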
#define IS_XFORM(inst)  (get_op(inst) == 31)
#define IS_DSFORM(inst) (get_op(inst) >= 56)
...
@@ -64,6 +64,11 @@ struct iommu_table_ops {
            long index,
            unsigned long *hpa,
            enum dma_data_direction *direction);
+    /* Real mode */
+    int (*exchange_rm)(struct iommu_table *tbl,
+            long index,
+            unsigned long *hpa,
+            enum dma_data_direction *direction);
#endif
    void (*clear)(struct iommu_table *tbl,
            long index, long npages);
@@ -114,6 +119,7 @@ struct iommu_table {
    struct list_head it_group_list;/* List of iommu_table_group_link */
    unsigned long *it_userspace; /* userspace view of the table */
    struct iommu_table_ops *it_ops;
+    struct kref it_kref;
};

#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
@@ -146,8 +152,8 @@ static inline void *get_iommu_table_base(struct device *dev)

extern int dma_iommu_dma_supported(struct device *dev, u64 mask);

-/* Frees table for an individual device node */
-extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
+extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
+extern int iommu_tce_table_put(struct iommu_table *tbl);

/* Initializes an iommu_table based in values set in the passed-in
 * structure
@@ -208,6 +214,8 @@ extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
        unsigned long *hpa, enum dma_data_direction *direction);
+extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
+        unsigned long *hpa, enum dma_data_direction *direction);
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
                    int pci_domain_number,
@@ -288,11 +296,21 @@ static inline void iommu_restore(void)
#endif

/* The API to support IOMMU operations for VFIO */
-extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
-        unsigned long ioba, unsigned long tce_value,
-        unsigned long npages);
-extern int iommu_tce_put_param_check(struct iommu_table *tbl,
-        unsigned long ioba, unsigned long tce);
+extern int iommu_tce_check_ioba(unsigned long page_shift,
+        unsigned long offset, unsigned long size,
+        unsigned long ioba, unsigned long npages);
+extern int iommu_tce_check_gpa(unsigned long page_shift,
+        unsigned long gpa);
+#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
+        (iommu_tce_check_ioba((tbl)->it_page_shift,       \
+                (tbl)->it_offset, (tbl)->it_size, \
+                (ioba), (npages)) || (tce_value))
+#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
+        (iommu_tce_check_ioba((tbl)->it_page_shift,       \
+                (tbl)->it_offset, (tbl)->it_size, \
+                (ioba), 1) ||                     \
+        iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
...
@@ -188,6 +188,13 @@ struct kvmppc_pginfo {
    atomic_t refcnt;
};

+struct kvmppc_spapr_tce_iommu_table {
+    struct rcu_head rcu;
+    struct list_head next;
+    struct iommu_table *tbl;
+    struct kref kref;
+};
+
struct kvmppc_spapr_tce_table {
    struct list_head list;
    struct kvm *kvm;
@@ -196,6 +203,7 @@ struct kvmppc_spapr_tce_table {
    u32 page_shift;
    u64 offset;     /* in pages */
    u64 size;       /* window size in pages */
+    struct list_head iommu_tables;
    struct page *pages[0];
};
@@ -342,6 +350,7 @@ struct kvmppc_pte {
    bool may_read       : 1;
    bool may_write      : 1;
    bool may_execute    : 1;
+    unsigned long wimg;
    u8 page_size;       /* MMU_PAGE_xxx */
};
@@ -438,6 +447,11 @@ struct mmio_hpte_cache {
    unsigned int index;
};

+#define KVMPPC_VSX_COPY_NONE            0
+#define KVMPPC_VSX_COPY_WORD            1
+#define KVMPPC_VSX_COPY_DWORD           2
+#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
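These copy types describe how a 128-bit VSX access is split into GPR-sized MMIO operations. A sketch of the mapping (not from the patch itself; the opcode constants are the ones added to ppc-opcode.h in this same merge):

/* Illustrative only -- how an emulator might pick the new fields. */
static void vsx_copy_params(u32 op, u8 *copy_type, u8 *copy_nums)
{
    switch (op) {
    case OP_31_XOP_LXVW4X:          /* 4 x 4-byte accesses */
        *copy_type = KVMPPC_VSX_COPY_WORD;
        *copy_nums = 4;
        break;
    case OP_31_XOP_LXVD2X:          /* 2 x 8-byte accesses */
        *copy_type = KVMPPC_VSX_COPY_DWORD;
        *copy_nums = 2;
        break;
    case OP_31_XOP_LXVDSX:          /* one load, splat to both halves */
        *copy_type = KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
        *copy_nums = 1;
        break;
    }
}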
struct openpic;

struct kvm_vcpu_arch {
@@ -641,6 +655,21 @@ struct kvm_vcpu_arch {
    u8 io_gpr; /* GPR used as IO source/target */
    u8 mmio_host_swabbed;
    u8 mmio_sign_extend;
+    /* conversion between single and double precision */
+    u8 mmio_sp64_extend;
+    /*
+     * Number of simulations for vsx.
+     * If we use 2*8bytes to simulate 1*16bytes,
+     * then the number should be 2 and
+     * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD.
+     * If we use 4*4bytes to simulate 1*16bytes,
+     * the number should be 4 and
+     * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
+     */
+    u8 mmio_vsx_copy_nums;
+    u8 mmio_vsx_offset;
+    u8 mmio_vsx_copy_type;
+    u8 mmio_vsx_tx_sx_enabled;
    u8 osi_needed;
    u8 osi_enabled;
    u8 papr_enabled;
@@ -729,6 +758,8 @@ struct kvm_vcpu_arch {
};

#define VCPU_FPR(vcpu, i)   (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+#define VCPU_VSX_FPR(vcpu, i, j)    ((vcpu)->arch.fp.fpr[i][j])
+#define VCPU_VSX_VR(vcpu, i)        ((vcpu)->arch.vr.vr[i])

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY    0
@@ -742,6 +773,7 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_FPR    0x0020
#define KVM_MMIO_REG_QPR    0x0040
#define KVM_MMIO_REG_FQPR   0x0060
+#define KVM_MMIO_REG_VSX    0x0080

#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
...
@@ -78,9 +78,15 @@ extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                unsigned int rt, unsigned int bytes,
                int is_default_endian);
+extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                unsigned int rt, unsigned int bytes,
+                int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                u64 val, unsigned int bytes,
                int is_default_endian);
+extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                int rs, unsigned int bytes,
+                int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                 enum instruction_type type, u32 *inst);
@@ -132,6 +138,9 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
@@ -164,13 +173,19 @@ extern long kvmppc_prepare_vrma(struct kvm *kvm,
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
            struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+        struct iommu_group *grp);
+extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+        struct iommu_group *grp);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
-        struct kvm_vcpu *vcpu, unsigned long liobn);
+        struct kvm *kvm, unsigned long liobn);
-extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
-        unsigned long ioba, unsigned long npages);
+#define kvmppc_ioba_validate(stt, ioba, npages)                         \
+        (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
+                (stt)->size, (ioba), (npages)) ?        \
+                H_PARAMETER : H_SUCCESS)
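Since the macro now yields H_SUCCESS or H_PARAMETER directly, a TCE hypercall handler reduces to a chain of checks. A hedged sketch of the expected calling pattern (h_put_tce_sketch is not a function in this merge):

static long h_put_tce_sketch(struct kvm *kvm, unsigned long liobn,
                             unsigned long ioba, unsigned long tce)
{
    /* kvmppc_find_table() now takes struct kvm *, per the change above */
    struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(kvm, liobn);
    long ret;

    if (!stt)
        return H_TOO_HARD;

    ret = kvmppc_ioba_validate(stt, ioba, 1);
    if (ret != H_SUCCESS)
        return ret;

    return kvmppc_tce_validate(stt, tce);
}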
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
        unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
@@ -240,6 +255,7 @@ union kvmppc_one_reg {
    u64 dval;
    vector128 vval;
    u64 vsxval[2];
+    u32 vsx32val[4];
    struct {
        u64 addr;
        u64 length;
...
@@ -29,10 +29,14 @@ extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
        unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
+        struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
        unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
        unsigned long ua, unsigned long *hpa);
+extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+        unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
...
@@ -86,32 +86,79 @@
#define OP_TRAP_64 2

#define OP_31_XOP_TRAP 4
+#define OP_31_XOP_LDX 21
#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_LDUX 53
#define OP_31_XOP_DCBST 54
#define OP_31_XOP_LWZUX 55
#define OP_31_XOP_TRAP_64 68
#define OP_31_XOP_DCBF 86
#define OP_31_XOP_LBZX 87
+#define OP_31_XOP_STDX 149
#define OP_31_XOP_STWX 151
+#define OP_31_XOP_STDUX 181
+#define OP_31_XOP_STWUX 183
#define OP_31_XOP_STBX 215
#define OP_31_XOP_LBZUX 119
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MFSPR 339
+#define OP_31_XOP_LWAX 341
#define OP_31_XOP_LHAX 343
+#define OP_31_XOP_LWAUX 373
#define OP_31_XOP_LHAUX 375
#define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467
#define OP_31_XOP_DCBI 470
+#define OP_31_XOP_LDBRX 532
#define OP_31_XOP_LWBRX 534
#define OP_31_XOP_TLBSYNC 566
+#define OP_31_XOP_STDBRX 660
#define OP_31_XOP_STWBRX 662
+#define OP_31_XOP_STFSX 663
+#define OP_31_XOP_STFSUX 695
+#define OP_31_XOP_STFDX 727
+#define OP_31_XOP_STFDUX 759
#define OP_31_XOP_LHBRX 790
+#define OP_31_XOP_LFIWAX 855
+#define OP_31_XOP_LFIWZX 887
#define OP_31_XOP_STHBRX 918
+#define OP_31_XOP_STFIWX 983
+
+/* VSX Scalar Load Instructions */
+#define OP_31_XOP_LXSDX 588
+#define OP_31_XOP_LXSSPX 524
+#define OP_31_XOP_LXSIWAX 76
+#define OP_31_XOP_LXSIWZX 12
+
+/* VSX Scalar Store Instructions */
+#define OP_31_XOP_STXSDX 716
+#define OP_31_XOP_STXSSPX 652
+#define OP_31_XOP_STXSIWX 140
+
+/* VSX Vector Load Instructions */
+#define OP_31_XOP_LXVD2X 844
+#define OP_31_XOP_LXVW4X 780
+
+/* VSX Vector Load and Splat Instruction */
+#define OP_31_XOP_LXVDSX 332
+
+/* VSX Vector Store Instructions */
+#define OP_31_XOP_STXVD2X 972
+#define OP_31_XOP_STXVW4X 908
+
+#define OP_31_XOP_LFSX 535
+#define OP_31_XOP_LFSUX 567
+#define OP_31_XOP_LFDX 599
+#define OP_31_XOP_LFDUX 631

#define OP_LWZ 32
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
#define OP_LD 58
#define OP_LWZU 33
#define OP_LBZ 34
@@ -127,6 +174,17 @@
#define OP_LHAU 43
#define OP_STH 44
#define OP_STHU 45
+#define OP_LMW 46
+#define OP_STMW 47
+#define OP_LFS 48
+#define OP_LFSU 49
+#define OP_LFD 50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
+#define OP_LQ 56

/* sorted alphabetically */
#define PPC_INST_BHRBE 0x7c00025c
...
@@ -711,13 +711,16 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
    return tbl;
}

-void iommu_free_table(struct iommu_table *tbl, const char *node_name)
+static void iommu_table_free(struct kref *kref)
{
    unsigned long bitmap_sz;
    unsigned int order;
+    struct iommu_table *tbl;

-    if (!tbl)
-        return;
+    tbl = container_of(kref, struct iommu_table, it_kref);
+
+    if (tbl->it_ops->free)
+        tbl->it_ops->free(tbl);

    if (!tbl->it_map) {
        kfree(tbl);
@@ -733,7 +736,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)

    /* verify that table contains no entries */
    if (!bitmap_empty(tbl->it_map, tbl->it_size))
-        pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
+        pr_warn("%s: Unexpected TCEs\n", __func__);

    /* calculate bitmap size in bytes */
    bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
@@ -746,6 +749,24 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
    kfree(tbl);
}
+struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
+{
+    if (kref_get_unless_zero(&tbl->it_kref))
+        return tbl;
+
+    return NULL;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_table_get);
+
+int iommu_tce_table_put(struct iommu_table *tbl)
+{
+    if (WARN_ON(!tbl))
+        return 0;
+
+    return kref_put(&tbl->it_kref, iommu_table_free);
+}
+EXPORT_SYMBOL_GPL(iommu_tce_table_put);
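With it_kref in place, every holder of a table pointer follows the usual kref discipline; a short sketch (not from the patch) of the intended usage:

/* Illustrative usage of the new reference counting. */
static void cache_table_example(struct iommu_table *tbl)
{
    struct iommu_table *mine = iommu_tce_table_get(tbl);

    if (!mine)
        return; /* refcount already hit zero; table is being freed */

    /* ... use the table ... */

    iommu_tce_table_put(mine); /* the last put calls iommu_table_free() */
}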
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
@@ -942,47 +963,36 @@ void iommu_flush_tce(struct iommu_table *tbl)
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
-int iommu_tce_clear_param_check(struct iommu_table *tbl,
-        unsigned long ioba, unsigned long tce_value,
-        unsigned long npages)
+int iommu_tce_check_ioba(unsigned long page_shift,
+        unsigned long offset, unsigned long size,
+        unsigned long ioba, unsigned long npages)
{
-    /* tbl->it_ops->clear() does not support any value but 0 */
-    if (tce_value)
-        return -EINVAL;
+    unsigned long mask = (1UL << page_shift) - 1;

-    if (ioba & ~IOMMU_PAGE_MASK(tbl))
+    if (ioba & mask)
        return -EINVAL;

-    ioba >>= tbl->it_page_shift;
-    if (ioba < tbl->it_offset)
+    ioba >>= page_shift;
+    if (ioba < offset)
        return -EINVAL;

-    if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
+    if ((ioba + 1) > (offset + size))
        return -EINVAL;

    return 0;
}
-EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
+EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

-int iommu_tce_put_param_check(struct iommu_table *tbl,
-        unsigned long ioba, unsigned long tce)
+int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
-    if (tce & ~IOMMU_PAGE_MASK(tbl))
-        return -EINVAL;
-
-    if (ioba & ~IOMMU_PAGE_MASK(tbl))
-        return -EINVAL;
-
-    ioba >>= tbl->it_page_shift;
-    if (ioba < tbl->it_offset)
-        return -EINVAL;
-
-    if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
+    unsigned long mask = (1UL << page_shift) - 1;
+
+    if (gpa & mask)
        return -EINVAL;

    return 0;
}
-EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
+EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
        unsigned long *hpa, enum dma_data_direction *direction)
@@ -1004,6 +1014,31 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);

+#ifdef CONFIG_PPC_BOOK3S_64
+long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
+        unsigned long *hpa, enum dma_data_direction *direction)
+{
+    long ret;
+
+    ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+    if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+            (*direction == DMA_BIDIRECTIONAL))) {
+        struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
+
+        if (likely(pg)) {
+            SetPageDirty(pg);
+        } else {
+            tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+            ret = -EFAULT;
+        }
+    }
+
+    return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
+#endif

int iommu_take_ownership(struct iommu_table *tbl)
{
    unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
...
@@ -67,6 +67,7 @@ config KVM_BOOK3S_64
    select KVM_BOOK3S_64_HANDLER
    select KVM
    select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
+    select SPAPR_TCE_IOMMU if IOMMU_SUPPORT
    ---help---
      Support running unmodified book3s_64 and book3s_32 guest kernels
      in virtual machines on book3s_64 host processors.
...
@@ -197,6 +197,24 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+{
+    /* might as well deliver this straight away */
+    kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
+}
+
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+{
+    /* might as well deliver this straight away */
+    kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
+}
+
+void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
+{
+    /* might as well deliver this straight away */
+    kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
+}
+
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
    kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
...
@@ -319,6 +319,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
    gpte->may_execute = true;
    gpte->may_read = false;
    gpte->may_write = false;
+    gpte->wimg = r & HPTE_R_WIMG;

    switch (pp) {
    case 0:
...
@@ -145,6 +145,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
    else
        kvmppc_mmu_flush_icache(pfn);

+    rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
+
    /*
     * Use 64K pages if possible; otherwise, on 64K page kernels,
     * we need to transfer 4 more bits from guest real to host real addr.
@@ -177,12 +179,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
    ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
                       hpsize, hpsize, MMU_SEGSIZE_256M);

-    if (ret < 0) {
+    if (ret == -1) {
        /* If we couldn't map a primary PTE, try a secondary */
        hash = ~hash;
        vflags ^= HPTE_V_SECONDARY;
        attempt++;
        goto map_again;
+    } else if (ret < 0) {
+        r = -EIO;
+        goto out_unlock;
    } else {
        trace_kvm_book3s_64_mmu_map(rflags, hpteg,
                        vpn, hpaddr, orig_pte);
...
This diff is collapsed.
This diff is collapsed.
...
@@ -503,10 +503,18 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
        break;
unprivileged:
    default:
-        printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
-#ifndef DEBUG_SPR
-        emulated = EMULATE_FAIL;
-#endif
+        pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
+        if (sprn & 0x10) {
+            if (kvmppc_get_msr(vcpu) & MSR_PR) {
+                kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+                emulated = EMULATE_AGAIN;
+            }
+        } else {
+            if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
+                kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+                emulated = EMULATE_AGAIN;
+            }
+        }
        break;
    }

@@ -648,10 +656,20 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
        break;
    default:
unprivileged:
-        printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
-#ifndef DEBUG_SPR
-        emulated = EMULATE_FAIL;
-#endif
+        pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
+        if (sprn & 0x10) {
+            if (kvmppc_get_msr(vcpu) & MSR_PR) {
+                kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+                emulated = EMULATE_AGAIN;
+            }
+        } else {
+            if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
+                sprn == 4 || sprn == 5 || sprn == 6) {
+                kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+                emulated = EMULATE_AGAIN;
+            }
+        }
        break;
    }
...
@@ -3624,11 +3624,9 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
        return -EIO;

    mutex_lock(&kvm->lock);
+    if (!kvm->arch.pimap)
+        goto unlock;

-    if (kvm->arch.pimap == NULL) {
-        mutex_unlock(&kvm->lock);
-        return 0;
-    }
    pimap = kvm->arch.pimap;

    for (i = 0; i < pimap->n_mapped; i++) {
@@ -3650,7 +3648,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
     * We don't free this structure even when the count goes to
     * zero. The structure is freed when we destroy the VM.
     */
+ unlock:
    mutex_unlock(&kvm->lock);
    return 0;
}
...
@@ -537,8 +537,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
    int r = RESUME_GUEST;
    int relocated;
    int page_found = 0;
-    struct kvmppc_pte pte;
-    bool is_mmio = false;
+    struct kvmppc_pte pte = { 0 };
    bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
    bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
    u64 vsid;
@@ -616,8 +615,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        /* Page not found in guest SLB */
        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
        kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
-    } else if (!is_mmio &&
-           kvmppc_visible_gpa(vcpu, pte.raddr)) {
+    } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
        if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
            /*
             * There is already a host HPTE there, presumably
@@ -627,7 +625,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
            kvmppc_mmu_unmap_page(vcpu, &pte);
        }
        /* The guest's PTE is not mapped yet. Map on the host */
-        kvmppc_mmu_map_page(vcpu, &pte, iswrite);
+        if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
+            /* Exit KVM if mapping failed */
+            run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+            return RESUME_HOST;
+        }
        if (data)
            vcpu->stat.sp_storage++;
        else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
...
@@ -300,6 +300,11 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
    kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+{
+    kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
+}
+
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
    kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
...
@@ -797,9 +797,8 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
    host_tlb_params[0].sets =
        host_tlb_params[0].entries / host_tlb_params[0].ways;
    host_tlb_params[1].sets = 1;
-    vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
-                       host_tlb_params[1].entries,
+    vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
+                       sizeof(*vcpu_e500->h2g_tlb1_rmap),
                       GFP_KERNEL);
    if (!vcpu_e500->h2g_tlb1_rmap)
        return -EINVAL;
...
@@ -259,10 +259,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)

    case OP_31_XOP_MFSPR:
        emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
+        if (emulated == EMULATE_AGAIN) {
+            emulated = EMULATE_DONE;
+            advance = 0;
+        }
        break;

    case OP_31_XOP_MTSPR:
        emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
+        if (emulated == EMULATE_AGAIN) {
+            emulated = EMULATE_DONE;
+            advance = 0;
+        }
        break;

    case OP_31_XOP_TLBSYNC:
...
This diff is collapsed.
...
@@ -37,6 +37,7 @@
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
+#include <asm/switch_to.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -533,6 +534,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
    case KVM_CAP_SPAPR_TCE:
    case KVM_CAP_SPAPR_TCE_64:
+        /* fallthrough */
+    case KVM_CAP_SPAPR_TCE_VFIO:
    case KVM_CAP_PPC_RTAS:
    case KVM_CAP_PPC_FIXUP_HCALL:
    case KVM_CAP_PPC_ENABLE_HCALL:
@@ -801,6 +804,129 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
        kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

+#ifdef CONFIG_VSX
+static inline int kvmppc_get_vsr_dword_offset(int index)
+{
+    int offset;
+
+    if ((index != 0) && (index != 1))
+        return -1;
+
+#ifdef __BIG_ENDIAN
+    offset = index;
+#else
+    offset = 1 - index;
+#endif
+
+    return offset;
+}
+
+static inline int kvmppc_get_vsr_word_offset(int index)
+{
+    int offset;
+
+    if ((index > 3) || (index < 0))
+        return -1;
+
+#ifdef __BIG_ENDIAN
+    offset = index;
+#else
+    offset = 3 - index;
+#endif
+    return offset;
+}
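The index flip keeps "element 0" meaning the architected high-order part of the 128-bit VSR even when the host stores the halves little-endian. A worked check (illustrative, not in the patch):

/* On a little-endian host:
 *   kvmppc_get_vsr_dword_offset(0) == 1  (dword 0 is the high half)
 *   kvmppc_get_vsr_word_offset(3)  == 0  (word 3 is the lowest word)
 * On big-endian both return the index unchanged.
 */
static inline void vsr_offset_selfcheck(void)
{
#ifndef __BIG_ENDIAN
    WARN_ON(kvmppc_get_vsr_dword_offset(0) != 1);
    WARN_ON(kvmppc_get_vsr_word_offset(3) != 0);
#endif
}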
+static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
+    u64 gpr)
+{
+    union kvmppc_one_reg val;
+    int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
+    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+    if (offset == -1)
+        return;
+
+    if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+        val.vval = VCPU_VSX_VR(vcpu, index);
+        val.vsxval[offset] = gpr;
+        VCPU_VSX_VR(vcpu, index) = val.vval;
+    } else {
+        VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+    }
+}
+
+static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
+    u64 gpr)
+{
+    union kvmppc_one_reg val;
+    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+    if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+        val.vval = VCPU_VSX_VR(vcpu, index);
+        val.vsxval[0] = gpr;
+        val.vsxval[1] = gpr;
+        VCPU_VSX_VR(vcpu, index) = val.vval;
+    } else {
+        VCPU_VSX_FPR(vcpu, index, 0) = gpr;
+        VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+    }
+}
+
+static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
+    u32 gpr32)
+{
+    union kvmppc_one_reg val;
+    int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
+    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+    int dword_offset, word_offset;
+
+    if (offset == -1)
+        return;
+
+    if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+        val.vval = VCPU_VSX_VR(vcpu, index);
+        val.vsx32val[offset] = gpr32;
+        VCPU_VSX_VR(vcpu, index) = val.vval;
+    } else {
+        dword_offset = offset / 2;
+        word_offset = offset % 2;
+        val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+        val.vsx32val[word_offset] = gpr32;
+        VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+    }
+}
+#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_PPC_FPU
+static inline u64 sp_to_dp(u32 fprs)
+{
+    u64 fprd;
+
+    preempt_disable();
+    enable_kernel_fp();
+    asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
+         : "fr0");
+    preempt_enable();
+    return fprd;
+}
+
+static inline u32 dp_to_sp(u64 fprd)
+{
+    u32 fprs;
+
+    preempt_disable();
+    enable_kernel_fp();
+    asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
+         : "fr0");
+    preempt_enable();
+    return fprs;
+}
+#else
+#define sp_to_dp(x) (x)
+#define dp_to_sp(x) (x)
+#endif /* CONFIG_PPC_FPU */
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                      struct kvm_run *run)
{
@@ -827,6 +953,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
        }
    }

+    /* conversion between single and double precision */
+    if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
+        gpr = sp_to_dp(gpr);
+
    if (vcpu->arch.mmio_sign_extend) {
        switch (run->mmio.len) {
#ifdef CONFIG_PPC64
@@ -843,8 +973,6 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
        }
    }

-    kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
-
    switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
    case KVM_MMIO_REG_GPR:
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
@@ -860,6 +988,17 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
        VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
        vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
        break;
+#endif
+#ifdef CONFIG_VSX
+    case KVM_MMIO_REG_VSX:
+        if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
+            kvmppc_set_vsr_dword(vcpu, gpr);
+        else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
+            kvmppc_set_vsr_word(vcpu, gpr);
+        else if (vcpu->arch.mmio_vsx_copy_type ==
+                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
+            kvmppc_set_vsr_dword_dump(vcpu, gpr);
+        break;
#endif
    default:
        BUG();
@@ -927,6 +1066,35 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
    return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

+#ifdef CONFIG_VSX
+int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+            unsigned int rt, unsigned int bytes,
+            int is_default_endian, int mmio_sign_extend)
+{
+    enum emulation_result emulated = EMULATE_DONE;
+
+    /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
+    if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
+        (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+        return EMULATE_FAIL;
+    }
+
+    while (vcpu->arch.mmio_vsx_copy_nums) {
+        emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+            is_default_endian, mmio_sign_extend);
+
+        if (emulated != EMULATE_DONE)
+            break;
+
+        vcpu->arch.paddr_accessed += run->mmio.len;
+
+        vcpu->arch.mmio_vsx_copy_nums--;
+        vcpu->arch.mmio_vsx_offset++;
+    }
+
+    return emulated;
+}
+#endif /* CONFIG_VSX */
+
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
            u64 val, unsigned int bytes, int is_default_endian)
{
@@ -952,6 +1120,9 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
    vcpu->mmio_needed = 1;
    vcpu->mmio_is_write = 1;

+    if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
+        val = dp_to_sp(val);
+
    /* Store the value at the lowest bytes in 'data'. */
    if (!host_swabbed) {
        switch (bytes) {
@@ -985,6 +1156,129 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

+#ifdef CONFIG_VSX
+static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+    u32 dword_offset, word_offset;
+    union kvmppc_one_reg reg;
+    int vsx_offset = 0;
+    int copy_type = vcpu->arch.mmio_vsx_copy_type;
+    int result = 0;
+
+    switch (copy_type) {
+    case KVMPPC_VSX_COPY_DWORD:
+        vsx_offset =
+            kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
+
+        if (vsx_offset == -1) {
+            result = -1;
+            break;
+        }
+
+        if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+            *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+        } else {
+            reg.vval = VCPU_VSX_VR(vcpu, rs);
+            *val = reg.vsxval[vsx_offset];
+        }
+        break;
+
+    case KVMPPC_VSX_COPY_WORD:
+        vsx_offset =
+            kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
+
+        if (vsx_offset == -1) {
+            result = -1;
+            break;
+        }
+
+        if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+            dword_offset = vsx_offset / 2;
+            word_offset = vsx_offset % 2;
+            reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+            *val = reg.vsx32val[word_offset];
+        } else {
+            reg.vval = VCPU_VSX_VR(vcpu, rs);
+            *val = reg.vsx32val[vsx_offset];
+        }
+        break;
+
+    default:
+        result = -1;
+        break;
+    }
+
+    return result;
+}
+
+int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+            int rs, unsigned int bytes, int is_default_endian)
+{
+    u64 val;
+    enum emulation_result emulated = EMULATE_DONE;
+
+    vcpu->arch.io_gpr = rs;
+
+    /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
+    if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
+        (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+        return EMULATE_FAIL;
+    }
+
+    while (vcpu->arch.mmio_vsx_copy_nums) {
+        if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
+            return EMULATE_FAIL;
+
+        emulated = kvmppc_handle_store(run, vcpu,
+             val, bytes, is_default_endian);
+
+        if (emulated != EMULATE_DONE)
+            break;
+
+        vcpu->arch.paddr_accessed += run->mmio.len;
+
+        vcpu->arch.mmio_vsx_copy_nums--;
+        vcpu->arch.mmio_vsx_offset++;
+    }
+
+    return emulated;
+}
+
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
+            struct kvm_run *run)
+{
+    enum emulation_result emulated = EMULATE_FAIL;
+    int r;
+
+    vcpu->arch.paddr_accessed += run->mmio.len;
+
+    if (!vcpu->mmio_is_write) {
+        emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+             run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
+    } else {
+        emulated = kvmppc_handle_vsx_store(run, vcpu,
+             vcpu->arch.io_gpr, run->mmio.len, 1);
+    }
+
+    switch (emulated) {
+    case EMULATE_DO_MMIO:
+        run->exit_reason = KVM_EXIT_MMIO;
+        r = RESUME_HOST;
+        break;
+    case EMULATE_FAIL:
+        pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
+        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+        run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+        r = RESUME_HOST;
+        break;
+    default:
+        r = RESUME_GUEST;
+        break;
+    }
+
+    return r;
+}
+#endif /* CONFIG_VSX */
+
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
    int r = 0;
@@ -1087,13 +1381,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
    int r;
    sigset_t sigsaved;

-    if (vcpu->sigset_active)
-        sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
    if (vcpu->mmio_needed) {
+        vcpu->mmio_needed = 0;
        if (!vcpu->mmio_is_write)
            kvmppc_complete_mmio_load(vcpu, run);
-        vcpu->mmio_needed = 0;
+#ifdef CONFIG_VSX
+        if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+            vcpu->arch.mmio_vsx_copy_nums--;
+            vcpu->arch.mmio_vsx_offset++;
+        }
+
+        if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+            r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+            if (r == RESUME_HOST) {
+                vcpu->mmio_needed = 1;
+                return r;
+            }
+        }
+#endif
    } else if (vcpu->arch.osi_needed) {
        u64 *gprs = run->osi.gprs;
        int i;
@@ -1115,6 +1420,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
#endif
    }

+    if (vcpu->sigset_active)
+        sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
    if (run->immediate_exit)
        r = -EINTR;
    else
...
@@ -314,6 +314,25 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

+struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
+        unsigned long ua, unsigned long size)
+{
+    struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+
+    list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
+            next) {
+        if ((mem->ua <= ua) &&
+                (ua + size <= mem->ua +
+                 (mem->entries << PAGE_SHIFT))) {
+            ret = mem;
+            break;
+        }
+    }
+
+    return ret;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
+
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
        unsigned long ua, unsigned long entries)
{
@@ -345,6 +364,26 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

+long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+        unsigned long ua, unsigned long *hpa)
+{
+    const long entry = (ua - mem->ua) >> PAGE_SHIFT;
+    void *va = &mem->hpas[entry];
+    unsigned long *pa;
+
+    if (entry >= mem->entries)
+        return -EFAULT;
+
+    pa = (void *) vmalloc_to_phys(va);
+    if (!pa)
+        return -EFAULT;
+
+    *hpa = *pa | (ua & ~PAGE_MASK);
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
    if (atomic64_inc_not_zero(&mem->mapped))
...
@@ -1424,8 +1424,7 @@ static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe
        iommu_group_put(pe->table_group.group);
        BUG_ON(pe->table_group.group);
    }
-    pnv_pci_ioda2_table_free_pages(tbl);
-    iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
+    iommu_tce_table_put(tbl);
}

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
@@ -1860,6 +1859,17 @@ static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
    return ret;
}

+static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
+        unsigned long *hpa, enum dma_data_direction *direction)
+{
+    long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+
+    if (!ret)
+        pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);
+
+    return ret;
+}
+
#endif

static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
@@ -1874,6 +1884,7 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
    .set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API
    .exchange = pnv_ioda1_tce_xchg,
+    .exchange_rm = pnv_ioda1_tce_xchg_rm,
#endif
    .clear = pnv_ioda1_tce_free,
    .get = pnv_tce_get,
@@ -1948,7 +1959,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
{
    struct iommu_table_group_link *tgl;

-    list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+    list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
        struct pnv_ioda_pe *pe = container_of(tgl->table_group,
                struct pnv_ioda_pe, table_group);
        struct pnv_phb *phb = pe->phb;
@@ -2004,6 +2015,17 @@ static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
    return ret;
}

+static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
+        unsigned long *hpa, enum dma_data_direction *direction)
+{
+    long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+
+    if (!ret)
+        pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);
+
+    return ret;
+}
+
#endif

static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
@@ -2017,13 +2039,13 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
static void pnv_ioda2_table_free(struct iommu_table *tbl)
{
    pnv_pci_ioda2_table_free_pages(tbl);
-    iommu_free_table(tbl, "pnv");
}

static struct iommu_table_ops pnv_ioda2_iommu_ops = {
    .set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
    .exchange = pnv_ioda2_tce_xchg,
+    .exchange_rm = pnv_ioda2_tce_xchg_rm,
#endif
    .clear = pnv_ioda2_tce_free,
    .get = pnv_tce_get,
@@ -2203,7 +2225,7 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
        __free_pages(tce_mem, get_order(tce32_segsz * segs));
    if (tbl) {
        pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
-        iommu_free_table(tbl, "pnv");
+        iommu_tce_table_put(tbl);
    }
}
@@ -2293,16 +2315,16 @@ static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
    if (!tbl)
        return -ENOMEM;

+    tbl->it_ops = &pnv_ioda2_iommu_ops;
+
    ret = pnv_pci_ioda2_table_alloc_pages(nid,
            bus_offset, page_shift, window_size,
            levels, tbl);
    if (ret) {
-        iommu_free_table(tbl, "pnv");
+        iommu_tce_table_put(tbl);
        return ret;
    }

-    tbl->it_ops = &pnv_ioda2_iommu_ops;
-
    *ptbl = tbl;

    return 0;
@@ -2343,7 +2365,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
    if (rc) {
        pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
                rc);
-        pnv_ioda2_table_free(tbl);
+        iommu_tce_table_put(tbl);
        return rc;
    }
@@ -2431,7 +2453,7 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
    pnv_pci_ioda2_unset_window(&pe->table_group, 0);
    if (pe->pbus)
        pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
-    pnv_ioda2_table_free(tbl);
+    iommu_tce_table_put(tbl);
}

static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
@@ -3406,7 +3428,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
    }

    free_pages(tbl->it_base, get_order(tbl->it_size << 3));
-    iommu_free_table(tbl, "pnv");
+    iommu_tce_table_put(tbl);
}

static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
@@ -3433,7 +3455,7 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
    }

    pnv_pci_ioda2_table_free_pages(tbl);
-    iommu_free_table(tbl, "pnv");
+    iommu_tce_table_put(tbl);
}

static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
...
@@ -767,6 +767,7 @@ struct iommu_table *pnv_pci_table_alloc(int nid)

    tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
    INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+    kref_init(&tbl->it_kref);

    return tbl;
}
...
@@ -74,6 +74,7 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
        goto fail_exit;

    INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+    kref_init(&tbl->it_kref);
    tgl->table_group = table_group;
    list_add_rcu(&tgl->next, &tbl->it_group_list);

@@ -115,7 +116,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group,
        BUG_ON(table_group->group);
    }
#endif
-    iommu_free_table(tbl, node_name);
+    iommu_tce_table_put(tbl);

    kfree(table_group);
}
...
@@ -1318,7 +1318,7 @@ static void vio_dev_release(struct device *dev)
    struct iommu_table *tbl = get_iommu_table_base(dev);

    if (tbl)
-        iommu_free_table(tbl, of_node_full_name(dev->of_node));
+        iommu_tce_table_put(tbl);
    of_node_put(dev->of_node);
    kfree(to_vio_dev(dev));
}
...
@@ -680,7 +680,7 @@ static void tce_iommu_free_table(struct tce_container *container,
    unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

    tce_iommu_userspace_view_free(tbl, container->mm);
-    tbl->it_ops->free(tbl);
+    iommu_tce_table_put(tbl);
    decrement_locked_vm(container->mm, pages);
}
...
@@ -892,6 +892,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_MIPS_64BIT 139
#define KVM_CAP_S390_GS 140
#define KVM_CAP_S390_AIS 141
+#define KVM_CAP_SPAPR_TCE_VFIO 142

#ifdef KVM_CAP_IRQ_ROUTING
@@ -1096,6 +1097,7 @@ struct kvm_device_attr {
#define KVM_DEV_VFIO_GROUP 1
#define  KVM_DEV_VFIO_GROUP_ADD 1
#define  KVM_DEV_VFIO_GROUP_DEL 2
+#define  KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3

enum kvm_device_type {
    KVM_DEV_TYPE_FSL_MPIC_20 = 1,
@@ -1117,6 +1119,11 @@ enum kvm_device_type {
    KVM_DEV_TYPE_MAX,
};

+struct kvm_vfio_spapr_tce {
+    __s32 groupfd;
+    __s32 tablefd;
+};
+
/*
 * ioctls for VM fds
 */
...
@@ -20,6 +20,10 @@
#include <linux/vfio.h>
#include "vfio.h"

+#ifdef CONFIG_SPAPR_TCE_IOMMU
+#include <asm/kvm_ppc.h>
+#endif
+
struct kvm_vfio_group {
    struct list_head node;
    struct vfio_group *vfio_group;
@@ -89,6 +93,47 @@ static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
    return ret > 0;
}

+#ifdef CONFIG_SPAPR_TCE_IOMMU
+static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
+{
+    int (*fn)(struct vfio_group *);
+    int ret = -EINVAL;
+
+    fn = symbol_get(vfio_external_user_iommu_id);
+    if (!fn)
+        return ret;
+
+    ret = fn(vfio_group);
+
+    symbol_put(vfio_external_user_iommu_id);
+
+    return ret;
+}
+
+static struct iommu_group *kvm_vfio_group_get_iommu_group(
+        struct vfio_group *group)
+{
+    int group_id = kvm_vfio_external_user_iommu_id(group);
+
+    if (group_id < 0)
+        return NULL;
+
+    return iommu_group_get_by_id(group_id);
+}
+
+static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
+        struct vfio_group *vfio_group)
+{
+    struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);
+
+    if (WARN_ON_ONCE(!grp))
+        return;
+
+    kvm_spapr_tce_release_iommu_group(kvm, grp);
+    iommu_group_put(grp);
+}
+#endif
+
/*
 * Groups can use the same or different IOMMU domains. If the same then
 * adding a new group may change the coherency of groups we've previously
@@ -211,6 +256,9 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)

        mutex_unlock(&kv->lock);

+#ifdef CONFIG_SPAPR_TCE_IOMMU
+        kvm_spapr_tce_release_vfio_group(dev->kvm, vfio_group);
+#endif
        kvm_vfio_group_set_kvm(vfio_group, NULL);

        kvm_vfio_group_put_external_user(vfio_group);
@@ -218,6 +266,57 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
        kvm_vfio_update_coherency(dev);

        return ret;
+
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+    case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
+        struct kvm_vfio_spapr_tce param;
+        struct kvm_vfio *kv = dev->private;
+        struct vfio_group *vfio_group;
+        struct kvm_vfio_group *kvg;
+        struct fd f;
+        struct iommu_group *grp;
+
+        if (copy_from_user(&param, (void __user *)arg,
+                sizeof(struct kvm_vfio_spapr_tce)))
+            return -EFAULT;
+
+        f = fdget(param.groupfd);
+        if (!f.file)
+            return -EBADF;
+
+        vfio_group = kvm_vfio_group_get_external_user(f.file);
+        fdput(f);
+
+        if (IS_ERR(vfio_group))
+            return PTR_ERR(vfio_group);
+
+        grp = kvm_vfio_group_get_iommu_group(vfio_group);
+        if (WARN_ON_ONCE(!grp)) {
+            kvm_vfio_group_put_external_user(vfio_group);
+            return -EIO;
+        }
+
+        ret = -ENOENT;
+
+        mutex_lock(&kv->lock);
+
+        list_for_each_entry(kvg, &kv->group_list, node) {
+            if (kvg->vfio_group != vfio_group)
+                continue;
+
+            ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
+                    param.tablefd, grp);
+            break;
+        }
+
+        mutex_unlock(&kv->lock);
+
+        iommu_group_put(grp);
+        kvm_vfio_group_put_external_user(vfio_group);
+
+        return ret;
+    }
+#endif /* CONFIG_SPAPR_TCE_IOMMU */
    }

    return -ENXIO;
@@ -242,6 +341,9 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
    switch (attr->attr) {
    case KVM_DEV_VFIO_GROUP_ADD:
    case KVM_DEV_VFIO_GROUP_DEL:
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+    case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
+#endif
        return 0;
    }

@@ -257,6 +359,9 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
    struct kvm_vfio_group *kvg, *tmp;

    list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+        kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
+#endif
        kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
        kvm_vfio_group_put_external_user(kvg->vfio_group);
        list_del(&kvg->node);
...