Commit b1af23d8 authored by Alexey Kardashevskiy's avatar Alexey Kardashevskiy Committed by Paul Mackerras

KVM: PPC: iommu: Unify TCE checking

This reworks the helpers for checking TCE update parameters in such a way
that they can be used in KVM.

This should cause no behavioral change.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent da6f59e1
...@@ -296,11 +296,21 @@ static inline void iommu_restore(void) ...@@ -296,11 +296,21 @@ static inline void iommu_restore(void)
#endif #endif
/* The API to support IOMMU operations for VFIO */ /* The API to support IOMMU operations for VFIO */
/* The API to support IOMMU operations for VFIO */

/*
 * Raw-geometry TCE checkers: they take the table geometry as plain
 * integers instead of a struct iommu_table so KVM (which may run in
 * real mode without a table pointer) can share them.
 *
 * Both return 0 on success and non-zero on an invalid parameter.
 */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

/*
 * Compatibility wrappers preserving the old iommu_table-based interface.
 * Non-zero means the parameters are invalid.
 *
 * clear: tbl->it_ops->clear() supports no value but 0, so any non-zero
 * tce_value is itself a failure.
 */
#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) ||              \
				(tce_value))

/* put: a single page at @ioba plus an IOMMU-page-aligned @gpa. */
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
				iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
......
...@@ -178,8 +178,10 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, ...@@ -178,8 +178,10 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args); struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);

/*
 * Validate an IO address range against the TCE table window.
 *
 * Wraps the shared iommu_tce_check_ioba() helper and converts its
 * non-zero failure into the hypercall error H_PARAMETER; returns
 * H_SUCCESS otherwise.  Safe in both real mode (HV KVM) and virtual
 * mode (PR KVM) since it only does integer arithmetic on stt fields.
 */
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)

extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
		unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
......
...@@ -963,47 +963,36 @@ void iommu_flush_tce(struct iommu_table *tbl) ...@@ -963,47 +963,36 @@ void iommu_flush_tce(struct iommu_table *tbl)
} }
EXPORT_SYMBOL_GPL(iommu_flush_tce); EXPORT_SYMBOL_GPL(iommu_flush_tce);
/*
 * iommu_tce_check_ioba - validate a TCE update's IO bus address range.
 * @page_shift: IOMMU page shift (page size is 1UL << page_shift)
 * @offset:     first valid IOMMU page index of the table window
 * @size:       window size in IOMMU pages
 * @ioba:       IO bus address of the first page to update, in bytes
 * @npages:     number of IOMMU pages being updated
 *
 * Returns 0 when @ioba is IOMMU-page aligned and the whole range
 * [ioba, ioba + npages) fits in [offset, offset + size); -EINVAL
 * otherwise.  Takes raw geometry rather than a struct iommu_table so
 * KVM can use it too.
 */
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	/*
	 * Check the full @npages range: checking only the first page
	 * ("ioba + 1") would leave @npages unused and let multi-page
	 * updates run past the end of the window.
	 *
	 * NOTE(review): ioba + npages may wrap for hostile values; the
	 * old kvmppc_ioba_validate() had an explicit wrap check — confirm
	 * callers bound npages before relying on this.
	 */
	if ((ioba + npages) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
/*
 * iommu_tce_check_gpa - validate the guest physical address held in a TCE.
 * @page_shift: IOMMU page shift (page size is 1UL << page_shift)
 * @gpa:        guest physical address encoded in the TCE
 *
 * Returns 0 when @gpa is aligned to the IOMMU page size, -EINVAL
 * otherwise.  Takes the shift rather than a struct iommu_table so KVM
 * can share it.
 */
int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction) unsigned long *hpa, enum dma_data_direction *direction)
......
...@@ -61,27 +61,6 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, ...@@ -61,27 +61,6 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
} }
EXPORT_SYMBOL_GPL(kvmppc_find_table); EXPORT_SYMBOL_GPL(kvmppc_find_table);
/*
* Validates IO address.
*
* WARNING: This will be called in real-mode on HV KVM and virtual
* mode on PR KVM
*/
long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
unsigned long ioba, unsigned long npages)
{
unsigned long mask = (1ULL << stt->page_shift) - 1;
unsigned long idx = ioba >> stt->page_shift;
if ((ioba & mask) || (idx < stt->offset) ||
(idx - stt->offset + npages > stt->size) ||
(idx + npages < idx))
return H_PARAMETER;
return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
/* /*
* Validates TCE address. * Validates TCE address.
* At the moment flags and page mask are validated. * At the moment flags and page mask are validated.
...@@ -95,10 +74,14 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate); ...@@ -95,10 +74,14 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
*/ */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{ {
unsigned long page_mask = ~((1ULL << stt->page_shift) - 1); unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ); enum dma_data_direction dir = iommu_tce_direction(tce);
/* Allow userspace to poison TCE table */
if (dir == DMA_NONE)
return H_SUCCESS;
if (tce & mask) if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_PARAMETER; return H_PARAMETER;
return H_SUCCESS; return H_SUCCESS;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.