Commit aae4c8e2 authored by Tom Murphy, committed by Joerg Roedel

iommu: Rename iommu_tlb_* functions to iommu_iotlb_*

To keep naming consistent we should stick with *iotlb*. This patch
renames a few remaining functions.
Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Link: https://lore.kernel.org/r/20200817210051.13546-1-murphyt7@tcd.ie
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent f75aef39
...@@ -471,7 +471,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, ...@@ -471,7 +471,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
WARN_ON(unmapped != size); WARN_ON(unmapped != size);
if (!cookie->fq_domain) if (!cookie->fq_domain)
iommu_tlb_sync(domain, &iotlb_gather); iommu_iotlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size); iommu_dma_free_iova(cookie, dma_addr, size);
} }
......
...@@ -762,7 +762,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group, ...@@ -762,7 +762,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
} }
iommu_flush_tlb_all(domain); iommu_flush_iotlb_all(domain);
out: out:
iommu_put_resv_regions(dev, &mappings); iommu_put_resv_regions(dev, &mappings);
...@@ -2316,7 +2316,7 @@ size_t iommu_unmap(struct iommu_domain *domain, ...@@ -2316,7 +2316,7 @@ size_t iommu_unmap(struct iommu_domain *domain,
iommu_iotlb_gather_init(&iotlb_gather); iommu_iotlb_gather_init(&iotlb_gather);
ret = __iommu_unmap(domain, iova, size, &iotlb_gather); ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
iommu_tlb_sync(domain, &iotlb_gather); iommu_iotlb_sync(domain, &iotlb_gather);
return ret; return ret;
} }
......
...@@ -774,7 +774,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain, ...@@ -774,7 +774,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
long unlocked = 0; long unlocked = 0;
struct vfio_regions *entry, *next; struct vfio_regions *entry, *next;
iommu_tlb_sync(domain->domain, iotlb_gather); iommu_iotlb_sync(domain->domain, iotlb_gather);
list_for_each_entry_safe(entry, next, regions, list) { list_for_each_entry_safe(entry, next, regions, list) {
unlocked += vfio_unpin_pages_remote(dma, unlocked += vfio_unpin_pages_remote(dma,
......
...@@ -31,7 +31,7 @@ enum io_pgtable_fmt { ...@@ -31,7 +31,7 @@ enum io_pgtable_fmt {
* single page. IOMMUs that cannot batch TLB invalidation * single page. IOMMUs that cannot batch TLB invalidation
* operations efficiently will typically issue them here, but * operations efficiently will typically issue them here, but
* others may decide to update the iommu_iotlb_gather structure * others may decide to update the iommu_iotlb_gather structure
* and defer the invalidation until iommu_tlb_sync() instead. * and defer the invalidation until iommu_iotlb_sync() instead.
* *
* Note that these can all be called in atomic context and must therefore * Note that these can all be called in atomic context and must therefore
* not block. * not block.
......
...@@ -514,13 +514,13 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) ...@@ -514,13 +514,13 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags); unsigned long iova, int flags);
static inline void iommu_flush_tlb_all(struct iommu_domain *domain) static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{ {
if (domain->ops->flush_iotlb_all) if (domain->ops->flush_iotlb_all)
domain->ops->flush_iotlb_all(domain); domain->ops->flush_iotlb_all(domain);
} }
static inline void iommu_tlb_sync(struct iommu_domain *domain, static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather) struct iommu_iotlb_gather *iotlb_gather)
{ {
if (domain->ops->iotlb_sync) if (domain->ops->iotlb_sync)
...@@ -543,7 +543,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, ...@@ -543,7 +543,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
if (gather->pgsize != size || if (gather->pgsize != size ||
end < gather->start || start > gather->end) { end < gather->start || start > gather->end) {
if (gather->pgsize) if (gather->pgsize)
iommu_tlb_sync(domain, gather); iommu_iotlb_sync(domain, gather);
gather->pgsize = size; gather->pgsize = size;
} }
...@@ -725,11 +725,11 @@ static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, ...@@ -725,11 +725,11 @@ static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
return 0; return 0;
} }
static inline void iommu_flush_tlb_all(struct iommu_domain *domain) static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{ {
} }
static inline void iommu_tlb_sync(struct iommu_domain *domain, static inline void iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather) struct iommu_iotlb_gather *iotlb_gather)
{ {
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment