Commit e0fb1b36 authored by Linus Torvalds

Merge tag 'iommu-updates-v4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "The updates include:

   - rate limiting for the VT-d fault handler

   - remove statistics code from the AMD IOMMU driver.  It is unused and
     should be replaced by something more generic if needed

   - per-domain pagesize-bitmaps in IOMMU core code to support systems
     with different types of IOMMUs

   - support for ACPI devices in the AMD IOMMU driver

   - 4GB mode support for Mediatek IOMMU driver

   - ARM-SMMU updates from Will Deacon:
      - support for 64k pages with SMMUv1 implementations (e.g. MMU-401)
      - remove open-coded 64-bit MMIO accessors
      - initial support for 16-bit VMIDs, as supported by some ThunderX
        SMMU implementations
      - a couple of errata workarounds for silicon in the field

   - various fixes here and there"

* tag 'iommu-updates-v4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (44 commits)
  iommu/arm-smmu: Use per-domain page sizes.
  iommu/amd: Remove statistics code
  iommu/dma: Finish optimising higher-order allocations
  iommu: Allow selecting page sizes per domain
  iommu: of: enforce const-ness of struct iommu_ops
  iommu: remove unused priv field from struct iommu_ops
  iommu/dma: Implement scatterlist segment merging
  iommu/arm-smmu: Clear cache lock bit of ACR
  iommu/arm-smmu: Support SMMUv1 64KB supplement
  iommu/arm-smmu: Decouple context format from kernel config
  iommu/arm-smmu: Tidy up 64-bit/atomic I/O accesses
  io-64-nonatomic: Add relaxed accessor variants
  iommu/arm-smmu: Work around MMU-500 prefetch errata
  iommu/arm-smmu: Convert ThunderX workaround to new method
  iommu/arm-smmu: Differentiate specific implementations
  iommu/arm-smmu: Workaround for ThunderX erratum #27704
  iommu/arm-smmu: Add support for 16 bit VMID
  iommu/amd: Move get_device_id() and friends to beginning of file
  iommu/amd: Don't use IS_ERR_VALUE to check integer values
  iommu/amd: Signedness bug in acpihid_device_group()
  ...
parents f4c80d5a 6c0b43df
@@ -53,7 +53,9 @@ stable kernels.
| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
| ARM | Cortex-A57 | #852523 | N/A |
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
+| Cavium | ThunderX SMMUv2 | #27704 | N/A |
@@ -16,6 +16,7 @@ conditions.
"arm,mmu-400"
"arm,mmu-401"
"arm,mmu-500"
+"cavium,smmu-v2"
depending on the particular implementation and/or the
version of the architecture implemented.
...
@@ -1787,6 +1787,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
PCI device 00:14.0 write the parameter as:
ivrs_hpet[0]=00:14.0
+ivrs_acpihid [HW,X86_64]
+Provide an override to the ACPI-HID:UID<->DEVICE-ID
+mapping provided in the IVRS ACPI table. For
+example, to map UART-HID:UID AMD0020:0 to
+PCI device 00:14.5 write the parameter as:
+ivrs_acpihid[00:14.5]=AMD0020:0
js= [HW,JOY] Analog joystick
See Documentation/input/joystick.txt.
...
@@ -118,7 +118,7 @@ static inline unsigned long dma_max_pfn(struct device *dev)
#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-struct iommu_ops *iommu, bool coherent);
+const struct iommu_ops *iommu, bool coherent);
#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);
...
@@ -2215,7 +2215,7 @@ static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
}
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-struct iommu_ops *iommu)
+const struct iommu_ops *iommu)
{
struct dma_iommu_mapping *mapping;
@@ -2253,7 +2253,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
#else
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-struct iommu_ops *iommu)
+const struct iommu_ops *iommu)
{
return false;
}
@@ -2270,7 +2270,7 @@ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-struct iommu_ops *iommu, bool coherent)
+const struct iommu_ops *iommu, bool coherent)
{
struct dma_map_ops *dma_ops;
...
@@ -48,7 +48,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-struct iommu_ops *iommu, bool coherent);
+const struct iommu_ops *iommu, bool coherent);
#define arch_setup_dma_ops arch_setup_dma_ops
#ifdef CONFIG_IOMMU_DMA
...
@@ -562,8 +562,8 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
struct page **pages;
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
-pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
-flush_page);
+pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
+handle, flush_page);
if (!pages)
return NULL;
@@ -947,13 +947,13 @@ void arch_teardown_dma_ops(struct device *dev)
#else
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-struct iommu_ops *iommu)
+const struct iommu_ops *iommu)
{ }
#endif /* CONFIG_IOMMU_DMA */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-struct iommu_ops *iommu, bool coherent)
+const struct iommu_ops *iommu, bool coherent)
{
if (!dev->archdata.dma_ops)
dev->archdata.dma_ops = &swiotlb_dma_ops;
...
@@ -76,8 +76,7 @@ config IOMMU_DMA
config FSL_PAMU
bool "Freescale IOMMU support"
-depends on PPC32
-depends on PPC_E500MC || COMPILE_TEST
+depends on PPC_E500MC || (COMPILE_TEST && PPC)
select IOMMU_API
select GENERIC_ALLOCATOR
help
@@ -124,16 +123,6 @@ config AMD_IOMMU
your BIOS for an option to enable it or if you have an IVRS ACPI
table.
-config AMD_IOMMU_STATS
-bool "Export AMD IOMMU statistics to debugfs"
-depends on AMD_IOMMU
-select DEBUG_FS
----help---
-This option enables code in the AMD IOMMU driver to collect various
-statistics about whats happening in the driver and exports that
-information to userspace via debugfs.
-If unsure, say N.
config AMD_IOMMU_V2
tristate "AMD IOMMU Version 2 driver"
depends on AMD_IOMMU
...
@@ -527,6 +527,19 @@ struct amd_iommu {
#endif
};
+#define ACPIHID_UID_LEN 256
+#define ACPIHID_HID_LEN 9
+struct acpihid_map_entry {
+struct list_head list;
+u8 uid[ACPIHID_UID_LEN];
+u8 hid[ACPIHID_HID_LEN];
+u16 devid;
+u16 root_devid;
+bool cmd_line;
+struct iommu_group *group;
+};
struct devid_map {
struct list_head list;
u8 id;
@@ -537,6 +550,7 @@ struct devid_map {
/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
+extern struct list_head acpihid_map;
/*
* List with all IOMMUs in the system. This list is not locked because it is
@@ -668,30 +682,4 @@ static inline int get_hpet_devid(int id)
return -EINVAL;
}
-#ifdef CONFIG_AMD_IOMMU_STATS
-struct __iommu_counter {
-char *name;
-struct dentry *dent;
-u64 value;
-};
-#define DECLARE_STATS_COUNTER(nm) \
-static struct __iommu_counter nm = { \
-.name = #nm, \
-}
-#define INC_STATS_COUNTER(name) name.value += 1
-#define ADD_STATS_COUNTER(name, x) name.value += (x)
-#define SUB_STATS_COUNTER(name, x) name.value -= (x)
-#else /* CONFIG_AMD_IOMMU_STATS */
-#define DECLARE_STATS_COUNTER(name)
-#define INC_STATS_COUNTER(name)
-#define ADD_STATS_COUNTER(name, x)
-#define SUB_STATS_COUNTER(name, x)
-#endif /* CONFIG_AMD_IOMMU_STATS */
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
@@ -590,6 +590,7 @@ struct arm_smmu_device {
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
+unsigned long pgsize_bitmap;
#define ARM_SMMU_MAX_ASIDS (1 << 16)
unsigned int asid_bits;
@@ -1516,8 +1517,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
return 0;
}
-static struct iommu_ops arm_smmu_ops;
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
int ret;
@@ -1555,7 +1554,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
}
pgtbl_cfg = (struct io_pgtable_cfg) {
-.pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
.tlb = &arm_smmu_gather_ops,
@@ -1566,7 +1565,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (!pgtbl_ops)
return -ENOMEM;
-arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
@@ -2410,7 +2409,6 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
u32 reg;
bool coherent;
-unsigned long pgsize_bitmap = 0;
/* IDR0 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@@ -2541,13 +2539,16 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
/* Page sizes */
if (reg & IDR5_GRAN64K)
-pgsize_bitmap |= SZ_64K | SZ_512M;
+smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
if (reg & IDR5_GRAN16K)
-pgsize_bitmap |= SZ_16K | SZ_32M;
+smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
if (reg & IDR5_GRAN4K)
-pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
+smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
-arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;
+if (arm_smmu_ops.pgsize_bitmap == -1UL)
+arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
+else
+arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
/* Output address size */
switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
...
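The last hunk above changes how the ops-wide page-size bitmap is built: it starts out as -1UL ("not set yet"), the first SMMU probed replaces it with its own sizes, and later SMMUs OR theirs in, so the ops bitmap becomes the union across instances while each domain keeps the exact bitmap of the SMMU it is attached to. A minimal standalone sketch of that accumulation pattern; the helper name is hypothetical and not part of the patch:

/* Hypothetical helper mirroring the probe-time accumulation above. */
static void accumulate_pgsizes(unsigned long *ops_bitmap, unsigned long smmu_bitmap)
{
	if (*ops_bitmap == -1UL)
		*ops_bitmap = smmu_bitmap;	/* first instance: take its sizes */
	else
		*ops_bitmap |= smmu_bitmap;	/* later instances: advertise the union */
}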
@@ -94,7 +94,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
return -ENODEV;
/* Use the smallest supported page size for IOVA granularity */
-order = __ffs(domain->ops->pgsize_bitmap);
+order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
end_pfn = (base + size - 1) >> order;
@@ -190,11 +190,15 @@ static void __iommu_dma_free_pages(struct page **pages, int count)
kvfree(pages);
}
-static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
+static struct page **__iommu_dma_alloc_pages(unsigned int count,
+unsigned long order_mask, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, array_size = count * sizeof(*pages);
-unsigned int order = MAX_ORDER;
+order_mask &= (2U << MAX_ORDER) - 1;
+if (!order_mask)
+return NULL;
if (array_size <= PAGE_SIZE)
pages = kzalloc(array_size, GFP_KERNEL);
@@ -208,36 +212,38 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
while (count) {
struct page *page = NULL;
-int j;
+unsigned int order_size;
/*
* Higher-order allocations are a convenience rather
* than a necessity, hence using __GFP_NORETRY until
-* falling back to single-page allocations.
+* falling back to minimum-order allocations.
*/
-for (order = min_t(unsigned int, order, __fls(count));
-order > 0; order--) {
-page = alloc_pages(gfp | __GFP_NORETRY, order);
+for (order_mask &= (2U << __fls(count)) - 1;
+order_mask; order_mask &= ~order_size) {
+unsigned int order = __fls(order_mask);
+order_size = 1U << order;
+page = alloc_pages((order_mask - order_size) ?
+gfp | __GFP_NORETRY : gfp, order);
if (!page)
continue;
-if (PageCompound(page)) {
-if (!split_huge_page(page))
-break;
-__free_pages(page, order);
-} else {
+if (!order)
+break;
+if (!PageCompound(page)) {
split_page(page, order);
break;
+} else if (!split_huge_page(page)) {
+break;
}
+__free_pages(page, order);
}
-if (!page)
-page = alloc_page(gfp);
if (!page) {
__iommu_dma_free_pages(pages, i);
return NULL;
}
-j = 1 << order;
-count -= j;
-while (j--)
+count -= order_size;
+while (order_size--)
pages[i++] = page++;
}
return pages;
@@ -267,6 +273,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
* attached to an iommu_dma_domain
* @size: Size of buffer in bytes
* @gfp: Allocation flags
+* @attrs: DMA attributes for this allocation
* @prot: IOMMU mapping flags
* @handle: Out argument for allocated DMA handle
* @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
@@ -278,8 +285,8 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
* Return: Array of struct page pointers describing the buffer,
* or NULL on failure.
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
-gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+struct dma_attrs *attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -288,11 +295,22 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size,
struct page **pages;
struct sg_table sgt;
dma_addr_t dma_addr;
-unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
*handle = DMA_ERROR_CODE;
-pages = __iommu_dma_alloc_pages(count, gfp);
+min_size = alloc_sizes & -alloc_sizes;
+if (min_size < PAGE_SIZE) {
+min_size = PAGE_SIZE;
+alloc_sizes |= PAGE_SIZE;
+} else {
+size = ALIGN(size, min_size);
+}
+if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+alloc_sizes = min_size;
+count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
if (!pages)
return NULL;
@@ -389,26 +407,58 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
/*
* Prepare a successfully-mapped scatterlist to give back to the caller.
-* Handling IOVA concatenation can come later, if needed
+*
+* At this point the segments are already laid out by iommu_dma_map_sg() to
+* avoid individually crossing any boundaries, so we merely need to check a
+* segment's start address to avoid concatenating across one.
*/
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
dma_addr_t dma_addr)
{
-struct scatterlist *s;
-int i;
+struct scatterlist *s, *cur = sg;
+unsigned long seg_mask = dma_get_seg_boundary(dev);
+unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
+int i, count = 0;
for_each_sg(sg, s, nents, i) {
-/* Un-swizzling the fields here, hence the naming mismatch */
-unsigned int s_offset = sg_dma_address(s);
+/* Restore this segment's original unaligned fields first */
+unsigned int s_iova_off = sg_dma_address(s);
unsigned int s_length = sg_dma_len(s);
-unsigned int s_dma_len = s->length;
+unsigned int s_iova_len = s->length;
-s->offset += s_offset;
+s->offset += s_iova_off;
s->length = s_length;
-sg_dma_address(s) = dma_addr + s_offset;
-dma_addr += s_dma_len;
+sg_dma_address(s) = DMA_ERROR_CODE;
+sg_dma_len(s) = 0;
+/*
+* Now fill in the real DMA data. If...
+* - there is a valid output segment to append to
+* - and this segment starts on an IOVA page boundary
+* - but doesn't fall at a segment boundary
+* - and wouldn't make the resulting output segment too long
+*/
+if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
+(cur_len + s_length <= max_len)) {
+/* ...then concatenate it with the previous one */
+cur_len += s_length;
+} else {
+/* Otherwise start the next output segment */
+if (i > 0)
+cur = sg_next(cur);
+cur_len = s_length;
+count++;
+sg_dma_address(cur) = dma_addr + s_iova_off;
+}
+sg_dma_len(cur) = cur_len;
+dma_addr += s_iova_len;
+if (s_length + s_iova_off < s_iova_len)
+cur_len = 0;
}
-return i;
+return count;
}
/*
@@ -446,34 +496,40 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct scatterlist *s, *prev = NULL;
dma_addr_t dma_addr;
size_t iova_len = 0;
+unsigned long mask = dma_get_seg_boundary(dev);
int i;
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
* trickery we can modify the list in-place, but reversibly, by
-* hiding the original data in the as-yet-unused DMA fields.
+* stashing the unaligned parts in the as-yet-unused DMA fields.
*/
for_each_sg(sg, s, nents, i) {
-size_t s_offset = iova_offset(iovad, s->offset);
+size_t s_iova_off = iova_offset(iovad, s->offset);
size_t s_length = s->length;
+size_t pad_len = (mask - iova_len + 1) & mask;
-sg_dma_address(s) = s_offset;
+sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
-s->offset -= s_offset;
-s_length = iova_align(iovad, s_length + s_offset);
+s->offset -= s_iova_off;
+s_length = iova_align(iovad, s_length + s_iova_off);
s->length = s_length;
/*
-* The simple way to avoid the rare case of a segment
-* crossing the boundary mask is to pad the previous one
-* to end at a naturally-aligned IOVA for this one's size,
-* at the cost of potentially over-allocating a little.
+* Due to the alignment of our single IOVA allocation, we can
+* depend on these assumptions about the segment boundary mask:
+* - If mask size >= IOVA size, then the IOVA range cannot
+* possibly fall across a boundary, so we don't care.
+* - If mask size < IOVA size, then the IOVA range must start
+* exactly on a boundary, therefore we can lay things out
+* based purely on segment lengths without needing to know
+* the actual addresses beforehand.
+* - The mask must be a power of 2, so pad_len == 0 if
+* iova_len == 0, thus we cannot dereference prev the first
+* time through here (i.e. before it has a meaningful value).
*/
-if (prev) {
-size_t pad_len = roundup_pow_of_two(s_length);
-pad_len = (pad_len - iova_len) & (pad_len - 1);
+if (pad_len && pad_len < s_length - 1) {
prev->length += pad_len;
iova_len += pad_len;
}
...
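The reworked __iommu_dma_alloc_pages() above treats the allowed allocation sizes as a bitmap of orders: each pass picks the largest order still present in the mask and not bigger than the remaining page count, and clears that bit on failure so the next pass falls back to a smaller size. A small userspace sketch of just that mask walk, where fls_u() stands in for the kernel's __fls() and the mask/count values are made up for illustration:

#include <stdio.h>

static unsigned int fls_u(unsigned long x)	/* index of the highest set bit */
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long order_mask = (1UL << 9) | (1UL << 4) | 1UL;	/* e.g. 2M, 64K, 4K as page orders */
	unsigned long count = 300;					/* pages left to allocate */

	for (order_mask &= (2UL << fls_u(count)) - 1; order_mask; ) {
		unsigned int order = fls_u(order_mask);

		printf("try order %u (%lu pages)\n", order, 1UL << order);
		order_mask &= ~(1UL << order);	/* pretend this order failed, fall back */
	}
	return 0;
}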
@@ -1579,18 +1579,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
-pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
-"fault index %llx\n"
-"INTR-REMAP:[fault reason %02d] %s\n",
-(source_id >> 8), PCI_SLOT(source_id & 0xFF),
+pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
+source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else
-pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
-"fault addr %llx \n"
-"DMAR:[fault reason %02d] %s\n",
-(type ? "DMA Read" : "DMA Write"),
-(source_id >> 8), PCI_SLOT(source_id & 0xFF),
+pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
+type ? "DMA Read" : "DMA Write",
+source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
return 0;
}
@@ -1602,10 +1598,17 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
int reg, fault_index;
u32 fault_status;
unsigned long flag;
+bool ratelimited;
+static DEFINE_RATELIMIT_STATE(rs,
+DEFAULT_RATELIMIT_INTERVAL,
+DEFAULT_RATELIMIT_BURST);
+/* Disable printing, simply clear the fault when ratelimited */
+ratelimited = !__ratelimit(&rs);
raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-if (fault_status)
+if (fault_status && !ratelimited)
pr_err("DRHD: handling fault status reg %x\n", fault_status);
/* TBD: ignore advanced fault log currently */
@@ -1627,6 +1630,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
if (!(data & DMA_FRCD_F))
break;
+if (!ratelimited) {
fault_reason = dma_frcd_fault_reason(data);
type = dma_frcd_type(data);
@@ -1637,12 +1641,15 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
guest_addr = dmar_readq(iommu->reg + reg +
fault_index * PRIMARY_FAULT_REG_LEN);
guest_addr = dma_frcd_page_addr(guest_addr);
+}
/* clear the fault */
writel(DMA_FRCD_F, iommu->reg + reg +
fault_index * PRIMARY_FAULT_REG_LEN + 12);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+if (!ratelimited)
dmar_fault_do_one(iommu, type, fault_reason,
source_id, guest_addr);
...
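The dmar_fault() hunk above is the VT-d fault-handler rate limiting mentioned in the pull message: the interrupt still walks and clears every fault record, but the pr_err() calls are skipped once __ratelimit() runs out of budget in the current interval. A minimal sketch of the same pattern in an unrelated, hypothetical handler (my_irq_handler and its message are illustrative, not from the patch):

#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	bool ratelimited = !__ratelimit(&rs);

	/* ...always acknowledge and clear the hardware condition here... */

	if (!ratelimited)
		pr_err("device reported a fault\n");	/* suppressed during a storm */

	return IRQ_HANDLED;
}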
@@ -1143,7 +1143,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
-/* free page table pages. last level pte should already be cleared */
+/* clear last level (leaf) ptes and free page table pages. */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
...
@@ -121,6 +121,8 @@
#define ARM_V7S_TEX_MASK 0x7
#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
+#define ARM_V7S_ATTR_MTK_4GB BIT(9) /* MTK extend it for 4GB mode */
/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
#define ARM_V7S_CONT_PAGE_TEX_MASK (ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)
@@ -258,9 +260,10 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
struct io_pgtable_cfg *cfg)
{
bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
-arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S |
-ARM_V7S_ATTR_TEX(1);
+arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;
+if (!(prot & IOMMU_MMIO))
+pte |= ARM_V7S_ATTR_TEX(1);
if (ap) {
pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
if (!(prot & IOMMU_WRITE))
@@ -270,7 +273,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
if ((prot & IOMMU_NOEXEC) && ap)
pte |= ARM_V7S_ATTR_XN(lvl);
-if (prot & IOMMU_CACHE)
+if (prot & IOMMU_MMIO)
+pte |= ARM_V7S_ATTR_B;
+else if (prot & IOMMU_CACHE)
pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;
return pte;
@@ -279,10 +284,13 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
{
int prot = IOMMU_READ;
+arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
-if (pte & (ARM_V7S_PTE_AP_RDONLY << ARM_V7S_ATTR_SHIFT(lvl)))
+if (attr & ARM_V7S_PTE_AP_RDONLY)
prot |= IOMMU_WRITE;
-if (pte & ARM_V7S_ATTR_C)
+if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
+prot |= IOMMU_MMIO;
+else if (pte & ARM_V7S_ATTR_C)
prot |= IOMMU_CACHE;
return prot;
@@ -364,6 +372,9 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
pte |= ARM_V7S_ATTR_NS_SECTION;
+if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
+pte |= ARM_V7S_ATTR_MTK_4GB;
if (num_entries > 1)
pte = arm_v7s_pte_to_cont(pte, lvl);
@@ -625,7 +636,13 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
-IO_PGTABLE_QUIRK_TLBI_ON_MAP))
+IO_PGTABLE_QUIRK_TLBI_ON_MAP |
+IO_PGTABLE_QUIRK_ARM_MTK_4GB))
+return NULL;
+/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
+if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
+!(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
return NULL;
data = kmalloc(sizeof(*data), GFP_KERNEL);
...
@@ -355,7 +355,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
-if (prot & IOMMU_CACHE)
+if (prot & IOMMU_MMIO)
+pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
+<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
} else {
@@ -364,7 +367,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
pte |= ARM_LPAE_PTE_HAP_READ;
if (prot & IOMMU_WRITE)
pte |= ARM_LPAE_PTE_HAP_WRITE;
-if (prot & IOMMU_CACHE)
+if (prot & IOMMU_MMIO)
+pte |= ARM_LPAE_PTE_MEMATTR_DEV;
+else if (prot & IOMMU_CACHE)
pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
else
pte |= ARM_LPAE_PTE_MEMATTR_NC;
...
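Both page-table formats above now honour a new IOMMU_MMIO protection flag by selecting Device memory attributes instead of Normal Write-Back or Non-Cacheable. A hedged sketch of how a caller might use it, for example when mapping an MSI doorbell page; the function and variable names are hypothetical:

#include <linux/iommu.h>

static int map_doorbell(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t doorbell_phys)
{
	/* IOMMU_MMIO makes the io-pgtable code pick Device attributes */
	return iommu_map(domain, iova, doorbell_phys, PAGE_SIZE,
			 IOMMU_WRITE | IOMMU_MMIO);
}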
@@ -25,8 +25,7 @@
#include "io-pgtable.h"
static const struct io_pgtable_init_fns *
-io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
-{
+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
[ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
[ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
...
@@ -60,10 +60,16 @@ struct io_pgtable_cfg {
* IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
* (unmapped) entries but the hardware might do so anyway, perform
* TLB maintenance when mapping as well as when unmapping.
+*
+* IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
+* PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
+* when the SoC is in "4GB mode" and they can only access the high
+* remap of DRAM (0x1_00000000 to 0x1_ffffffff).
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
+#define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
...
@@ -337,9 +337,9 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
if (!domain || domain->type != IOMMU_DOMAIN_DMA)
return 0;
-BUG_ON(!domain->ops->pgsize_bitmap);
-pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
+BUG_ON(!domain->pgsize_bitmap);
+pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);
iommu_get_dm_regions(dev, &mappings);
@@ -1069,6 +1069,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
domain->ops = bus->iommu_ops;
domain->type = type;
+/* Assume all sizes by default; the driver may override this later */
+domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
return domain;
}
@@ -1293,7 +1295,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
pgsize = (1UL << (pgsize_idx + 1)) - 1;
/* throw away page sizes not supported by the hardware */
-pgsize &= domain->ops->pgsize_bitmap;
+pgsize &= domain->pgsize_bitmap;
/* make sure we're still sane */
BUG_ON(!pgsize);
@@ -1315,14 +1317,14 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
int ret = 0;
if (unlikely(domain->ops->map == NULL ||
-domain->ops->pgsize_bitmap == 0UL))
+domain->pgsize_bitmap == 0UL))
return -ENODEV;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
/* find out the minimum page size supported */
-min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
/*
* both the virtual address and the physical one, as well as
@@ -1369,14 +1371,14 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unsigned long orig_iova = iova;
if (unlikely(domain->ops->unmap == NULL ||
-domain->ops->pgsize_bitmap == 0UL))
+domain->pgsize_bitmap == 0UL))
return -ENODEV;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
/* find out the minimum page size supported */
-min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
/*
* The virtual address, as well as the size of the mapping, must be
@@ -1422,10 +1424,10 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
unsigned int i, min_pagesz;
int ret;
-if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+if (unlikely(domain->pgsize_bitmap == 0UL))
return 0;
-min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
@@ -1506,7 +1508,7 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
break;
case DOMAIN_ATTR_PAGING:
paging = data;
-*paging = (domain->ops->pgsize_bitmap != 0UL);
+*paging = (domain->pgsize_bitmap != 0UL);
break;
case DOMAIN_ATTR_WINDOWS:
count = data;
...
@@ -11,6 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
@@ -56,7 +57,7 @@
#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5)
#define REG_MMU_IVRP_PADDR 0x114
-#define F_MMU_IVRP_PA_SET(pa) ((pa) >> 1)
+#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31))
#define REG_MMU_INT_CONTROL0 0x120
#define F_L2_MULIT_HIT_EN BIT(0)
@@ -125,6 +126,7 @@ struct mtk_iommu_data {
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group;
struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
+bool enable_4GB;
};
static struct iommu_ops mtk_iommu_ops;
@@ -257,6 +259,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
.iommu_dev = data->dev,
};
+if (data->enable_4GB)
+dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
if (!dom->iop) {
dev_err(data->dev, "Failed to alloc io pgtable\n");
@@ -264,7 +269,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
}
/* Update our support page sizes bitmap */
-mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
data->base + REG_MMU_PT_BASE_ADDR);
@@ -530,7 +535,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
-writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
data->base + REG_MMU_IVRP_PADDR);
writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
@@ -591,6 +596,9 @@ static int mtk_iommu_probe(struct platform_device *pdev)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
+/* Whether the current dram is over 4GB */
+data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(data->base))
@@ -690,7 +698,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
-writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
base + REG_MMU_IVRP_PADDR);
return 0;
}
...
@@ -98,12 +98,12 @@ EXPORT_SYMBOL_GPL(of_get_dma_window);
struct of_iommu_node {
struct list_head list;
struct device_node *np;
-struct iommu_ops *ops;
+const struct iommu_ops *ops;
};
static LIST_HEAD(of_iommu_list);
static DEFINE_SPINLOCK(of_iommu_lock);
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
{
struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
@@ -119,10 +119,10 @@ void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
spin_unlock(&of_iommu_lock);
}
-struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
struct of_iommu_node *node;
-struct iommu_ops *ops = NULL;
+const struct iommu_ops *ops = NULL;
spin_lock(&of_iommu_lock);
list_for_each_entry(node, &of_iommu_list, list)
@@ -134,12 +134,12 @@ struct iommu_ops *of_iommu_get_ops(struct device_node *np)
return ops;
}
-struct iommu_ops *of_iommu_configure(struct device *dev,
+const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
struct of_phandle_args iommu_spec;
struct device_node *np;
-struct iommu_ops *ops = NULL;
+const struct iommu_ops *ops = NULL;
int idx = 0;
/*
...
@@ -628,10 +628,12 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
break;
default:
fn = NULL;
-BUG();
break;
}
+if (WARN_ON(!fn))
+return -EINVAL;
prot = get_iopte_attr(e);
spin_lock(&obj->page_table_lock);
@@ -987,7 +989,6 @@ static int omap_iommu_remove(struct platform_device *pdev)
{
struct omap_iommu *obj = platform_get_drvdata(pdev);
-iopgtable_clear_entry_all(obj);
omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev);
@@ -1161,7 +1162,8 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
* should never fail, but please keep this around to ensure
* we keep the hardware happy
*/
-BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
+if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
+goto fail_align;
clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
spin_lock_init(&omap_domain->lock);
@@ -1172,6 +1174,8 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
return &omap_domain->domain;
+fail_align:
+kfree(omap_domain->pgtable);
fail_nomem:
kfree(omap_domain);
out:
...
@@ -1049,6 +1049,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
for (i = 0; i < pdev->num_resources; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+if (!res)
+continue;
iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(iommu->bases[i]))
continue;
...
@@ -88,7 +88,7 @@ void of_dma_configure(struct device *dev, struct device_node *np)
int ret;
bool coherent;
unsigned long offset;
-struct iommu_ops *iommu;
+const struct iommu_ops *iommu;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
...
@@ -407,7 +407,7 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
mutex_lock(&iommu->lock);
list_for_each_entry(domain, &iommu->domain_list, next)
-bitmap &= domain->domain->ops->pgsize_bitmap;
+bitmap &= domain->domain->pgsize_bitmap;
mutex_unlock(&iommu->lock);
/*
...
@@ -191,7 +191,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readl_relaxed readl
#endif
-#ifndef readq_relaxed
+#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq
#endif
@@ -207,7 +207,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define writel_relaxed writel
#endif
-#ifndef writeq_relaxed
+#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq
#endif
...
@@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
* These implement the bulk of the relevant DMA mapping callbacks, but require
* the arch code to take care of attributes and cache maintenance
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
-gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+struct dma_attrs *attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);
...
@@ -514,7 +514,7 @@ extern u64 dma_get_required_mask(struct device *dev);
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-u64 size, struct iommu_ops *iommu,
+u64 size, const struct iommu_ops *iommu,
bool coherent) { }
#endif
...
@@ -21,6 +21,23 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
writel(val, addr);
}
+static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr)
+{
+const volatile u32 __iomem *p = addr;
+u32 low, high;
+high = readl_relaxed(p + 1);
+low = readl_relaxed(p);
+return low + ((u64)high << 32);
+}
+static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
+{
+writel_relaxed(val >> 32, addr + 4);
+writel_relaxed(val, addr);
+}
#ifndef readq
#define readq hi_lo_readq
#endif
@@ -29,4 +46,12 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
#define writeq hi_lo_writeq
#endif
+#ifndef readq_relaxed
+#define readq_relaxed hi_lo_readq_relaxed
+#endif
+#ifndef writeq_relaxed
+#define writeq_relaxed hi_lo_writeq_relaxed
+#endif
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
@@ -21,6 +21,23 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
writel(val >> 32, addr + 4);
}
+static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr)
+{
+const volatile u32 __iomem *p = addr;
+u32 low, high;
+low = readl_relaxed(p);
+high = readl_relaxed(p + 1);
+return low + ((u64)high << 32);
+}
+static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
+{
+writel_relaxed(val, addr);
+writel_relaxed(val >> 32, addr + 4);
+}
#ifndef readq
#define readq lo_hi_readq
#endif
@@ -29,4 +46,12 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
#define writeq lo_hi_writeq
#endif
+#ifndef readq_relaxed
+#define readq_relaxed lo_hi_readq_relaxed
+#endif
+#ifndef writeq_relaxed
+#define writeq_relaxed lo_hi_writeq_relaxed
+#endif
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
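These two headers now provide readq_relaxed()/writeq_relaxed() fallbacks built from two 32-bit relaxed accesses, which is what lets the ARM SMMU driver drop its open-coded 64-bit MMIO helpers. A brief usage sketch under the assumption of a hypothetical device register at offset 0x40; only the include path and the readq_relaxed() call reflect the real API:

#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>	/* low word first, as the device requires */

#define MYDEV_REG_ADDR64	0x40		/* hypothetical 64-bit register */

static u64 mydev_read_addr(void __iomem *base)
{
	/* native readq_relaxed() on 64-bit builds, lo_hi_readq_relaxed() fallback on 32-bit */
	return readq_relaxed(base + MYDEV_REG_ADDR64);
}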
@@ -30,6 +30,7 @@
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
+#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
struct iommu_ops;
struct iommu_group;
@@ -78,6 +79,7 @@ struct iommu_domain_geometry {
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
+unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
@@ -155,8 +157,7 @@ struct iommu_dm_region {
* @domain_set_windows: Set the number of windows for a domain
* @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
-* @pgsize_bitmap: bitmap of supported page sizes
-* @priv: per-instance data private to the iommu driver
+* @pgsize_bitmap: bitmap of all possible supported page sizes
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -198,7 +199,6 @@ struct iommu_ops {
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
unsigned long pgsize_bitmap;
-void *priv;
};
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
...
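With iommu_domain now carrying its own pgsize_bitmap, the core copies the ops-wide bitmap into every new domain (see the iommu.c hunk earlier) and a driver whose instances support different sizes overwrites that copy once the domain is bound to real hardware, as arm-smmu-v3 and mtk_iommu do above. A hedged sketch of that driver-side step; the mydrv_* names and the lookup helper are hypothetical:

#include <linux/iommu.h>

struct mydrv_iommu {
	unsigned long hw_pgsize_bitmap;	/* sizes probed from this instance's ID registers */
};

/* Hypothetical lookup; a real driver keeps this in its own per-device data. */
static struct mydrv_iommu *dev_to_mydrv(struct device *dev);

static int mydrv_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct mydrv_iommu *iommu = dev_to_mydrv(dev);

	/*
	 * The core initialised domain->pgsize_bitmap from iommu_ops.pgsize_bitmap
	 * at allocation time; narrow it to what this instance actually supports
	 * before iommu_map() derives its minimum page size from it.
	 */
	domain->pgsize_bitmap = iommu->hw_pgsize_bitmap;
	return 0;
}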
@@ -12,7 +12,7 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
size_t *size);
extern void of_iommu_init(void);
-extern struct iommu_ops *of_iommu_configure(struct device *dev,
+extern const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np);
#else
@@ -25,7 +25,7 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
}
static inline void of_iommu_init(void) { }
-static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
return NULL;
@@ -33,8 +33,8 @@ static inline struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
-struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
extern struct of_device_id __iommu_of_table;
...