Commit e5c37228 authored by Linus Torvalds

Merge tag 'iommu-updates-v4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:

 - OF_IOMMU support for the Rockchip iommu driver so that it can use
   generic DT bindings

 - rework of locking in the AMD IOMMU interrupt remapping code to make
   it work better in RT kernels

 - support for improved iotlb flushing in the AMD IOMMU driver

 - support for 52-bit physical and virtual addressing in the ARM-SMMU

 - various other small fixes and cleanups

* tag 'iommu-updates-v4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (53 commits)
  iommu/io-pgtable-arm: Avoid warning with 32-bit phys_addr_t
  iommu/rockchip: Support sharing IOMMU between masters
  iommu/rockchip: Add runtime PM support
  iommu/rockchip: Fix error handling in init
  iommu/rockchip: Use OF_IOMMU to attach devices automatically
  iommu/rockchip: Use IOMMU device for dma mapping operations
  dt-bindings: iommu/rockchip: Add clock property
  iommu/rockchip: Control clocks needed to access the IOMMU
  iommu/rockchip: Fix TLB flush of secondary IOMMUs
  iommu/rockchip: Use iopoll helpers to wait for hardware
  iommu/rockchip: Fix error handling in attach
  iommu/rockchip: Request irqs in rk_iommu_probe()
  iommu/rockchip: Fix error handling in probe
  iommu/rockchip: Prohibit unbind and remove
  iommu/amd: Return proper error code in irq_remapping_alloc()
  iommu/amd: Make amd_iommu_devtable_lock a spin_lock
  iommu/amd: Drop the lock while allocating new irq remap table
  iommu/amd: Factor out setting the remap table for a devid
  iommu/amd: Use `table' instead `irt' as variable name in amd_iommu_update_ga()
  iommu/amd: Remove the special case from alloc_irq_table()
  ...
parents 1fe43114 d4f96fd5
@@ -11,6 +11,8 @@ Required Properties:
     the device is compatible with the R-Car Gen2 VMSA-compatible IPMMU.
     - "renesas,ipmmu-r8a73a4" for the R8A73A4 (R-Mobile APE6) IPMMU.
+    - "renesas,ipmmu-r8a7743" for the R8A7743 (RZ/G1M) IPMMU.
+    - "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
     - "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
     - "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
     - "renesas,ipmmu-r8a7793" for the R8A7793 (R-Car M2-N) IPMMU.
@@ -19,7 +21,8 @@ Required Properties:
     - "renesas,ipmmu-r8a7796" for the R8A7796 (R-Car M3-W) IPMMU.
     - "renesas,ipmmu-r8a77970" for the R8A77970 (R-Car V3M) IPMMU.
     - "renesas,ipmmu-r8a77995" for the R8A77995 (R-Car D3) IPMMU.
-    - "renesas,ipmmu-vmsa" for generic R-Car Gen2 VMSA-compatible IPMMU.
+    - "renesas,ipmmu-vmsa" for generic R-Car Gen2 or RZ/G1 VMSA-compatible
+      IPMMU.
 - reg: Base address and size of the IPMMU registers.
 - interrupts: Specifiers for the MMU fault interrupts. For instances that
...
@@ -14,6 +14,11 @@ Required properties:
                 "single-master" device, and needs no additional information
                 to associate with its master device. See:
                 Documentation/devicetree/bindings/iommu/iommu.txt
+- clocks      : A list of clocks required for the IOMMU to be accessible by
+                the host CPU.
+- clock-names : Should contain the following:
+	"iface" - Main peripheral bus clock (PCLK/HCL) (required)
+	"aclk"  - AXI bus clock (required)
 
 Optional properties:
 - rockchip,disable-mmu-reset : Don't use the mmu reset operation.
@@ -27,5 +32,7 @@ Example:
 		reg = <0xff940300 0x100>;
 		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "vopl_mmu";
+		clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>;
+		clock-names = "aclk", "iface";
 		#iommu-cells = <0>;
 	};
...
@@ -34,6 +34,7 @@
 struct iort_its_msi_chip {
 	struct list_head	list;
 	struct fwnode_handle	*fw_node;
+	phys_addr_t		base_addr;
 	u32			translation_id;
 };
 
@@ -156,14 +157,16 @@ static LIST_HEAD(iort_msi_chip_list);
 static DEFINE_SPINLOCK(iort_msi_chip_lock);
 
 /**
- * iort_register_domain_token() - register domain token and related ITS ID
- * to the list from where we can get it back later on.
+ * iort_register_domain_token() - register domain token along with related
+ * ITS ID and base address to the list from where we can get it back later on.
  * @trans_id: ITS ID.
+ * @base: ITS base address.
  * @fw_node: Domain token.
  *
  * Returns: 0 on success, -ENOMEM if no memory when allocating list element
  */
-int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
+int iort_register_domain_token(int trans_id, phys_addr_t base,
+			       struct fwnode_handle *fw_node)
 {
 	struct iort_its_msi_chip *its_msi_chip;
 
@@ -173,6 +176,7 @@ int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
 
 	its_msi_chip->fw_node = fw_node;
 	its_msi_chip->translation_id = trans_id;
+	its_msi_chip->base_addr = base;
 
 	spin_lock(&iort_msi_chip_lock);
 	list_add(&its_msi_chip->list, &iort_msi_chip_list);
@@ -569,6 +573,24 @@ int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
 	return -ENODEV;
 }
 
+static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
+{
+	struct iort_its_msi_chip *its_msi_chip;
+	int ret = -ENODEV;
+
+	spin_lock(&iort_msi_chip_lock);
+	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
+		if (its_msi_chip->translation_id == its_id) {
+			*base = its_msi_chip->base_addr;
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock(&iort_msi_chip_lock);
+
+	return ret;
+}
+
 /**
  * iort_dev_find_its_id() - Find the ITS identifier for a device
  * @dev: The device.
@@ -754,6 +776,24 @@ static inline bool iort_iommu_driver_enabled(u8 type)
 }
 
 #ifdef CONFIG_IOMMU_API
+static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
+{
+	struct acpi_iort_node *iommu;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+	iommu = iort_get_iort_node(fwspec->iommu_fwnode);
+
+	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
+		struct acpi_iort_smmu_v3 *smmu;
+
+		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
+		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
+			return iommu;
+	}
+
+	return NULL;
+}
+
 static inline const struct iommu_ops *iort_fwspec_iommu_ops(
 				struct iommu_fwspec *fwspec)
 {
@@ -770,6 +810,69 @@ static inline int iort_add_device_replay(const struct iommu_ops *ops,
 	return err;
 }
 
+/**
+ * iort_iommu_msi_get_resv_regions - Reserved region driver helper
+ * @dev: Device from iommu_get_resv_regions()
+ * @head: Reserved region list from iommu_get_resv_regions()
+ *
+ * Returns: Number of msi reserved regions on success (0 if platform
+ *          doesn't require the reservation or no associated msi regions),
+ *          appropriate error value otherwise. The ITS interrupt translation
+ *          spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
+ *          are the msi reserved regions.
+ */
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+{
+	struct acpi_iort_its_group *its;
+	struct acpi_iort_node *iommu_node, *its_node = NULL;
+	int i, resv = 0;
+
+	iommu_node = iort_get_msi_resv_iommu(dev);
+	if (!iommu_node)
+		return 0;
+
+	/*
+	 * Current logic to reserve ITS regions relies on HW topologies
+	 * where a given PCI or named component maps its IDs to only one
+	 * ITS group; if a PCI or named component can map its IDs to
+	 * different ITS groups through IORT mappings this function has
+	 * to be reworked to ensure we reserve regions for all ITS groups
+	 * a given PCI or named component may map IDs to.
+	 */
+	for (i = 0; i < dev->iommu_fwspec->num_ids; i++) {
+		its_node = iort_node_map_id(iommu_node,
+					dev->iommu_fwspec->ids[i],
+					NULL, IORT_MSI_TYPE);
+		if (its_node)
+			break;
+	}
+
+	if (!its_node)
+		return 0;
+
+	/* Move to ITS specific data */
+	its = (struct acpi_iort_its_group *)its_node->node_data;
+
+	for (i = 0; i < its->its_count; i++) {
+		phys_addr_t base;
+
+		if (!iort_find_its_base(its->identifiers[i], &base)) {
+			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+			struct iommu_resv_region *region;
+
+			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
+							 prot, IOMMU_RESV_MSI);
+			if (region) {
+				list_add_tail(&region->list, head);
+				resv++;
+			}
+		}
+	}
+
+	return (resv == its->its_count) ? resv : -ENODEV;
+}
 #else
 static inline const struct iommu_ops *iort_fwspec_iommu_ops(
 				struct iommu_fwspec *fwspec)
@@ -777,6 +880,8 @@ static inline const struct iommu_ops *iort_fwspec_iommu_ops(
 static inline int iort_add_device_replay(const struct iommu_ops *ops,
 					 struct device *dev)
 { return 0; }
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+{ return 0; }
 #endif
 
 static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
...
This diff is collapsed.
@@ -1474,7 +1474,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 {
 	int ret;
 
-	spin_lock_init(&iommu->lock);
+	raw_spin_lock_init(&iommu->lock);
 
 	/* Add IOMMU to internal data structures */
 	list_add_tail(&iommu->list, &amd_iommu_list);
...
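The spinlock_t to raw_spinlock_t conversions in this series are what make the code behave on PREEMPT_RT, where an ordinary spinlock_t becomes a sleeping lock: the per-IOMMU hardware lock is taken in contexts that must not sleep. A minimal sketch of the resulting pattern around the lock initialized above (the helper name is illustrative, not from this merge):

	/* raw_spinlock_t keeps spinning even on PREEMPT_RT kernels */
	static void example_locked_hw_access(struct amd_iommu *iommu)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&iommu->lock, flags);
		/* program IOMMU MMIO / command-buffer state here */
		raw_spin_unlock_irqrestore(&iommu->lock, flags);
	}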
@@ -408,7 +408,7 @@ extern bool amd_iommu_iotlb_sup;
 #define IRQ_TABLE_ALIGNMENT	128
 
 struct irq_remap_table {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned min_index;
 	u32 *table;
 };
@@ -490,7 +490,7 @@ struct amd_iommu {
 	int index;
 
 	/* locks the accesses to the hardware */
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;
@@ -627,7 +627,7 @@ struct devid_map {
  */
 struct iommu_dev_data {
 	struct list_head list;		  /* For domain->dev_list */
-	struct list_head dev_data_list;	  /* For global dev_data_list */
+	struct llist_node dev_data_list;  /* For global dev_data_list */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	u16 devid;			  /* PCI Device ID */
 	u16 alias;			  /* Alias Device ID */
...
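Turning dev_data_list from a list_head into an llist_node is part of the same locking rework: an llist is a lock-free singly-linked list, so entries can be added and searched without taking the global devtable lock. A sketch of the idiom (simplified, with hypothetical names; modeled on the driver's alloc/search helpers):

	#include <linux/llist.h>

	static LLIST_HEAD(example_dev_data_list);

	static void example_add(struct iommu_dev_data *dev_data)
	{
		llist_add(&dev_data->dev_data_list, &example_dev_data_list);
	}

	static struct iommu_dev_data *example_search(u16 devid)
	{
		struct iommu_dev_data *dev_data;
		struct llist_node *node;

		if (llist_empty(&example_dev_data_list))
			return NULL;

		node = example_dev_data_list.first;
		llist_for_each_entry(dev_data, node, dev_data_list) {
			if (dev_data->devid == devid)
				return dev_data;
		}
		return NULL;
	}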
This diff is collapsed.
@@ -19,6 +19,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/acpi_iort.h>
 #include <linux/device.h>
 #include <linux/dma-iommu.h>
 #include <linux/gfp.h>
@@ -167,13 +168,18 @@ EXPORT_SYMBOL(iommu_put_dma_cookie);
  *
  * IOMMU drivers can use this to implement their .get_resv_regions callback
  * for general non-IOMMU-specific reservations. Currently, this covers host
- * bridge windows for PCI devices.
+ * bridge windows for PCI devices and GICv3 ITS region reservation on ACPI
+ * based ARM platforms that may require HW MSI reservation.
  */
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
 {
 	struct pci_host_bridge *bridge;
 	struct resource_entry *window;
 
+	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) &&
+	    iort_iommu_msi_get_resv_regions(dev, list) < 0)
+		return;
+
 	if (!dev_is_pci(dev))
 		return;
 
...
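iommu_dma_get_resv_regions() is the helper an IOMMU driver wires into its iommu_ops; after this change the same call also reserves the GICv3 ITS translation pages on ACPI/IORT platforms. A minimal sketch of a driver hooking it up (names are hypothetical; the put helper mirrors what drivers of this era open-code):

	static void example_get_resv_regions(struct device *dev,
					     struct list_head *head)
	{
		/* PCI bridge windows, plus ACPI ITS MSI regions as of this merge */
		iommu_dma_get_resv_regions(dev, head);
	}

	static void example_put_resv_regions(struct device *dev,
					     struct list_head *head)
	{
		struct iommu_resv_region *entry, *next;

		list_for_each_entry_safe(entry, next, head, list)
			kfree(entry);
	}

	static const struct iommu_ops example_iommu_ops = {
		/* ... */
		.get_resv_regions = example_get_resv_regions,
		.put_resv_regions = example_put_resv_regions,
	};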
@@ -806,7 +806,7 @@ int __init dmar_dev_scope_init(void)
 	return dmar_dev_scope_status;
 }
 
-void dmar_register_bus_notifier(void)
+void __init dmar_register_bus_notifier(void)
 {
 	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
 }
...
@@ -1239,17 +1239,6 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
 	return phys;
 }
 
-static struct iommu_group *get_device_iommu_group(struct device *dev)
-{
-	struct iommu_group *group;
-
-	group = iommu_group_get(dev);
-	if (!group)
-		group = iommu_group_alloc();
-
-	return group;
-}
-
 static int exynos_iommu_add_device(struct device *dev)
 {
 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
@@ -1345,7 +1334,7 @@ static const struct iommu_ops exynos_iommu_ops = {
 	.unmap = exynos_iommu_unmap,
 	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = exynos_iommu_iova_to_phys,
-	.device_group = get_device_iommu_group,
+	.device_group = generic_device_group,
 	.add_device = exynos_iommu_add_device,
 	.remove_device = exynos_iommu_remove_device,
 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
...
@@ -5043,7 +5043,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct page *freelist = NULL;
-	struct intel_iommu *iommu;
 	unsigned long start_pfn, last_pfn;
 	unsigned int npages;
 	int iommu_id, level = 0;
@@ -5062,12 +5061,9 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 
 	npages = last_pfn - start_pfn + 1;
 
-	for_each_domain_iommu(iommu_id, dmar_domain) {
-		iommu = g_iommus[iommu_id];
-
+	for_each_domain_iommu(iommu_id, dmar_domain)
 		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
 				      start_pfn, npages, !freelist, 0);
-	}
 
 	dma_free_pagelist(freelist);
 
...
@@ -396,6 +396,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 			       pasid_max - 1, GFP_KERNEL);
 		if (ret < 0) {
 			kfree(svm);
+			kfree(sdev);
 			goto out;
 		}
 		svm->pasid = ret;
@@ -422,17 +423,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 		iommu->pasid_table[svm->pasid].val = pasid_entry_val;
 
 		wmb();
-		/* In caching mode, we still have to flush with PASID 0 when
-		 * a PASID table entry becomes present. Not entirely clear
-		 * *why* that would be the case — surely we could just issue
-		 * a flush with the PASID value that we've changed? The PASID
-		 * is the index into the table, after all. It's not like domain
-		 * IDs in the case of the equivalent context-entry change in
-		 * caching mode. And for that matter it's not entirely clear why
-		 * a VMM would be in the business of caching the PASID table
-		 * anyway. Surely that can be left entirely to the guest? */
+
+		/*
+		 * Flush PASID cache when a PASID table entry becomes
+		 * present.
+		 */
 		if (cap_caching_mode(iommu->cap))
-			intel_flush_pasid_dev(svm, sdev, 0);
+			intel_flush_pasid_dev(svm, sdev, svm->pasid);
 	}
 	list_add_rcu(&sdev->list, &svm->devs);
...
@@ -357,8 +357,8 @@ static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
 	return false;
 }
 
-static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
-			   size_t, int, arm_v7s_iopte *);
+static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
+			      size_t, int, arm_v7s_iopte *);
 
 static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
 			    unsigned long iova, phys_addr_t paddr, int prot,
@@ -541,9 +541,10 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
 	return pte;
 }
 
-static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
-				   unsigned long iova, size_t size,
-				   arm_v7s_iopte blk_pte, arm_v7s_iopte *ptep)
+static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
+				      unsigned long iova, size_t size,
+				      arm_v7s_iopte blk_pte,
+				      arm_v7s_iopte *ptep)
 {
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	arm_v7s_iopte pte, *tablep;
@@ -584,9 +585,9 @@ static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
 	return size;
 }
 
-static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
-			   unsigned long iova, size_t size, int lvl,
-			   arm_v7s_iopte *ptep)
+static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+			      unsigned long iova, size_t size, int lvl,
+			      arm_v7s_iopte *ptep)
 {
 	arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
 	struct io_pgtable *iop = &data->iop;
@@ -656,8 +657,8 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 	return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
 }
 
-static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
-			 size_t size)
+static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+			    size_t size)
 {
 	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
...
@@ -21,6 +21,7 @@
 #define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/iommu.h>
 #include <linux/kernel.h>
 #include <linux/sizes.h>
@@ -32,7 +33,7 @@
 
 #include "io-pgtable.h"
 
-#define ARM_LPAE_MAX_ADDR_BITS		48
+#define ARM_LPAE_MAX_ADDR_BITS		52
 #define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
 #define ARM_LPAE_MAX_LEVELS		4
 
@@ -86,6 +87,8 @@
 #define ARM_LPAE_PTE_TYPE_TABLE		3
 #define ARM_LPAE_PTE_TYPE_PAGE		3
 
+#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)
+
 #define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
 #define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
 #define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
@@ -159,6 +162,7 @@
 #define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
 #define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
 #define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
+#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL
 
 #define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
 #define ARM_LPAE_MAIR_ATTR_MASK		0xff
@@ -170,9 +174,7 @@
 #define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
 
 /* IOPTE accessors */
-#define iopte_deref(pte,d)					\
-	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
-	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))
+#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
 
 #define iopte_type(pte,l)					\
 	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
@@ -184,12 +186,6 @@
 		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
 		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
 
-#define iopte_to_pfn(pte,d)					\
-	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
-
-#define pfn_to_iopte(pfn,d)					\
-	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
-
 struct arm_lpae_io_pgtable {
 	struct io_pgtable	iop;
 
@@ -203,6 +199,27 @@ struct arm_lpae_io_pgtable {
 
 typedef u64 arm_lpae_iopte;
 
+static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
+				     struct arm_lpae_io_pgtable *data)
+{
+	arm_lpae_iopte pte = paddr;
+
+	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
+	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
+}
+
+static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
+				  struct arm_lpae_io_pgtable *data)
+{
+	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
+
+	if (data->pg_shift < 16)
+		return paddr;
+
+	/* Rotate the packed high-order bits back to the top */
+	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
+}
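	/*
	 * Worked example of the packing above (a sketch, not part of the
	 * patch): with a 64KB granule (pg_shift == 16), PA bits 15:12 of a
	 * page-aligned address are always zero, so bits 51:48 can live there.
	 * For paddr == 0x000f_0000_0000_0000:
	 *
	 *   paddr_to_iopte: pte >> 36 moves bits 51:48 down to 15:12, and
	 *                   masking with GENMASK_ULL(47,12) drops the
	 *                   originals, giving an address field of 0xf000.
	 *   iopte_to_paddr: paddr << 36 moves bits 15:12 back up to 51:48,
	 *                   and masking with GENMASK_ULL(51,16) drops the
	 *                   packed copy, restoring 0x000f_0000_0000_0000.
	 */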
 static bool selftest_running = false;
 
 static dma_addr_t __arm_lpae_dma_addr(void *pages)
@@ -268,9 +285,9 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 		__arm_lpae_sync_pte(ptep, cfg);
 }
 
-static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
-			    unsigned long iova, size_t size, int lvl,
-			    arm_lpae_iopte *ptep);
+static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+			       unsigned long iova, size_t size, int lvl,
+			       arm_lpae_iopte *ptep);
 
 static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 				phys_addr_t paddr, arm_lpae_iopte prot,
@@ -287,7 +304,7 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 
 	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
-	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
+	pte |= paddr_to_iopte(paddr, data);
 
 	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
 }
@@ -506,10 +523,10 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 	kfree(data);
 }
 
-static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
-				    unsigned long iova, size_t size,
-				    arm_lpae_iopte blk_pte, int lvl,
-				    arm_lpae_iopte *ptep)
+static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+				       unsigned long iova, size_t size,
+				       arm_lpae_iopte blk_pte, int lvl,
+				       arm_lpae_iopte *ptep)
 {
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	arm_lpae_iopte pte, *tablep;
@@ -528,7 +545,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 	if (size == split_sz)
 		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
 
-	blk_paddr = iopte_to_pfn(blk_pte, data) << data->pg_shift;
+	blk_paddr = iopte_to_paddr(blk_pte, data);
 	pte = iopte_prot(blk_pte);
 
 	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
@@ -560,9 +577,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 	return size;
 }
 
-static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
-			    unsigned long iova, size_t size, int lvl,
-			    arm_lpae_iopte *ptep)
+static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+			       unsigned long iova, size_t size, int lvl,
+			       arm_lpae_iopte *ptep)
 {
 	arm_lpae_iopte pte;
 	struct io_pgtable *iop = &data->iop;
@@ -606,8 +623,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
 }
 
-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
-			  size_t size)
+static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+			     size_t size)
 {
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	arm_lpae_iopte *ptep = data->pgd;
@@ -652,12 +669,13 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 
 found_translation:
 	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
-	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+	return iopte_to_paddr(pte, data) | iova;
 }
 
 static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
 {
-	unsigned long granule;
+	unsigned long granule, page_sizes;
+	unsigned int max_addr_bits = 48;
 
 	/*
 	 * We need to restrict the supported page sizes to match the
@@ -677,17 +695,24 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
 	switch (granule) {
 	case SZ_4K:
-		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
 		break;
 	case SZ_16K:
-		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
+		page_sizes = (SZ_16K | SZ_32M);
 		break;
 	case SZ_64K:
-		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
+		max_addr_bits = 52;
+		page_sizes = (SZ_64K | SZ_512M);
+		if (cfg->oas > 48)
+			page_sizes |= 1ULL << 42; /* 4TB */
 		break;
 	default:
-		cfg->pgsize_bitmap = 0;
+		page_sizes = 0;
 	}
+
+	cfg->pgsize_bitmap &= page_sizes;
+	cfg->ias = min(cfg->ias, max_addr_bits);
+	cfg->oas = min(cfg->oas, max_addr_bits);
 }
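	/*
	 * Worked sizes for the SZ_64K case above (a sketch, not part of the
	 * patch): with a 64KB granule each table level resolves 16 - 3 = 13
	 * bits, so a level-3 page maps 1 << 16 = 64KB, a level-2 block maps
	 * 1 << 29 = 512MB, and a level-1 block maps 1 << 42 = 4TB; that last
	 * size is only advertised when the output address space exceeds 48
	 * bits, since 52-bit PAs require the 64KB granule.
	 */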
 
 static struct arm_lpae_io_pgtable *
@@ -784,6 +809,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	case 48:
 		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
 		break;
+	case 52:
+		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+		break;
 	default:
 		goto out_free_data;
 	}
@@ -891,6 +919,9 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	case 48:
 		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
 		break;
+	case 52:
+		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
+		break;
 	default:
 		goto out_free_data;
 	}
...
@@ -119,8 +119,8 @@ struct io_pgtable_cfg {
 struct io_pgtable_ops {
 	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
 		   phys_addr_t paddr, size_t size, int prot);
-	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
-		     size_t size);
+	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+			size_t size);
 	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
 				    unsigned long iova);
 };
...
@@ -1573,10 +1573,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 	if (unlikely(ops->unmap == NULL ||
 		     domain->pgsize_bitmap == 0UL))
-		return -ENODEV;
+		return 0;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
-		return -EINVAL;
+		return 0;
 
 	/* find out the minimum page size supported */
 	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
@@ -1589,7 +1589,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
 		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
 		       iova, size, min_pagesz);
-		return -EINVAL;
+		return 0;
 	}
 
 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
...
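With these changes the unmap path reports progress as a size_t: 0 (or a short count) means failure, never a negative errno. Callers compare the returned size against what they asked for; a minimal sketch (illustrative, not from this merge):

	size_t unmapped;

	unmapped = iommu_unmap(domain, iova, size);
	if (unmapped < size)
		pr_err("failed to unmap: %zu of %zu bytes\n", unmapped, size);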
@@ -60,7 +60,7 @@
 	(((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
 
 #define REG_MMU_IVRP_PADDR			0x114
-#define F_MMU_IVRP_PA_SET(pa, ext)		(((pa) >> 1) | ((!!(ext)) << 31))
+
 #define REG_MMU_VLD_PA_RNG			0x118
 #define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))
 
@@ -539,8 +539,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
 		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
 	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
 
-	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
-		       data->base + REG_MMU_IVRP_PADDR);
+	if (data->m4u_plat == M4U_MT8173)
+		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
+	else
+		regval = lower_32_bits(data->protect_base) |
+			 upper_32_bits(data->protect_base);
+	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
+
 	if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
 		/*
 		 * If 4GB mode is enabled, the validate PA range is from
@@ -695,6 +700,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
 	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
 	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
 	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
+	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
 	clk_disable_unprepare(data->bclk);
 	return 0;
 }
@@ -717,8 +723,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
 	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
 	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
 	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
-	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
-		       base + REG_MMU_IVRP_PADDR);
+	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
 	if (data->m4u_dom)
 		writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
 		       base + REG_MMU_PT_BASE_ADDR);
...
@@ -32,6 +32,7 @@ struct mtk_iommu_suspend_reg {
 	u32				ctrl_reg;
 	u32				int_control0;
 	u32				int_main_control;
+	u32				ivrp_paddr;
 };
 
 enum mtk_iommu_plat {
...
@@ -417,20 +417,12 @@ static int mtk_iommu_create_mapping(struct device *dev,
 		m4udev->archdata.iommu = mtk_mapping;
 	}
 
-	ret = arm_iommu_attach_device(dev, mtk_mapping);
-	if (ret)
-		goto err_release_mapping;
-
 	return 0;
-
-err_release_mapping:
-	arm_iommu_release_mapping(mtk_mapping);
-	m4udev->archdata.iommu = NULL;
-
-	return ret;
 }
 
 static int mtk_iommu_add_device(struct device *dev)
 {
+	struct dma_iommu_mapping *mtk_mapping;
 	struct of_phandle_args iommu_spec;
 	struct of_phandle_iterator it;
 	struct mtk_iommu_data *data;
@@ -451,15 +443,30 @@ static int mtk_iommu_add_device(struct device *dev)
 	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
 		return -ENODEV; /* Not a iommu client device */
 
-	data = dev->iommu_fwspec->iommu_priv;
-	iommu_device_link(&data->iommu, dev);
-
-	group = iommu_group_get_for_dev(dev);
+	/*
+	 * This is a short-term bodge because the ARM DMA code doesn't
+	 * understand multi-device groups, but we have to call into it
+	 * successfully (and not just rely on a normal IOMMU API attach
+	 * here) in order to set the correct DMA API ops on @dev.
+	 */
+	group = iommu_group_alloc();
 	if (IS_ERR(group))
 		return PTR_ERR(group);
 
+	err = iommu_group_add_device(group, dev);
 	iommu_group_put(group);
-	return 0;
+	if (err)
+		return err;
+
+	data = dev->iommu_fwspec->iommu_priv;
+	mtk_mapping = data->dev->archdata.iommu;
+	err = arm_iommu_attach_device(dev, mtk_mapping);
+	if (err) {
+		iommu_group_remove_device(dev);
+		return err;
+	}
+
+	return iommu_device_link(&data->iommu, dev);
 }
 
 static void mtk_iommu_remove_device(struct device *dev)
@@ -476,24 +483,6 @@ static void mtk_iommu_remove_device(struct device *dev)
 	iommu_fwspec_free(dev);
 }
 
-static struct iommu_group *mtk_iommu_device_group(struct device *dev)
-{
-	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
-
-	if (!data)
-		return ERR_PTR(-ENODEV);
-
-	/* All the client devices are in the same m4u iommu-group */
-	if (!data->m4u_group) {
-		data->m4u_group = iommu_group_alloc();
-		if (IS_ERR(data->m4u_group))
-			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
-	} else {
-		iommu_group_ref_get(data->m4u_group);
-	}
-
-	return data->m4u_group;
-}
-
 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
 {
 	u32 regval;
@@ -546,7 +535,6 @@ static struct iommu_ops mtk_iommu_ops = {
 	.iova_to_phys = mtk_iommu_iova_to_phys,
 	.add_device = mtk_iommu_add_device,
 	.remove_device = mtk_iommu_remove_device,
-	.device_group = mtk_iommu_device_group,
 	.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
 };
...
@@ -1536,7 +1536,7 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
 	struct iommu_group *group = ERR_PTR(-EINVAL);
 
 	if (arch_data->iommu_dev)
-		group = arch_data->iommu_dev->group;
+		group = iommu_group_ref_get(arch_data->iommu_dev->group);
 
 	return group;
 }
...
This diff is collapsed.
@@ -3612,7 +3612,8 @@ static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
 		return -ENOMEM;
 	}
 
-	err = iort_register_domain_token(its_entry->translation_id, dom_handle);
+	err = iort_register_domain_token(its_entry->translation_id, res.start,
+					 dom_handle);
 	if (err) {
 		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
 		       &res.start, its_entry->translation_id);
...
@@ -26,7 +26,8 @@
 #define IORT_IRQ_MASK(irq)		(irq & 0xffffffffULL)
 #define IORT_IRQ_TRIGGER_MASK(irq)	((irq >> 32) & 0xffffffffULL)
 
-int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
+int iort_register_domain_token(int trans_id, phys_addr_t base,
+			       struct fwnode_handle *fw_node);
 void iort_deregister_domain_token(int trans_id);
 struct fwnode_handle *iort_find_domain_token(int trans_id);
 #ifdef CONFIG_ACPI_IORT
@@ -38,6 +39,7 @@ int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
 /* IOMMU interface */
 void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
 const struct iommu_ops *iort_iommu_configure(struct device *dev);
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
 #else
 static inline void acpi_iort_init(void) { }
 static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
@@ -52,6 +54,9 @@ static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
 static inline const struct iommu_ops *iort_iommu_configure(
 				      struct device *dev)
 { return NULL; }
+static inline
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+{ return 0; }
 #endif
 
 #endif /* __ACPI_IORT_H__ */
@@ -209,12 +209,12 @@
 #define DMA_FECTL_IM (((u32)1) << 31)
 
 /* FSTS_REG */
-#define DMA_FSTS_PPF ((u32)2)
-#define DMA_FSTS_PFO ((u32)1)
-#define DMA_FSTS_IQE (1 << 4)
-#define DMA_FSTS_ICE (1 << 5)
-#define DMA_FSTS_ITE (1 << 6)
-#define DMA_FSTS_PRO (1 << 7)
+#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
+#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
+#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
+#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
+#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
+#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
 /* FRCD_REG, 32 bits access */
...
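The FSTS_REG bit values are unchanged here; the definitions are just rewritten in a uniform (1 << n) style, ordered by bit position, and commented. For reference, a sketch of how fault-handling code consumes them (modeled on the existing dmar fault path; not code from this merge):

	u32 fsts = readl(iommu->reg + DMAR_FSTS_REG);

	if (fsts & DMA_FSTS_PFO)	/* fault recording registers overflowed */
		pr_warn("primary fault overflow\n");
	if (fsts & DMA_FSTS_PPF) {	/* at least one fault is pending */
		int idx = dma_fsts_fault_record_index(fsts);
		/* walk the FRCD_REG entries starting at idx, then clear them */
	}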
@@ -465,23 +465,23 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	return -ENODEV;
 }
 
-static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			      size_t size)
+static inline size_t iommu_unmap(struct iommu_domain *domain,
+				 unsigned long iova, size_t size)
 {
-	return -ENODEV;
+	return 0;
 }
 
-static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,
-				   int gfp_order)
+static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
+				      unsigned long iova, int gfp_order)
 {
-	return -ENODEV;
+	return 0;
 }
 
 static inline size_t iommu_map_sg(struct iommu_domain *domain,
 				  unsigned long iova, struct scatterlist *sg,
 				  unsigned int nents, int prot)
 {
-	return -ENODEV;
+	return 0;
 }
 
 static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
...