Commit ebb4949e authored by Linus Torvalds

Merge tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:

 - KVM PCIe/MSI passthrough support on ARM/ARM64

 - introduction of a core representation for individual hardware IOMMUs

 - support for IOMMU privileged mappings as supported by some ARM IOMMUs

 - 16-bit SID support for ARM-SMMUv2

 - stream table optimization for ARM-SMMUv3

 - various fixes and other small improvements

* tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (61 commits)
  vfio/type1: Fix error return code in vfio_iommu_type1_attach_group()
  iommu: Remove iommu_register_instance interface
  iommu/exynos: Make use of iommu_device_register interface
  iommu/mediatek: Make use of iommu_device_register interface
  iommu/msm: Make use of iommu_device_register interface
  iommu/arm-smmu: Make use of the iommu_register interface
  iommu: Add iommu_device_set_fwnode() interface
  iommu: Make iommu_device_link/unlink take a struct iommu_device
  iommu: Add sysfs bindings for struct iommu_device
  iommu: Introduce new 'struct iommu_device'
  iommu: Rename struct iommu_device
  iommu: Rename iommu_get_instance()
  iommu: Fix static checker warning in iommu_insert_device_resv_regions
  iommu: Avoid unnecessary assignment of dev->iommu_fwspec
  iommu/mediatek: Remove bogus 'select' statements
  iommu/dma: Remove bogus dma_supported() implementation
  iommu/ipmmu-vmsa: Restrict IOMMU Domain Geometry to 32-bit address space
  iommu/vt-d: Don't over-free page table directories
  iommu/vt-d: Tylersburg isoch identity map check is done too late.
  iommu/vt-d: Fix some macros that are incorrectly specified in intel-iommu
  ...
parents 937b5b5d 8d2932dd
...@@ -12,3 +12,15 @@ Description: /sys/kernel/iommu_groups/ contains a number of sub-
file if the IOMMU driver has chosen to register a more
common name for the group.
Users:
What: /sys/kernel/iommu_groups/reserved_regions
Date: January 2017
KernelVersion: v4.11
Contact: Eric Auger <eric.auger@redhat.com>
Description: /sys/kernel/iommu_groups/reserved_regions lists IOVA
regions that are reserved. Not all reserved regions are
necessarily listed. This is typically used to report
direct-mapped, MSI and non-mappable regions. Each region
is described on a single line: the first field is the
base IOVA, the second is the end IOVA and the third
field describes the type of the region.
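As a rough illustration (the addresses and type string below are placeholders, not taken from this patch), the MSI doorbell window reserved by the ARM SMMU drivers elsewhere in this series could show up as a line such as:
	0x0000000008000000 0x00000000080fffff msi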
...@@ -143,3 +143,13 @@ So, this provides a way for drivers to avoid those error messages on calls
where allocation failures are not a problem, and shouldn't bother the logs.
NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
DMA_ATTR_PRIVILEGED
------------------------------
Some advanced peripherals such as remote processors and GPUs perform
accesses to DMA buffers in both privileged "supervisor" and unprivileged
"user" modes. This attribute is used to indicate to the DMA-mapping
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
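As a minimal sketch of how a driver might request such a buffer (the device, size and error handling below are illustrative only, not part of this document; the pl330 change later in this series uses the same call):

	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
			      DMA_ATTR_PRIVILEGED);
	if (!buf)
		return -ENOMEM;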
...@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init);
#ifdef CONFIG_ARM_DMA_USE_IOMMU
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
int prot = 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
case DMA_TO_DEVICE:
return prot | IOMMU_READ;
case DMA_FROM_DEVICE:
return prot | IOMMU_WRITE;
default:
return prot;
}
}
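/*
 * Editor's note (not in the original patch): as a worked example, a
 * DMA_TO_DEVICE mapping requested with DMA_ATTR_PRIVILEGED comes back
 * from this helper as IOMMU_PRIV | IOMMU_READ.
 */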
/* IOMMU */
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
...@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
* Create a mapping in device IO address space for specified pages
*/
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
...@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
len = (j - i) << PAGE_SHIFT;
ret = iommu_map(mapping->domain, iova, phys, len,
IOMMU_READ|IOMMU_WRITE);
__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
if (ret < 0)
goto fail;
iova += len;
...@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
}
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
dma_addr_t *handle, int coherent_flag)
dma_addr_t *handle, int coherent_flag,
unsigned long attrs)
{
struct page *page;
void *addr;
...@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
if (!addr)
return NULL;
*handle = __iommu_create_mapping(dev, &page, size);
*handle = __iommu_create_mapping(dev, &page, size, attrs);
if (*handle == DMA_ERROR_CODE)
goto err_mapping;
...@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
return __iommu_alloc_simple(dev, size, gfp, handle,
coherent_flag);
coherent_flag, attrs);
/*
* Following is a work-around (a.k.a. hack) to prevent pages
...@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (!pages)
return NULL;
*handle = __iommu_create_mapping(dev, pages, size);
*handle = __iommu_create_mapping(dev, pages, size, attrs);
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
...@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
GFP_KERNEL);
}
static int __dma_direction_to_prot(enum dma_data_direction dir)
{
int prot;
switch (dir) {
case DMA_BIDIRECTIONAL:
prot = IOMMU_READ | IOMMU_WRITE;
break;
case DMA_TO_DEVICE:
prot = IOMMU_READ;
break;
case DMA_FROM_DEVICE:
prot = IOMMU_WRITE;
break;
default:
prot = 0;
}
return prot;
}
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
...@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
prot = __dma_direction_to_prot(dir);
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, iova, phys, len, prot);
if (ret < 0)
...@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
prot = __dma_direction_to_prot(dir);
prot = __dma_info_to_prot(dir, attrs);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)
...@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
if (ret < 0)
......
...@@ -558,7 +558,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
size_t iosize = size;
void *addr;
...@@ -712,7 +712,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int prot = dma_direction_to_prot(dir, coherent);
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!iommu_dma_mapping_error(dev, dev_addr) &&
...@@ -770,7 +770,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
return iommu_dma_map_sg(dev, sgl, nelems,
dma_direction_to_prot(dir, coherent));
dma_info_to_prot(dir, coherent, attrs));
}
static void __iommu_unmap_sg_attrs(struct device *dev,
...@@ -799,7 +799,6 @@ static struct dma_map_ops iommu_dma_ops = {
.sync_sg_for_device = __iommu_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
.dma_supported = iommu_dma_supported,
.mapping_error = iommu_dma_mapping_error,
};
......
...@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
if (!iort_fwnode)
return NULL;
ops = iommu_get_instance(iort_fwnode);
ops = iommu_ops_from_fwnode(iort_fwnode);
if (!ops)
return NULL;
......
...@@ -1859,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
* Alloc MicroCode buffer for 'chans' Channel threads.
* A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
*/
pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
chans * pl330->mcbufsz,
&pl330->mcode_bus, GFP_KERNEL);
&pl330->mcode_bus, GFP_KERNEL,
DMA_ATTR_PRIVILEGED);
if (!pl330->mcode_cpu) {
dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
__func__, __LINE__);
......
...@@ -352,9 +352,6 @@ config MTK_IOMMU_V1
select IOMMU_API
select MEMORY
select MTK_SMI
select COMMON_CLK_MT2701_MMSYS
select COMMON_CLK_MT2701_IMGSYS
select COMMON_CLK_MT2701_VDECSYS
help
Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is
Multimedia Memory Management Unit. This option enables remapping of
......
...@@ -112,7 +112,7 @@ static struct timer_list queue_timer;
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
*/
static const struct iommu_ops amd_iommu_ops;
const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
...@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev)
static int iommu_init_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
int devid;
if (dev->archdata.iommu)
...@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev)
if (devid < 0)
return devid;
iommu = amd_iommu_rlookup_table[devid];
dev_data = find_dev_data(devid);
if (!dev_data)
return -ENOMEM;
...@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev)
dev->archdata.iommu = dev_data;
iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
iommu_device_link(&iommu->iommu, dev);
dev);
return 0;
}
...@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev)
static void iommu_uninit_device(struct device *dev)
{
int devid;
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
iommu = amd_iommu_rlookup_table[devid];
dev_data = search_dev_data(devid);
if (!dev_data)
return;
...@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev)
if (dev_data->domain)
detach_device(dev);
iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
iommu_device_unlink(&iommu->iommu, dev);
dev);
iommu_group_remove_device(dev);
...@@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return false;
}
static void amd_iommu_get_dm_regions(struct device *dev,
static void amd_iommu_get_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *region;
struct unity_map_entry *entry;
int devid;
...@@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
struct iommu_dm_region *region;
size_t length;
int prot = 0;
if (devid < entry->devid_start || devid > entry->devid_end)
continue;
region = kzalloc(sizeof(*region), GFP_KERNEL);
length = entry->address_end - entry->address_start;
if (entry->prot & IOMMU_PROT_IR)
prot |= IOMMU_READ;
if (entry->prot & IOMMU_PROT_IW)
prot |= IOMMU_WRITE;
region = iommu_alloc_resv_region(entry->address_start,
length, prot,
IOMMU_RESV_DIRECT);
if (!region) {
pr_err("Out of memory allocating dm-regions for %s\n",
dev_name(dev));
return;
}
region->start = entry->address_start;
region->length = entry->address_end - entry->address_start;
if (entry->prot & IOMMU_PROT_IR)
region->prot |= IOMMU_READ;
if (entry->prot & IOMMU_PROT_IW)
region->prot |= IOMMU_WRITE;
list_add_tail(&region->list, head);
}
region = iommu_alloc_resv_region(MSI_RANGE_START,
MSI_RANGE_END - MSI_RANGE_START + 1,
0, IOMMU_RESV_RESERVED);
if (!region)
return;
list_add_tail(&region->list, head);
region = iommu_alloc_resv_region(HT_RANGE_START,
HT_RANGE_END - HT_RANGE_START + 1,
0, IOMMU_RESV_RESERVED);
if (!region)
return;
list_add_tail(&region->list, head);
}
static void amd_iommu_put_dm_regions(struct device *dev,
static void amd_iommu_put_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_dm_region *entry, *next;
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}
static void amd_iommu_apply_dm_region(struct device *dev,
static void amd_iommu_apply_resv_region(struct device *dev,
struct iommu_domain *domain,
struct iommu_dm_region *region)
struct iommu_resv_region *region)
{
struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
unsigned long start, end;
...@@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
}
static const struct iommu_ops amd_iommu_ops = {
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
.domain_free = amd_iommu_domain_free,
...@@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
.get_dm_regions = amd_iommu_get_dm_regions,
.get_resv_regions = amd_iommu_get_resv_regions,
.put_dm_regions = amd_iommu_put_dm_regions,
.put_resv_regions = amd_iommu_put_resv_regions,
.apply_dm_region = amd_iommu_apply_dm_region,
.apply_resv_region = amd_iommu_apply_resv_region,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
......
...@@ -94,6 +94,8 @@
* out of it.
*/
extern const struct iommu_ops amd_iommu_ops;
/*
* structure describing one IOMMU in the ACPI table. Typically followed by one
* or more ivhd_entrys.
...@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
amd_iommu_groups, "ivhd%d",
amd_iommu_groups, "ivhd%d", iommu->index);
iommu->index);
iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
iommu_device_register(&iommu->iommu);
return pci_enable_device(iommu->dev);
}
...@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void)
*/
ret = check_ivrs_checksum(ivrs_base);
if (ret)
return ret;
goto out;
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
......
...@@ -535,8 +535,8 @@ struct amd_iommu {
/* if one, we need to send a completion wait command */
bool need_sync;
/* IOMMU sysfs device */
/* Handle for IOMMU core code */
struct device *iommu_dev;
struct iommu_device iommu;
/*
* We can't rely on the BIOS to restore all values on reinit, so we
......
...@@ -269,9 +269,6 @@
#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44
#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL
#define STRTAB_STE_1_PRIVCFG_SHIFT 48
#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
...@@ -412,6 +409,9 @@
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
...@@ -616,6 +616,9 @@ struct arm_smmu_device {
unsigned int sid_bits;
struct arm_smmu_strtab_cfg strtab_cfg;
/* IOMMU core code handle */
struct iommu_device iommu;
};
/* SMMU private data for each master */
...@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
}
/* Nuke the existing Config, as we're going to rewrite it */
/* Nuke the existing STE_0 value, as we're going to rewrite it */
val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
val = ste->valid ? STRTAB_STE_0_V : 0;
if (ste->valid)
val |= STRTAB_STE_0_V;
else
val &= ~STRTAB_STE_0_V;
if (ste->bypass) {
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
...@@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
#ifdef CONFIG_PCI_ATS
STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
STRTAB_STE_1_PRIVCFG_UNPRIV <<
STRTAB_STE_1_PRIVCFG_SHIFT);
if (smmu->features & ARM_SMMU_FEAT_STALLS)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
...@@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
}
if (ste->s2_cfg) {
...@@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
case IOMMU_CAP_INTR_REMAP:
return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
...@@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
if (!IS_ERR(group))
if (!IS_ERR(group)) {
iommu_group_put(group);
iommu_device_link(&smmu->iommu, dev);
}
return PTR_ERR_OR_ZERO(group);
}
...@@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct arm_smmu_master_data *master;
struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
master = fwspec->iommu_priv;
smmu = master->smmu;
if (master && master->ste.valid)
arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev);
iommu_device_unlink(&smmu->iommu, dev);
kfree(master);
iommu_fwspec_free(dev);
}
...@@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
static void arm_smmu_get_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *region;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
prot, IOMMU_RESV_MSI);
if (!region)
return;
list_add_tail(&region->list, head);
}
static void arm_smmu_put_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
...@@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
...@@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
u32 size, l1size;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
/*
/* Calculate the L1 size, capped to the SIDSIZE. */
* If we can resolve everything with a single L2 table, then we
size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
* just need a single L1 descriptor. Otherwise, calculate the L1
size = min(size, smmu->sid_bits - STRTAB_SPLIT);
* size, capped to the SIDSIZE.
*/
if (smmu->sid_bits < STRTAB_SPLIT) {
size = 0;
} else {
size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
size = min(size, smmu->sid_bits - STRTAB_SPLIT);
}
cfg->num_l1_ents = 1 << size;
size += STRTAB_SPLIT;
...@@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
/*
* If the SMMU supports fewer bits than would fill a single L2 stream
* table, use a linear table instead.
*/
if (smmu->sid_bits <= STRTAB_SPLIT)
smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
...@@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
struct resource *res;
resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
bool bypass;
...@@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
}
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
...@@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return ret;
/* And we're up. Go go go! */
iommu_register_instance(dev->fwnode, &arm_smmu_ops);
ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
"smmu3.%pa", &ioaddr);
if (ret)
return ret;
iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
ret = iommu_device_register(&smmu->iommu);
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
......
...@@ -24,6 +24,7 @@
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
* - Context fault reporting
* - Extended Stream ID (16 bit)
*/
#define pr_fmt(fmt) "arm-smmu: " fmt
...@@ -87,6 +88,7 @@
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
#define sCR0_EXIDENABLE (1 << 3)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)
#define sCR0_USFCFG (1 << 10)
...@@ -126,6 +128,7 @@
#define ID0_NUMIRPT_MASK 0xff
#define ID0_NUMSIDB_SHIFT 9
#define ID0_NUMSIDB_MASK 0xf
#define ID0_EXIDS (1 << 8)
#define ID0_NUMSMRG_SHIFT 0
#define ID0_NUMSMRG_MASK 0xff
...@@ -169,6 +172,7 @@
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
#define S2CR_EXIDVALID (1 << 10)
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
enum arm_smmu_s2cr_type {
...@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS (1 << 4)
#define TTBRn_ASID_SHIFT 48
...@@ -281,6 +286,9 @@ enum arm_smmu_s2cr_privcfg {
#define FSYNR0_WNR (1 << 4)
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
...@@ -351,6 +359,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
#define ARM_SMMU_FEAT_EXIDS (1 << 12)
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
...@@ -380,6 +389,9 @@ struct arm_smmu_device {
unsigned int *irqs;
u32 cavium_id_base; /* Specific to Cavium */
/* IOMMU core code handle */
struct iommu_device iommu;
};
enum arm_smmu_context_fmt {
...@@ -778,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
reg2 |= TTBCR2_SEP_UPSTREAM;
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
reg2 |= TTBCR2_AS;
}
if (smmu->version > ARM_SMMU_V1)
writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
...@@ -1048,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
struct arm_smmu_smr *smr = smmu->smrs + idx;
u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
if (smr->valid)
if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
reg |= SMR_VALID;
writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}
...@@ -1060,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
(s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
(s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
smmu->smrs[idx].valid)
reg |= S2CR_EXIDVALID;
writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}
...@@ -1070,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
arm_smmu_write_smr(smmu, idx);
}
/*
* The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
* should be called after sCR0 is written.
*/
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 smr;
if (!smmu->smrs)
return;
/*
* SMR.ID bits may not be preserved if the corresponding MASK
* bits are set, so check each one separately. We can reject
* masters later if they try to claim IDs outside these masks.
*/
smr = smmu->streamid_mask << SMR_ID_SHIFT;
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
smmu->streamid_mask = smr >> SMR_ID_SHIFT;
smr = smmu->streamid_mask << SMR_MASK_SHIFT;
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
struct arm_smmu_smr *smrs = smmu->smrs;
...@@ -1214,7 +1259,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
continue;
s2cr[idx].type = type;
s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
s2cr[idx].cbndx = cbndx;
arm_smmu_write_s2cr(smmu, idx);
}
...@@ -1371,8 +1416,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
* requests.
*/
return true;
case IOMMU_CAP_INTR_REMAP:
return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
...@@ -1444,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_free;
iommu_device_link(&smmu->iommu, dev);
return 0;
out_free:
...@@ -1456,10 +1501,17 @@ static int arm_smmu_add_device(struct device *dev)
static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct arm_smmu_master_cfg *cfg;
struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
cfg = fwspec->iommu_priv;
smmu = cfg->smmu;
iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_master_free_smes(fwspec);
iommu_group_remove_device(dev);
kfree(fwspec->iommu_priv);
...@@ -1549,6 +1601,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, &fwid, 1);
}
static void arm_smmu_get_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *region;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
prot, IOMMU_RESV_MSI);
if (!region)
return;
list_add_tail(&region->list, head);
}
static void arm_smmu_put_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
...@@ -1564,6 +1639,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
...@@ -1648,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
if (smmu->features & ARM_SMMU_FEAT_VMID16)
reg |= sCR0_VMID16EN;
if (smmu->features & ARM_SMMU_FEAT_EXIDS)
reg |= sCR0_EXIDENABLE;
/* Push the button */
__arm_smmu_tlb_sync(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
...@@ -1735,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
"\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
smmu->features |= ARM_SMMU_FEAT_EXIDS;
size = 1 << 16;
} else {
size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
}
smmu->streamid_mask = size - 1;
if (id & ID0_SMS) {
u32 smr;
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
if (size == 0) {
...@@ -1748,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
/*
* SMR.ID bits may not be preserved if the corresponding MASK
* bits are set, so check each one separately. We can reject
* masters later if they try to claim IDs outside these masks.
*/
smr = smmu->streamid_mask << SMR_ID_SHIFT;
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
smmu->streamid_mask = smr >> SMR_ID_SHIFT;
smr = smmu->streamid_mask << SMR_MASK_SHIFT;
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
/* Zero-initialised to mark as invalid */
smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
GFP_KERNEL);
...@@ -1770,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENOMEM;
dev_notice(smmu->dev,
"\tstream matching with %lu register groups, mask 0x%x",
"\tstream matching with %lu register groups", size);
size, smmu->smr_mask_mask);
}
/* s2cr->type == 0 means translation, so initialise explicitly */
smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
...@@ -2011,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
...@@ -2031,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
...@@ -2091,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
}
iommu_register_instance(dev->fwnode, &arm_smmu_ops);
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
"smmu.%pa", &ioaddr);
if (err) {
dev_err(dev, "Failed to register iommu in sysfs\n");
return err;
}
iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
err = iommu_device_register(&smmu->iommu);
if (err) {
dev_err(dev, "Failed to register iommu\n");
return err;
}
platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
/* Oh, for a proper bus abstraction */
if (!iommu_present(&platform_bus_type))
......
...@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
phys_addr_t phys;
};
enum iommu_dma_cookie_type {
IOMMU_DMA_IOVA_COOKIE,
IOMMU_DMA_MSI_COOKIE,
};
struct iommu_dma_cookie {
struct iova_domain iovad;
enum iommu_dma_cookie_type type;
struct list_head msi_page_list;
union {
spinlock_t msi_lock;
/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
struct iova_domain iovad;
/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
dma_addr_t msi_iova;
};
struct list_head msi_page_list;
spinlock_t msi_lock;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
return cookie->iovad.granule;
return PAGE_SIZE;
}
static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
struct iommu_dma_cookie *cookie = domain->iova_cookie;
if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
return &cookie->iovad;
return NULL;
}
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
struct iommu_dma_cookie *cookie;
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
if (cookie) {
spin_lock_init(&cookie->msi_lock);
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
}
return cookie;
}
int iommu_dma_init(void)
...@@ -61,26 +96,54 @@ int iommu_dma_init(void)
* callback when domain->type == IOMMU_DOMAIN_DMA.
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
if (domain->iova_cookie)
return -EEXIST;
domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
if (!domain->iova_cookie)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
/**
* iommu_get_msi_cookie - Acquire just MSI remapping resources
* @domain: IOMMU domain to prepare
* @base: Start address of IOVA region for MSI mappings
*
* Users who manage their own IOVA allocation and do not want DMA API support,
* but would still like to take advantage of automatic MSI remapping, can use
* this to initialise their own domain appropriately. Users should reserve a
* contiguous IOVA region, starting at @base, large enough to accommodate the
* number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
* used by the devices attached to @domain.
*/
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
struct iommu_dma_cookie *cookie;
if (domain->type != IOMMU_DOMAIN_UNMANAGED)
return -EINVAL;
if (domain->iova_cookie)
return -EEXIST;
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
if (!cookie)
return -ENOMEM;
spin_lock_init(&cookie->msi_lock);
cookie->msi_iova = base;
INIT_LIST_HEAD(&cookie->msi_page_list);
domain->iova_cookie = cookie;
return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
EXPORT_SYMBOL(iommu_get_msi_cookie);
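/*
 * Editor's sketch (not part of this patch): how a caller that manages its own
 * IOVA space, such as VFIO, might use the new MSI cookie. The base address
 * below is an illustrative assumption only.
 */
static int example_prepare_msi_domain(struct iommu_domain *domain)
{
	/* Only unmanaged domains may take an MSI cookie */
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	/* Reserve a PAGE_SIZE-granular window starting at an unused IOVA */
	return iommu_get_msi_cookie(domain, 0x8000000);
}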
/**
* iommu_put_dma_cookie - Release a domain's DMA mapping resources
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
* iommu_get_msi_cookie()
*
* IOMMU drivers should normally call this from their domain_free callback.
*/
...@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!cookie)
return;
if (cookie->iovad.granule)
if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
put_iova_domain(&cookie->iovad);
list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
...@@ -137,11 +200,13 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iova_domain *iovad = cookie_iovad(domain);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
bool pci = dev && dev_is_pci(dev);
if (!iovad)
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -ENODEV;
return -EINVAL;
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
...@@ -161,19 +226,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
end_pfn = min_t(unsigned long, end_pfn,
domain->geometry.aperture_end >> order);
}
/*
* PCI devices may have larger DMA masks, but still prefer allocating
* within a 32-bit mask to avoid DAC addressing. Such limitations don't
* apply to the typical platform device, so for those we may as well
* leave the cache limit at the top of their range to save an rb_last()
* traversal on every allocation.
*/
if (pci)
end_pfn &= DMA_BIT_MASK(32) >> order;
/* All we can safely do with an existing domain is enlarge it */
/* start_pfn is always nonzero for an already-initialised domain */
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn ||
base_pfn != iovad->start_pfn) {
end_pfn < iovad->dma_32bit_pfn) {
pr_warn("Incompatible range for DMA domain\n"); pr_warn("Incompatible range for DMA domain\n");
return -EFAULT; return -EFAULT;
} }
iovad->dma_32bit_pfn = end_pfn; /*
* If we have devices with different DMA masks, move the free
* area cache limit down for the benefit of the smaller one.
*/
iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
} else {
init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
if (dev && dev_is_pci(dev))
if (pci)
iova_reserve_pci_windows(to_pci_dev(dev), iovad);
}
return 0;
...@@ -181,16 +258,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
EXPORT_SYMBOL(iommu_dma_init_domain);
/**
* dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
* @dir: Direction of DMA transfer
* @coherent: Is the DMA master cache-coherent?
* @attrs: DMA attributes for the mapping
*
* Return: corresponding IOMMU API page protection flags
*/
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
int prot = coherent ? IOMMU_CACHE : 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
...@@ -204,19 +287,28 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
}
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
dma_addr_t dma_limit)
dma_addr_t dma_limit, struct device *dev)
{
struct iova_domain *iovad = cookie_iovad(domain);
unsigned long shift = iova_shift(iovad);
unsigned long length = iova_align(iovad, size) >> shift;
struct iova *iova = NULL;
if (domain->geometry.force_aperture) if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end); dma_limit = min(dma_limit, domain->geometry.aperture_end);
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
true);
/* /*
* Enforce size-alignment to be safe - there could perhaps be an * Enforce size-alignment to be safe - there could perhaps be an
* attribute to control this per-device, or at least per-domain... * attribute to control this per-device, or at least per-domain...
*/ */
return alloc_iova(iovad, length, dma_limit >> shift, true); if (!iova)
iova = alloc_iova(iovad, length, dma_limit >> shift, true);
return iova;
} }
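Worked through with assumed numbers, for a 64-bit-capable PCI device on a domain using a 4K IOVA granule:

	/* Illustrative values only */
	unsigned long shift = 12;			/* iova_shift() for a 4K granule */
	dma_addr_t dma_limit = DMA_BIT_MASK(64);	/* device DMA mask */

	/* 1st try:  alloc_iova() limited to DMA_BIT_MASK(32) >> 12 = 0xfffff (below 4G) */
	/* fallback: alloc_iova() limited to dma_limit >> 12 (anywhere within the mask)  */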
/* The IOVA allocator knows what we mapped, so just unmap whatever that was */ /* The IOVA allocator knows what we mapped, so just unmap whatever that was */
...@@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, ...@@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
if (!pages) if (!pages)
return NULL; return NULL;
iova = __alloc_iova(domain, size, dev->coherent_dma_mask); iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
if (!iova) if (!iova)
goto out_free_pages; goto out_free_pages;
...@@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, ...@@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
struct iova_domain *iovad = cookie_iovad(domain); struct iova_domain *iovad = cookie_iovad(domain);
size_t iova_off = iova_offset(iovad, phys); size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off); size_t len = iova_align(iovad, size + iova_off);
struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
if (!iova) if (!iova)
return DMA_ERROR_CODE; return DMA_ERROR_CODE;
...@@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s; prev = s;
} }
iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova) if (!iova)
goto out_restore_sg; goto out_restore_sg;
...@@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, ...@@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs) size_t size, enum dma_data_direction dir, unsigned long attrs)
{ {
return __iommu_dma_map(dev, phys, size, return __iommu_dma_map(dev, phys, size,
dma_direction_to_prot(dir, false) | IOMMU_MMIO); dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
} }
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
...@@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, ...@@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
} }
int iommu_dma_supported(struct device *dev, u64 mask)
{
/*
* 'Special' IOMMUs which don't have the same addressing capability
* as the CPU will have to wait until we have some way to query that
* before they'll be able to use this framework.
*/
return 1;
}
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{ {
return dma_addr == DMA_ERROR_CODE; return dma_addr == DMA_ERROR_CODE;
...@@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, ...@@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
{ {
struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page; struct iommu_dma_msi_page *msi_page;
struct iova_domain *iovad = &cookie->iovad; struct iova_domain *iovad = cookie_iovad(domain);
struct iova *iova; struct iova *iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
size_t size = cookie_msi_granule(cookie);
msi_addr &= ~(phys_addr_t)iova_mask(iovad); msi_addr &= ~(phys_addr_t)(size - 1);
list_for_each_entry(msi_page, &cookie->msi_page_list, list) list_for_each_entry(msi_page, &cookie->msi_page_list, list)
if (msi_page->phys == msi_addr) if (msi_page->phys == msi_addr)
return msi_page; return msi_page;
...@@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, ...@@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page) if (!msi_page)
return NULL; return NULL;
iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
if (!iova)
goto out_free_page;
msi_page->phys = msi_addr; msi_page->phys = msi_addr;
msi_page->iova = iova_dma_addr(iovad, iova); if (iovad) {
if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
goto out_free_page;
msi_page->iova = iova_dma_addr(iovad, iova);
} else {
msi_page->iova = cookie->msi_iova;
cookie->msi_iova += size;
}
if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
goto out_free_iova; goto out_free_iova;
INIT_LIST_HEAD(&msi_page->list); INIT_LIST_HEAD(&msi_page->list);
...@@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, ...@@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page; return msi_page;
out_free_iova: out_free_iova:
__free_iova(iovad, iova); if (iovad)
__free_iova(iovad, iova);
else
cookie->msi_iova -= size;
out_free_page: out_free_page:
kfree(msi_page); kfree(msi_page);
return NULL; return NULL;
...@@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) ...@@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
msg->data = ~0U; msg->data = ~0U;
} else { } else {
msg->address_hi = upper_32_bits(msi_page->iova); msg->address_hi = upper_32_bits(msi_page->iova);
msg->address_lo &= iova_mask(&cookie->iovad); msg->address_lo &= cookie_msi_granule(cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova); msg->address_lo += lower_32_bits(msi_page->iova);
} }
} }
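For a concrete picture of the doorbell rewrite, assume an MSI target of 0xfee00040, a 4K MSI granule and a remapped IOVA of 0x08000000 (all values hypothetical):

	phys_addr_t msi_addr = 0xfee00040;	/* rounded down to 0xfee00000 for mapping */
	dma_addr_t iova = 0x08000000;		/* msi_page->iova chosen above */

	/* address_lo keeps only the in-page offset and gains the IOVA:           */
	/* (0xfee00040 & (SZ_4K - 1)) + lower_32_bits(0x08000000) == 0x08000040   */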
...@@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; ...@@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd); static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu); static void free_iommu(struct intel_iommu *iommu);
extern const struct iommu_ops intel_iommu_ops;
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{ {
/* /*
...@@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) ...@@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock); raw_spin_lock_init(&iommu->register_lock);
if (intel_iommu_enabled) { if (intel_iommu_enabled) {
iommu->iommu_dev = iommu_device_create(NULL, iommu, err = iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups, intel_iommu_groups,
"%s", iommu->name); "%s", iommu->name);
if (err)
goto err_unmap;
if (IS_ERR(iommu->iommu_dev)) { iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
err = PTR_ERR(iommu->iommu_dev);
err = iommu_device_register(&iommu->iommu);
if (err)
goto err_unmap; goto err_unmap;
}
} }
drhd->iommu = iommu; drhd->iommu = iommu;
...@@ -1103,7 +1108,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) ...@@ -1103,7 +1108,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
static void free_iommu(struct intel_iommu *iommu) static void free_iommu(struct intel_iommu *iommu)
{ {
iommu_device_destroy(iommu->iommu_dev); iommu_device_sysfs_remove(&iommu->iommu);
iommu_device_unregister(&iommu->iommu);
if (iommu->irq) { if (iommu->irq) {
if (iommu->pr_irq) { if (iommu->pr_irq) {
......
...@@ -276,6 +276,8 @@ struct sysmmu_drvdata { ...@@ -276,6 +276,8 @@ struct sysmmu_drvdata {
struct list_head owner_node; /* node for owner controllers list */ struct list_head owner_node; /* node for owner controllers list */
phys_addr_t pgtable; /* assigned page table structure */ phys_addr_t pgtable; /* assigned page table structure */
unsigned int version; /* our version */ unsigned int version; /* our version */
struct iommu_device iommu; /* IOMMU core handle */
}; };
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
...@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data, ...@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data,
{ {
sysmmu_pte_t *ent; sysmmu_pte_t *ent;
dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n", dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
finfo->name, fault_addr, &data->pgtable); dev_name(data->master), finfo->name, fault_addr);
dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
ent = section_entry(phys_to_virt(data->pgtable), fault_addr); ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent); dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
if (lv1ent_page(ent)) { if (lv1ent_page(ent)) {
ent = page_entry(ent, fault_addr); ent = page_entry(ent, fault_addr);
dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
} }
} }
...@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) ...@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
data->sysmmu = dev; data->sysmmu = dev;
spin_lock_init(&data->lock); spin_lock_init(&data->lock);
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
dev_name(data->sysmmu));
if (ret)
return ret;
iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
ret = iommu_device_register(&data->iommu);
if (ret)
return ret;
platform_set_drvdata(pdev, data); platform_set_drvdata(pdev, data);
__sysmmu_get_version(data); __sysmmu_get_version(data);
...@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) ...@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev); pm_runtime_enable(dev);
of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
return 0; return 0;
} }
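The probe sequence above is an instance of the new core registration idiom; a condensed, hedged sketch of the same pattern for a hypothetical platform driver (error unwinding of the sysfs entry is omitted for brevity):

	/* Sketch only: example_iommu and example_iommu_ops are placeholders */
	static int example_iommu_probe(struct platform_device *pdev)
	{
		struct example_iommu *data;
		int ret;

		data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&data->iommu, &example_iommu_ops);
		iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

		return iommu_device_register(&data->iommu);
	}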
...@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) ...@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
DMA_TO_DEVICE); DMA_TO_DEVICE);
/* For mapping page table entries we rely on dma == phys */ /* For mapping page table entries we rely on dma == phys */
BUG_ON(handle != virt_to_phys(domain->pgtable)); BUG_ON(handle != virt_to_phys(domain->pgtable));
if (dma_mapping_error(dma_dev, handle))
goto err_lv2ent;
spin_lock_init(&domain->lock); spin_lock_init(&domain->lock);
spin_lock_init(&domain->pgtablelock); spin_lock_init(&domain->pgtablelock);
...@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) ...@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
return &domain->domain; return &domain->domain;
err_lv2ent:
free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter: err_counter:
free_pages((unsigned long)domain->pgtable, 2); free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie: err_dma_cookie:
...@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, ...@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
} }
if (lv1ent_fault(sent)) { if (lv1ent_fault(sent)) {
dma_addr_t handle;
sysmmu_pte_t *pent; sysmmu_pte_t *pent;
bool need_flush_flpd_cache = lv1ent_zero(sent); bool need_flush_flpd_cache = lv1ent_zero(sent);
...@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, ...@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
kmemleak_ignore(pent); kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES; *pgcounter = NUM_LV2ENTRIES;
dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE); handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, handle)) {
kmem_cache_free(lv2table_kmem_cache, pent);
return ERR_PTR(-EADDRINUSE);
}
/* /*
* If pre-fetched SLPD is a faulty SLPD in zero_l2_table, * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
...@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev) ...@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev)
static void exynos_iommu_remove_device(struct device *dev) static void exynos_iommu_remove_device(struct device *dev)
{ {
struct exynos_iommu_owner *owner = dev->archdata.iommu;
if (!has_sysmmu(dev)) if (!has_sysmmu(dev))
return; return;
if (owner->domain) {
struct iommu_group *group = iommu_group_get(dev);
if (group) {
WARN_ON(owner->domain !=
iommu_group_default_domain(group));
exynos_iommu_detach_device(owner->domain, dev);
iommu_group_put(group);
}
}
iommu_group_remove_device(dev); iommu_group_remove_device(dev);
} }
...@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev, ...@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
{ {
struct exynos_iommu_owner *owner = dev->archdata.iommu; struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct platform_device *sysmmu = of_find_device_by_node(spec->np); struct platform_device *sysmmu = of_find_device_by_node(spec->np);
struct sysmmu_drvdata *data; struct sysmmu_drvdata *data, *entry;
if (!sysmmu) if (!sysmmu)
return -ENODEV; return -ENODEV;
...@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev, ...@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev,
dev->archdata.iommu = owner; dev->archdata.iommu = owner;
} }
list_for_each_entry(entry, &owner->controllers, owner_node)
if (entry == data)
return 0;
list_add_tail(&data->owner_node, &owner->controllers); list_add_tail(&data->owner_node, &owner->controllers);
data->master = dev; data->master = dev;
......
...@@ -440,6 +440,7 @@ struct dmar_rmrr_unit { ...@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
u64 end_address; /* reserved end address */ u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */ struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */ int devices_cnt; /* target device count */
struct iommu_resv_region *resv; /* reserved region handle */
}; };
struct dmar_atsr_unit { struct dmar_atsr_unit {
...@@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); ...@@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
static DEFINE_SPINLOCK(device_domain_lock); static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list); static LIST_HEAD(device_domain_list);
static const struct iommu_ops intel_iommu_ops; const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu) static bool translation_pre_enabled(struct intel_iommu *iommu)
{ {
...@@ -1144,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, ...@@ -1144,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
if (!dma_pte_present(pte) || dma_pte_superpage(pte)) if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next; goto next;
level_pfn = pfn & level_mask(level - 1); level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte)); level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2) if (level > 2)
...@@ -3325,13 +3326,14 @@ static int __init init_dmars(void) ...@@ -3325,13 +3326,14 @@ static int __init init_dmars(void)
iommu_identity_mapping |= IDENTMAP_GFX; iommu_identity_mapping |= IDENTMAP_GFX;
#endif #endif
check_tylersburg_isoch();
if (iommu_identity_mapping) { if (iommu_identity_mapping) {
ret = si_domain_init(hw_pass_through); ret = si_domain_init(hw_pass_through);
if (ret) if (ret)
goto free_iommu; goto free_iommu;
} }
check_tylersburg_isoch();
/* /*
* If we copied translations from a previous kernel in the kdump * If we copied translations from a previous kernel in the kdump
...@@ -4246,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {} ...@@ -4246,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {}
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{ {
struct acpi_dmar_reserved_memory *rmrr; struct acpi_dmar_reserved_memory *rmrr;
int prot = DMA_PTE_READ|DMA_PTE_WRITE;
struct dmar_rmrr_unit *rmrru; struct dmar_rmrr_unit *rmrru;
size_t length;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru) if (!rmrru)
return -ENOMEM; goto out;
rmrru->hdr = header; rmrru->hdr = header;
rmrr = (struct acpi_dmar_reserved_memory *)header; rmrr = (struct acpi_dmar_reserved_memory *)header;
rmrru->base_address = rmrr->base_address; rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address; rmrru->end_address = rmrr->end_address;
length = rmrr->end_address - rmrr->base_address + 1;
rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
IOMMU_RESV_DIRECT);
if (!rmrru->resv)
goto free_rmrru;
rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length, ((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt); &rmrru->devices_cnt);
if (rmrru->devices_cnt && rmrru->devices == NULL) { if (rmrru->devices_cnt && rmrru->devices == NULL)
kfree(rmrru); goto free_all;
return -ENOMEM;
}
list_add(&rmrru->list, &dmar_rmrr_units); list_add(&rmrru->list, &dmar_rmrr_units);
return 0; return 0;
free_all:
kfree(rmrru->resv);
free_rmrru:
kfree(rmrru);
out:
return -ENOMEM;
} }
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
...@@ -4480,6 +4495,7 @@ static void intel_iommu_free_dmars(void) ...@@ -4480,6 +4495,7 @@ static void intel_iommu_free_dmars(void)
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list); list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
kfree(rmrru->resv);
kfree(rmrru); kfree(rmrru);
} }
...@@ -4853,10 +4869,13 @@ int __init intel_iommu_init(void) ...@@ -4853,10 +4869,13 @@ int __init intel_iommu_init(void)
init_iommu_pm_ops(); init_iommu_pm_ops();
for_each_active_iommu(iommu, drhd) for_each_active_iommu(iommu, drhd) {
iommu->iommu_dev = iommu_device_create(NULL, iommu, iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups, intel_iommu_groups,
"%s", iommu->name); "%s", iommu->name);
iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
iommu_device_register(&iommu->iommu);
}
bus_set_iommu(&pci_bus_type, &intel_iommu_ops); bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
bus_register_notifier(&pci_bus_type, &device_nb); bus_register_notifier(&pci_bus_type, &device_nb);
...@@ -5178,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev) ...@@ -5178,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev)
if (!iommu) if (!iommu)
return -ENODEV; return -ENODEV;
iommu_device_link(iommu->iommu_dev, dev); iommu_device_link(&iommu->iommu, dev);
group = iommu_group_get_for_dev(dev); group = iommu_group_get_for_dev(dev);
...@@ -5200,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev) ...@@ -5200,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev)
iommu_group_remove_device(dev); iommu_group_remove_device(dev);
iommu_device_unlink(iommu->iommu_dev, dev); iommu_device_unlink(&iommu->iommu, dev);
}
static void intel_iommu_get_resv_regions(struct device *device,
struct list_head *head)
{
struct iommu_resv_region *reg;
struct dmar_rmrr_unit *rmrr;
struct device *i_dev;
int i;
rcu_read_lock();
for_each_rmrr_units(rmrr) {
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
i, i_dev) {
if (i_dev != device)
continue;
list_add_tail(&rmrr->resv->list, head);
}
}
rcu_read_unlock();
reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
0, IOMMU_RESV_RESERVED);
if (!reg)
return;
list_add_tail(&reg->list, head);
}
static void intel_iommu_put_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list) {
if (entry->type == IOMMU_RESV_RESERVED)
kfree(entry);
}
} }
#ifdef CONFIG_INTEL_IOMMU_SVM #ifdef CONFIG_INTEL_IOMMU_SVM
...@@ -5332,20 +5390,22 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) ...@@ -5332,20 +5390,22 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
} }
#endif /* CONFIG_INTEL_IOMMU_SVM */ #endif /* CONFIG_INTEL_IOMMU_SVM */
static const struct iommu_ops intel_iommu_ops = { const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable, .capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc, .domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free, .domain_free = intel_iommu_domain_free,
.attach_dev = intel_iommu_attach_device, .attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device, .detach_dev = intel_iommu_detach_device,
.map = intel_iommu_map, .map = intel_iommu_map,
.unmap = intel_iommu_unmap, .unmap = intel_iommu_unmap,
.map_sg = default_iommu_map_sg, .map_sg = default_iommu_map_sg,
.iova_to_phys = intel_iommu_iova_to_phys, .iova_to_phys = intel_iommu_iova_to_phys,
.add_device = intel_iommu_add_device, .add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device, .remove_device = intel_iommu_remove_device,
.device_group = pci_device_group, .get_resv_regions = intel_iommu_get_resv_regions,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES, .put_resv_regions = intel_iommu_put_resv_regions,
.device_group = pci_device_group,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
}; };
static void quirk_iommu_g4x_gfx(struct pci_dev *dev) static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
......
...@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, ...@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
if (!(prot & IOMMU_MMIO)) if (!(prot & IOMMU_MMIO))
pte |= ARM_V7S_ATTR_TEX(1); pte |= ARM_V7S_ATTR_TEX(1);
if (ap) { if (ap) {
pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV; pte |= ARM_V7S_PTE_AF;
if (!(prot & IOMMU_PRIV))
pte |= ARM_V7S_PTE_AP_UNPRIV;
if (!(prot & IOMMU_WRITE)) if (!(prot & IOMMU_WRITE))
pte |= ARM_V7S_PTE_AP_RDONLY; pte |= ARM_V7S_PTE_AP_RDONLY;
} }
...@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl) ...@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
if (!(attr & ARM_V7S_PTE_AP_RDONLY)) if (!(attr & ARM_V7S_PTE_AP_RDONLY))
prot |= IOMMU_WRITE; prot |= IOMMU_WRITE;
if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
prot |= IOMMU_PRIV;
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
prot |= IOMMU_MMIO; prot |= IOMMU_MMIO;
else if (pte & ARM_V7S_ATTR_C) else if (pte & ARM_V7S_ATTR_C)
......
...@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, ...@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
if (data->iop.fmt == ARM_64_LPAE_S1 || if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) { data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; pte = ARM_LPAE_PTE_nG;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY; pte |= ARM_LPAE_PTE_AP_RDONLY;
if (!(prot & IOMMU_PRIV))
pte |= ARM_LPAE_PTE_AP_UNPRIV;
if (prot & IOMMU_MMIO) if (prot & IOMMU_MMIO)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
<< ARM_LPAE_PTE_ATTRINDX_SHIFT); << ARM_LPAE_PTE_ATTRINDX_SHIFT);
......
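With IOMMU_PRIV now honoured by both ARM page-table formats, a caller that wants a buffer reachable only by privileged transactions from its master would request it like this (domain, iova and phys are assumed to come from elsewhere):

	/* Sketch only */
	int ret = iommu_map(domain, iova, phys, SZ_4K,
			    IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);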
...@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void) ...@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void)
postcore_initcall(iommu_dev_init); postcore_initcall(iommu_dev_init);
/* /*
* Create an IOMMU device and return a pointer to it. IOMMU specific * Init the struct device for the IOMMU. IOMMU specific attributes can
* attributes can be provided as an attribute group, allowing a unique * be provided as an attribute group, allowing a unique namespace per
* namespace per IOMMU type. * IOMMU type.
*/ */
struct device *iommu_device_create(struct device *parent, void *drvdata, int iommu_device_sysfs_add(struct iommu_device *iommu,
const struct attribute_group **groups, struct device *parent,
const char *fmt, ...) const struct attribute_group **groups,
const char *fmt, ...)
{ {
struct device *dev;
va_list vargs; va_list vargs;
int ret; int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL); device_initialize(&iommu->dev);
if (!dev)
return ERR_PTR(-ENOMEM);
device_initialize(dev); iommu->dev.class = &iommu_class;
iommu->dev.parent = parent;
dev->class = &iommu_class; iommu->dev.groups = groups;
dev->parent = parent;
dev->groups = groups;
dev_set_drvdata(dev, drvdata);
va_start(vargs, fmt); va_start(vargs, fmt);
ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs); ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
va_end(vargs); va_end(vargs);
if (ret) if (ret)
goto error; goto error;
ret = device_add(dev); ret = device_add(&iommu->dev);
if (ret) if (ret)
goto error; goto error;
return dev; return 0;
error: error:
put_device(dev); put_device(&iommu->dev);
return ERR_PTR(ret); return ret;
} }
void iommu_device_destroy(struct device *dev) void iommu_device_sysfs_remove(struct iommu_device *iommu)
{ {
if (!dev || IS_ERR(dev)) device_unregister(&iommu->dev);
return;
device_unregister(dev);
} }
/* /*
* IOMMU drivers can indicate a device is managed by a given IOMMU using * IOMMU drivers can indicate a device is managed by a given IOMMU using
* this interface. A link to the device will be created in the "devices" * this interface. A link to the device will be created in the "devices"
* directory of the IOMMU device in sysfs and an "iommu" link will be * directory of the IOMMU device in sysfs and an "iommu" link will be
* created under the linked device, pointing back at the IOMMU device. * created under the linked device, pointing back at the IOMMU device.
*/ */
int iommu_device_link(struct device *dev, struct device *link) int iommu_device_link(struct iommu_device *iommu, struct device *link)
{ {
int ret; int ret;
if (!dev || IS_ERR(dev)) if (!iommu || IS_ERR(iommu))
return -ENODEV; return -ENODEV;
ret = sysfs_add_link_to_group(&dev->kobj, "devices", ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
&link->kobj, dev_name(link)); &link->kobj, dev_name(link));
if (ret) if (ret)
return ret; return ret;
ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu"); ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
if (ret) if (ret)
sysfs_remove_link_from_group(&dev->kobj, "devices", sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
dev_name(link)); dev_name(link));
return ret; return ret;
} }
void iommu_device_unlink(struct device *dev, struct device *link) void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{ {
if (!dev || IS_ERR(dev)) if (!iommu || IS_ERR(iommu))
return; return;
sysfs_remove_link(&link->kobj, "iommu"); sysfs_remove_link(&link->kobj, "iommu");
sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link)); sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
} }
...@@ -55,7 +55,7 @@ struct iommu_group { ...@@ -55,7 +55,7 @@ struct iommu_group {
struct iommu_domain *domain; struct iommu_domain *domain;
}; };
struct iommu_device { struct group_device {
struct list_head list; struct list_head list;
struct device *dev; struct device *dev;
char *name; char *name;
...@@ -68,6 +68,12 @@ struct iommu_group_attribute { ...@@ -68,6 +68,12 @@ struct iommu_group_attribute {
const char *buf, size_t count); const char *buf, size_t count);
}; };
static const char * const iommu_group_resv_type_string[] = {
[IOMMU_RESV_DIRECT] = "direct",
[IOMMU_RESV_RESERVED] = "reserved",
[IOMMU_RESV_MSI] = "msi",
};
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \ struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store) __ATTR(_name, _mode, _show, _store)
...@@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name = \ ...@@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
#define to_iommu_group(_kobj) \ #define to_iommu_group(_kobj) \
container_of(_kobj, struct iommu_group, kobj) container_of(_kobj, struct iommu_group, kobj)
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
int iommu_device_register(struct iommu_device *iommu)
{
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
return 0;
}
void iommu_device_unregister(struct iommu_device *iommu)
{
spin_lock(&iommu_device_lock);
list_del(&iommu->list);
spin_unlock(&iommu_device_lock);
}
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type); unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain, static int __iommu_attach_device(struct iommu_domain *domain,
...@@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) ...@@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
return sprintf(buf, "%s\n", group->name); return sprintf(buf, "%s\n", group->name);
} }
/**
* iommu_insert_resv_region - Insert a new region in the
* list of reserved regions.
* @new: new region to insert
* @regions: list of regions
*
* The new element is sorted by address with respect to the other
* regions of the same type. In case it overlaps with another
* region of the same type, regions are merged. In case it
* overlaps with another region of different type, regions are
* not merged.
*/
static int iommu_insert_resv_region(struct iommu_resv_region *new,
struct list_head *regions)
{
struct iommu_resv_region *region;
phys_addr_t start = new->start;
phys_addr_t end = new->start + new->length - 1;
struct list_head *pos = regions->next;
while (pos != regions) {
struct iommu_resv_region *entry =
list_entry(pos, struct iommu_resv_region, list);
phys_addr_t a = entry->start;
phys_addr_t b = entry->start + entry->length - 1;
int type = entry->type;
if (end < a) {
goto insert;
} else if (start > b) {
pos = pos->next;
} else if ((start >= a) && (end <= b)) {
if (new->type == type)
goto done;
else
pos = pos->next;
} else {
if (new->type == type) {
phys_addr_t new_start = min(a, start);
phys_addr_t new_end = max(b, end);
list_del(&entry->list);
entry->start = new_start;
entry->length = new_end - new_start + 1;
iommu_insert_resv_region(entry, regions);
} else {
pos = pos->next;
}
}
}
insert:
region = iommu_alloc_resv_region(new->start, new->length,
new->prot, new->type);
if (!region)
return -ENOMEM;
list_add_tail(&region->list, pos);
done:
return 0;
}
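A worked example of the insertion rules, with assumed addresses: overlapping regions of the same type collapse into a single entry, while an overlap across types leaves both entries in place:

	/* Hypothetical inputs (type, inclusive range):                        */
	/*   direct   [0x1000, 0x1fff]                                         */
	/*   direct   [0x1800, 0x2fff]  -> merged into direct [0x1000, 0x2fff] */
	/*   reserved [0x2000, 0x20ff]  -> kept as a separate entry            */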
static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
struct list_head *group_resv_regions)
{
struct iommu_resv_region *entry;
int ret = 0;
list_for_each_entry(entry, dev_resv_regions, list) {
ret = iommu_insert_resv_region(entry, group_resv_regions);
if (ret)
break;
}
return ret;
}
int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head)
{
struct group_device *device;
int ret = 0;
mutex_lock(&group->mutex);
list_for_each_entry(device, &group->devices, list) {
struct list_head dev_resv_regions;
INIT_LIST_HEAD(&dev_resv_regions);
iommu_get_resv_regions(device->dev, &dev_resv_regions);
ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
iommu_put_resv_regions(device->dev, &dev_resv_regions);
if (ret)
break;
}
mutex_unlock(&group->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
char *buf)
{
struct iommu_resv_region *region, *next;
struct list_head group_resv_regions;
char *str = buf;
INIT_LIST_HEAD(&group_resv_regions);
iommu_get_group_resv_regions(group, &group_resv_regions);
list_for_each_entry_safe(region, next, &group_resv_regions, list) {
str += sprintf(str, "0x%016llx 0x%016llx %s\n",
(long long int)region->start,
(long long int)(region->start +
region->length - 1),
iommu_group_resv_type_string[region->type]);
kfree(region);
}
return (str - buf);
}
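Given the format string above, reading the resulting attribute back for a group with a single MSI window would produce output along these lines (addresses purely illustrative):

	0x00000000fee00000 0x00000000feefffff msi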
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
static IOMMU_GROUP_ATTR(reserved_regions, 0444,
iommu_group_show_resv_regions, NULL);
static void iommu_group_release(struct kobject *kobj) static void iommu_group_release(struct kobject *kobj)
{ {
struct iommu_group *group = to_iommu_group(kobj); struct iommu_group *group = to_iommu_group(kobj);
...@@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void) ...@@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void)
*/ */
kobject_put(&group->kobj); kobject_put(&group->kobj);
ret = iommu_group_create_file(group,
&iommu_group_attr_reserved_regions);
if (ret)
return ERR_PTR(ret);
pr_debug("Allocated group %d\n", group->id); pr_debug("Allocated group %d\n", group->id);
return group; return group;
...@@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, ...@@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
struct device *dev) struct device *dev)
{ {
struct iommu_domain *domain = group->default_domain; struct iommu_domain *domain = group->default_domain;
struct iommu_dm_region *entry; struct iommu_resv_region *entry;
struct list_head mappings; struct list_head mappings;
unsigned long pg_size; unsigned long pg_size;
int ret = 0; int ret = 0;
...@@ -331,18 +484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, ...@@ -331,18 +484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
pg_size = 1UL << __ffs(domain->pgsize_bitmap); pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings); INIT_LIST_HEAD(&mappings);
iommu_get_dm_regions(dev, &mappings); iommu_get_resv_regions(dev, &mappings);
/* We need to consider overlapping regions for different devices */ /* We need to consider overlapping regions for different devices */
list_for_each_entry(entry, &mappings, list) { list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr; dma_addr_t start, end, addr;
if (domain->ops->apply_dm_region) if (domain->ops->apply_resv_region)
domain->ops->apply_dm_region(dev, domain, entry); domain->ops->apply_resv_region(dev, domain, entry);
start = ALIGN(entry->start, pg_size); start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size); end = ALIGN(entry->start + entry->length, pg_size);
if (entry->type != IOMMU_RESV_DIRECT)
continue;
for (addr = start; addr < end; addr += pg_size) { for (addr = start; addr < end; addr += pg_size) {
phys_addr_t phys_addr; phys_addr_t phys_addr;
...@@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, ...@@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
} }
out: out:
iommu_put_dm_regions(dev, &mappings); iommu_put_resv_regions(dev, &mappings);
return ret; return ret;
} }
...@@ -374,7 +530,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, ...@@ -374,7 +530,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
int iommu_group_add_device(struct iommu_group *group, struct device *dev) int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{ {
int ret, i = 0; int ret, i = 0;
struct iommu_device *device; struct group_device *device;
device = kzalloc(sizeof(*device), GFP_KERNEL); device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device) if (!device)
...@@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) ...@@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
device->dev = dev; device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
if (ret) { if (ret)
kfree(device); goto err_free_device;
return ret;
}
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename: rename:
if (!device->name) { if (!device->name) {
sysfs_remove_link(&dev->kobj, "iommu_group"); ret = -ENOMEM;
kfree(device); goto err_remove_link;
return -ENOMEM;
} }
ret = sysfs_create_link_nowarn(group->devices_kobj, ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name); &dev->kobj, device->name);
if (ret) { if (ret) {
kfree(device->name);
if (ret == -EEXIST && i >= 0) { if (ret == -EEXIST && i >= 0) {
/* /*
* Account for the slim chance of collision * Account for the slim chance of collision
* and append an instance to the name. * and append an instance to the name.
*/ */
kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d", device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++); kobject_name(&dev->kobj), i++);
goto rename; goto rename;
} }
goto err_free_name;
sysfs_remove_link(&dev->kobj, "iommu_group");
kfree(device);
return ret;
} }
kobject_get(group->devices_kobj); kobject_get(group->devices_kobj);
...@@ -424,8 +574,10 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) ...@@ -424,8 +574,10 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
mutex_lock(&group->mutex); mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices); list_add_tail(&device->list, &group->devices);
if (group->domain) if (group->domain)
__iommu_attach_device(group->domain, dev); ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex); mutex_unlock(&group->mutex);
if (ret)
goto err_put_group;
/* Notify any listeners about change to group. */ /* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier, blocking_notifier_call_chain(&group->notifier,
...@@ -436,6 +588,21 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) ...@@ -436,6 +588,21 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
return 0; return 0;
err_put_group:
mutex_lock(&group->mutex);
list_del(&device->list);
mutex_unlock(&group->mutex);
dev->iommu_group = NULL;
kobject_put(group->devices_kobj);
err_free_name:
kfree(device->name);
err_remove_link:
sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
kfree(device);
pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
return ret;
} }
EXPORT_SYMBOL_GPL(iommu_group_add_device); EXPORT_SYMBOL_GPL(iommu_group_add_device);
...@@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device); ...@@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
void iommu_group_remove_device(struct device *dev) void iommu_group_remove_device(struct device *dev)
{ {
struct iommu_group *group = dev->iommu_group; struct iommu_group *group = dev->iommu_group;
struct iommu_device *tmp_device, *device = NULL; struct group_device *tmp_device, *device = NULL;
pr_info("Removing device %s from group %d\n", dev_name(dev), group->id); pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
...@@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device); ...@@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device);
static int iommu_group_device_count(struct iommu_group *group) static int iommu_group_device_count(struct iommu_group *group)
{ {
struct iommu_device *entry; struct group_device *entry;
int ret = 0; int ret = 0;
list_for_each_entry(entry, &group->devices, list) list_for_each_entry(entry, &group->devices, list)
...@@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group) ...@@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group)
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *)) int (*fn)(struct device *, void *))
{ {
struct iommu_device *device; struct group_device *device;
int ret = 0; int ret = 0;
list_for_each_entry(device, &group->devices, list) { list_for_each_entry(device, &group->devices, list) {
...@@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain, ...@@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
} }
EXPORT_SYMBOL_GPL(iommu_domain_set_attr); EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
void iommu_get_dm_regions(struct device *dev, struct list_head *list) void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{ {
const struct iommu_ops *ops = dev->bus->iommu_ops; const struct iommu_ops *ops = dev->bus->iommu_ops;
if (ops && ops->get_dm_regions) if (ops && ops->get_resv_regions)
ops->get_dm_regions(dev, list); ops->get_resv_regions(dev, list);
} }
void iommu_put_dm_regions(struct device *dev, struct list_head *list) void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{ {
const struct iommu_ops *ops = dev->bus->iommu_ops; const struct iommu_ops *ops = dev->bus->iommu_ops;
if (ops && ops->put_dm_regions) if (ops && ops->put_resv_regions)
ops->put_dm_regions(dev, list); ops->put_resv_regions(dev, list);
}
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
size_t length,
int prot, int type)
{
struct iommu_resv_region *region;
region = kzalloc(sizeof(*region), GFP_KERNEL);
if (!region)
return NULL;
INIT_LIST_HEAD(&region->list);
region->start = start;
region->length = length;
region->prot = prot;
region->type = type;
return region;
} }
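A hedged sketch of a driver-side ->get_resv_regions() callback built on this helper; the window base, size and callback name are assumptions, only the allocation and list handling follow the code above:

	static void example_get_resv_regions(struct device *dev,
					     struct list_head *head)
	{
		struct iommu_resv_region *region;

		/* Report a fixed, hypothetical MSI window for every device */
		region = iommu_alloc_resv_region(0x08000000, SZ_1M,
						 IOMMU_WRITE, IOMMU_RESV_MSI);
		if (!region)
			return;

		list_add_tail(&region->list, head);
	}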
/* Request that a device is direct mapped by the IOMMU */ /* Request that a device is direct mapped by the IOMMU */
...@@ -1628,43 +1813,18 @@ int iommu_request_dm_for_dev(struct device *dev) ...@@ -1628,43 +1813,18 @@ int iommu_request_dm_for_dev(struct device *dev)
return ret; return ret;
} }
struct iommu_instance { const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
struct list_head list;
struct fwnode_handle *fwnode;
const struct iommu_ops *ops;
};
static LIST_HEAD(iommu_instance_list);
static DEFINE_SPINLOCK(iommu_instance_lock);
void iommu_register_instance(struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{ {
struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (WARN_ON(!iommu))
return;
of_node_get(to_of_node(fwnode));
INIT_LIST_HEAD(&iommu->list);
iommu->fwnode = fwnode;
iommu->ops = ops;
spin_lock(&iommu_instance_lock);
list_add_tail(&iommu->list, &iommu_instance_list);
spin_unlock(&iommu_instance_lock);
}
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
{
struct iommu_instance *instance;
const struct iommu_ops *ops = NULL; const struct iommu_ops *ops = NULL;
struct iommu_device *iommu;
spin_lock(&iommu_instance_lock); spin_lock(&iommu_device_lock);
list_for_each_entry(instance, &iommu_instance_list, list) list_for_each_entry(iommu, &iommu_device_list, list)
if (instance->fwnode == fwnode) { if (iommu->fwnode == fwnode) {
ops = instance->ops; ops = iommu->ops;
break; break;
} }
spin_unlock(&iommu_instance_lock); spin_unlock(&iommu_device_lock);
return ops; return ops;
} }
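Firmware-description code can then resolve a registered instance back to its ops from the fwnode alone; a minimal sketch, assuming iommu_np is the device_node of the IOMMU referenced by a client's phandle:

	/* Sketch: look up the ops behind a firmware-described IOMMU instance */
	const struct iommu_ops *ops = iommu_ops_from_fwnode(&iommu_np->fwnode);

	if (!ops)
		return NULL;	/* no driver has registered this instance yet */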
...@@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) ...@@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL); fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
if (!fwspec) if (!fwspec)
return -ENOMEM; return -ENOMEM;
dev->iommu_fwspec = fwspec;
} }
for (i = 0; i < num_ids; i++) for (i = 0; i < num_ids; i++)
fwspec->ids[fwspec->num_ids + i] = ids[i]; fwspec->ids[fwspec->num_ids + i] = ids[i];
fwspec->num_ids += num_ids; fwspec->num_ids += num_ids;
dev->iommu_fwspec = fwspec;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
...@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) ...@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
else { else {
struct rb_node *prev_node = rb_prev(iovad->cached32_node); struct rb_node *prev_node = rb_prev(iovad->cached32_node);
struct iova *curr_iova = struct iova *curr_iova =
container_of(iovad->cached32_node, struct iova, node); rb_entry(iovad->cached32_node, struct iova, node);
*limit_pfn = curr_iova->pfn_lo - 1; *limit_pfn = curr_iova->pfn_lo - 1;
return prev_node; return prev_node;
} }
...@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) ...@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
if (!iovad->cached32_node) if (!iovad->cached32_node)
return; return;
curr = iovad->cached32_node; curr = iovad->cached32_node;
cached_iova = container_of(curr, struct iova, node); cached_iova = rb_entry(curr, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo) { if (free->pfn_lo >= cached_iova->pfn_lo) {
struct rb_node *node = rb_next(&free->node); struct rb_node *node = rb_next(&free->node);
struct iova *iova = container_of(node, struct iova, node); struct iova *iova = rb_entry(node, struct iova, node);
/* only cache if it's below 32bit pfn */ /* only cache if it's below 32bit pfn */
if (node && iova->pfn_lo < iovad->dma_32bit_pfn) if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
...@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, ...@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr = __get_cached_rbnode(iovad, &limit_pfn); curr = __get_cached_rbnode(iovad, &limit_pfn);
prev = curr; prev = curr;
while (curr) { while (curr) {
struct iova *curr_iova = container_of(curr, struct iova, node); struct iova *curr_iova = rb_entry(curr, struct iova, node);
if (limit_pfn < curr_iova->pfn_lo) if (limit_pfn < curr_iova->pfn_lo)
goto move_left; goto move_left;
...@@ -171,8 +171,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, ...@@ -171,8 +171,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
/* Figure out where to put new node */ /* Figure out where to put new node */
while (*entry) { while (*entry) {
struct iova *this = container_of(*entry, struct iova *this = rb_entry(*entry, struct iova, node);
struct iova, node);
parent = *entry; parent = *entry;
if (new->pfn_lo < this->pfn_lo) if (new->pfn_lo < this->pfn_lo)
...@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) ...@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
struct rb_node **new = &(root->rb_node), *parent = NULL; struct rb_node **new = &(root->rb_node), *parent = NULL;
/* Figure out where to put new node */ /* Figure out where to put new node */
while (*new) { while (*new) {
struct iova *this = container_of(*new, struct iova, node); struct iova *this = rb_entry(*new, struct iova, node);
parent = *new; parent = *new;
...@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn) ...@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
assert_spin_locked(&iovad->iova_rbtree_lock); assert_spin_locked(&iovad->iova_rbtree_lock);
while (node) { while (node) {
struct iova *iova = container_of(node, struct iova, node); struct iova *iova = rb_entry(node, struct iova, node);
/* If pfn falls within iova's range, return iova */ /* If pfn falls within iova's range, return iova */
if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
...@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad) ...@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad)
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
node = rb_first(&iovad->rbroot); node = rb_first(&iovad->rbroot);
while (node) { while (node) {
struct iova *iova = container_of(node, struct iova, node); struct iova *iova = rb_entry(node, struct iova, node);
rb_erase(node, &iovad->rbroot); rb_erase(node, &iovad->rbroot);
free_iova_mem(iova); free_iova_mem(iova);
...@@ -477,7 +476,7 @@ static int ...@@ -477,7 +476,7 @@ static int
__is_range_overlap(struct rb_node *node, __is_range_overlap(struct rb_node *node,
unsigned long pfn_lo, unsigned long pfn_hi) unsigned long pfn_lo, unsigned long pfn_hi)
{ {
struct iova *iova = container_of(node, struct iova, node); struct iova *iova = rb_entry(node, struct iova, node);
if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
return 1; return 1;
...@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad, ...@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad,
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) { if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
iova = container_of(node, struct iova, node); iova = rb_entry(node, struct iova, node);
__adjust_overlap_range(iova, &pfn_lo, &pfn_hi); __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
if ((pfn_lo >= iova->pfn_lo) && if ((pfn_lo >= iova->pfn_lo) &&
(pfn_hi <= iova->pfn_hi)) (pfn_hi <= iova->pfn_hi))
...@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) ...@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
spin_lock_irqsave(&from->iova_rbtree_lock, flags); spin_lock_irqsave(&from->iova_rbtree_lock, flags);
for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
struct iova *iova = container_of(node, struct iova, node); struct iova *iova = rb_entry(node, struct iova, node);
struct iova *new_iova; struct iova *new_iova;
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
......
...@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) ...@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->cfg.ias = 32; domain->cfg.ias = 32;
domain->cfg.oas = 40; domain->cfg.oas = 40;
domain->cfg.tlb = &ipmmu_gather_ops; domain->cfg.tlb = &ipmmu_gather_ops;
domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
domain->io_domain.geometry.force_aperture = true;
/* /*
* TODO: Add support for coherent walk through CCI with DVM and remove * TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling. For now, delegate it to the io-pgtable code. * cache handling. For now, delegate it to the io-pgtable code.
......
...@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv) ...@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
return 0; return 0;
} }
/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
struct msm_iommu_dev *iommu, *ret = NULL;
struct msm_iommu_ctx_dev *master;
list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
master = list_first_entry(&iommu->ctx_list,
struct msm_iommu_ctx_dev,
list);
if (master->of_node == dev->of_node) {
ret = iommu;
break;
}
}
return ret;
}
static int msm_iommu_add_device(struct device *dev)
{
struct msm_iommu_dev *iommu;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
iommu = find_iommu_for_dev(dev);
if (iommu)
iommu_device_link(&iommu->iommu, dev);
else
ret = -ENODEV;
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
}
static void msm_iommu_remove_device(struct device *dev)
{
struct msm_iommu_dev *iommu;
unsigned long flags;
spin_lock_irqsave(&msm_iommu_lock, flags);
iommu = find_iommu_for_dev(dev);
if (iommu)
iommu_device_unlink(&iommu->iommu, dev);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{ {
int ret = 0; int ret = 0;
...@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = { ...@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = {
.unmap = msm_iommu_unmap, .unmap = msm_iommu_unmap,
.map_sg = default_iommu_map_sg, .map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys, .iova_to_phys = msm_iommu_iova_to_phys,
.add_device = msm_iommu_add_device,
.remove_device = msm_iommu_remove_device,
.pgsize_bitmap = MSM_IOMMU_PGSIZES, .pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate, .of_xlate = qcom_iommu_of_xlate,
}; };
...@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = { ...@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = {
static int msm_iommu_probe(struct platform_device *pdev) static int msm_iommu_probe(struct platform_device *pdev)
{ {
struct resource *r; struct resource *r;
resource_size_t ioaddr;
struct msm_iommu_dev *iommu; struct msm_iommu_dev *iommu;
int ret, par, val; int ret, par, val;
...@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev) ...@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
ret = PTR_ERR(iommu->base); ret = PTR_ERR(iommu->base);
goto fail; goto fail;
} }
ioaddr = r->start;
iommu->irq = platform_get_irq(pdev, 0); iommu->irq = platform_get_irq(pdev, 0);
if (iommu->irq < 0) { if (iommu->irq < 0) {
...@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev) ...@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev)
} }
list_add(&iommu->dev_node, &qcom_iommu_devices); list_add(&iommu->dev_node, &qcom_iommu_devices);
of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
"msm-smmu.%pa", &ioaddr);
if (ret) {
pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
goto fail;
}
iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
ret = iommu_device_register(&iommu->iommu);
if (ret) {
pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
goto fail;
}
pr_info("device mapped at %p, irq %d with %d ctx banks\n", pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb); iommu->base, iommu->irq, iommu->ncb);
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#define MSM_IOMMU_H #define MSM_IOMMU_H
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/clk.h> #include <linux/clk.h>
/* Sharability attributes of MSM IOMMU mappings */ /* Sharability attributes of MSM IOMMU mappings */
...@@ -68,6 +69,8 @@ struct msm_iommu_dev { ...@@ -68,6 +69,8 @@ struct msm_iommu_dev {
struct list_head dom_node; struct list_head dom_node;
struct list_head ctx_list; struct list_head ctx_list;
DECLARE_BITMAP(context_map, IOMMU_MAX_CBS); DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
struct iommu_device iommu;
}; };
/** /**
......
...@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, ...@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
static int mtk_iommu_add_device(struct device *dev) static int mtk_iommu_add_device(struct device *dev)
{ {
struct mtk_iommu_data *data;
struct iommu_group *group; struct iommu_group *group;
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not a iommu client device */ return -ENODEV; /* Not a iommu client device */
data = dev->iommu_fwspec->iommu_priv;
iommu_device_link(&data->iommu, dev);
group = iommu_group_get_for_dev(dev); group = iommu_group_get_for_dev(dev);
if (IS_ERR(group)) if (IS_ERR(group))
return PTR_ERR(group); return PTR_ERR(group);
...@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev) ...@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev) static void mtk_iommu_remove_device(struct device *dev)
{ {
struct mtk_iommu_data *data;
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return; return;
data = dev->iommu_fwspec->iommu_priv;
iommu_device_unlink(&data->iommu, dev);
iommu_group_remove_device(dev); iommu_group_remove_device(dev);
iommu_fwspec_free(dev); iommu_fwspec_free(dev);
} }
...@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) ...@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
struct mtk_iommu_data *data; struct mtk_iommu_data *data;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct resource *res; struct resource *res;
resource_size_t ioaddr;
struct component_match *match = NULL; struct component_match *match = NULL;
void *protect; void *protect;
int i, larb_nr, ret; int i, larb_nr, ret;
...@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) ...@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->base = devm_ioremap_resource(dev, res); data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(data->base)) if (IS_ERR(data->base))
return PTR_ERR(data->base); return PTR_ERR(data->base);
ioaddr = res->start;
data->irq = platform_get_irq(pdev, 0); data->irq = platform_get_irq(pdev, 0);
if (data->irq < 0) if (data->irq < 0)
...@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev) ...@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret) if (ret)
return ret; return ret;
ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
"mtk-iommu.%pa", &ioaddr);
if (ret)
return ret;
iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
ret = iommu_device_register(&data->iommu);
if (ret)
return ret;
if (!iommu_present(&platform_bus_type)) if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
...@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev) ...@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
{ {
struct mtk_iommu_data *data = platform_get_drvdata(pdev); struct mtk_iommu_data *data = platform_get_drvdata(pdev);
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
if (iommu_present(&platform_bus_type)) if (iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, NULL); bus_set_iommu(&platform_bus_type, NULL);
...@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np) ...@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np)
return ret; return ret;
} }
of_iommu_set_ops(np, &mtk_iommu_ops);
return 0; return 0;
} }
......
...@@ -47,6 +47,8 @@ struct mtk_iommu_data { ...@@ -47,6 +47,8 @@ struct mtk_iommu_data {
struct iommu_group *m4u_group; struct iommu_group *m4u_group;
struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
bool enable_4GB; bool enable_4GB;
struct iommu_device iommu;
}; };
static inline int compare_of(struct device *dev, void *data) static inline int compare_of(struct device *dev, void *data)
......
...@@ -127,7 +127,7 @@ static const struct iommu_ops ...@@ -127,7 +127,7 @@ static const struct iommu_ops
"iommu-map-mask", &iommu_spec.np, iommu_spec.args)) "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
return NULL; return NULL;
ops = of_iommu_get_ops(iommu_spec.np); ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
if (!ops || !ops->of_xlate || if (!ops || !ops->of_xlate ||
iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
ops->of_xlate(&pdev->dev, &iommu_spec)) ops->of_xlate(&pdev->dev, &iommu_spec))
...@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, ...@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
"#iommu-cells", idx, "#iommu-cells", idx,
&iommu_spec)) { &iommu_spec)) {
np = iommu_spec.np; np = iommu_spec.np;
ops = of_iommu_get_ops(np); ops = iommu_ops_from_fwnode(&np->fwnode);
if (!ops || !ops->of_xlate || if (!ops || !ops->of_xlate ||
iommu_fwspec_init(dev, &np->fwnode, ops) || iommu_fwspec_init(dev, &np->fwnode, ops) ||
......
...@@ -1646,6 +1646,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) ...@@ -1646,6 +1646,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
inner_domain->parent = its_parent; inner_domain->parent = its_parent;
inner_domain->bus_token = DOMAIN_BUS_NEXUS; inner_domain->bus_token = DOMAIN_BUS_NEXUS;
inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
info->ops = &its_msi_domain_ops; info->ops = &its_msi_domain_ops;
info->data = its; info->data = its;
inner_domain->host_data = info; inner_domain->host_data = info;
......
...@@ -38,6 +38,8 @@ ...@@ -38,6 +38,8 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/mdev.h> #include <linux/mdev.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>
#define DRIVER_VERSION "0.2" #define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
...@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain, ...@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
return NULL; return NULL;
} }
static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
phys_addr_t *base)
{
struct list_head group_resv_regions;
struct iommu_resv_region *region, *next;
bool ret = false;
INIT_LIST_HEAD(&group_resv_regions);
iommu_get_group_resv_regions(group, &group_resv_regions);
list_for_each_entry(region, &group_resv_regions, list) {
if (region->type & IOMMU_RESV_MSI) {
*base = region->start;
ret = true;
goto out;
}
}
out:
list_for_each_entry_safe(region, next, &group_resv_regions, list)
kfree(region);
return ret;
}
static int vfio_iommu_type1_attach_group(void *iommu_data, static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group) struct iommu_group *iommu_group)
{ {
...@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, ...@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
struct vfio_domain *domain, *d; struct vfio_domain *domain, *d;
struct bus_type *bus = NULL, *mdev_bus; struct bus_type *bus = NULL, *mdev_bus;
int ret; int ret;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base;
mutex_lock(&iommu->lock); mutex_lock(&iommu->lock);
...@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, ...@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret) if (ret)
goto out_domain; goto out_domain;
resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
INIT_LIST_HEAD(&domain->group_list); INIT_LIST_HEAD(&domain->group_list);
list_add(&group->next, &domain->group_list); list_add(&group->next, &domain->group_list);
if (!allow_unsafe_interrupts && msi_remap = resv_msi ? irq_domain_check_msi_remap() :
!iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) { iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
if (!allow_unsafe_interrupts && !msi_remap) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
__func__); __func__);
ret = -EPERM; ret = -EPERM;
...@@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, ...@@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret) if (ret)
goto out_detach; goto out_detach;
if (resv_msi) {
ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
if (ret)
goto out_detach;
}
list_add(&domain->next, &iommu->domain_list); list_add(&domain->next, &iommu->domain_list);
mutex_unlock(&iommu->lock); mutex_unlock(&iommu->lock);
......
...@@ -27,6 +27,7 @@ int iommu_dma_init(void); ...@@ -27,6 +27,7 @@ int iommu_dma_init(void);
/* Domain management interface for IOMMU drivers */ /* Domain management interface for IOMMU drivers */
int iommu_get_dma_cookie(struct iommu_domain *domain); int iommu_get_dma_cookie(struct iommu_domain *domain);
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain); void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */ /* Setup call for arch DMA mapping code */
...@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, ...@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev); u64 size, struct device *dev);
/* General helpers for DMA-API <-> IOMMU-API interaction */ /* General helpers for DMA-API <-> IOMMU-API interaction */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs);
/* /*
* These implement the bulk of the relevant DMA mapping callbacks, but require * These implement the bulk of the relevant DMA mapping callbacks, but require
...@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, ...@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs); size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs); size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */ /* The DMA API isn't _quite_ the whole story, though... */
...@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain) ...@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
return -ENODEV; return -ENODEV;
} }
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
return -ENODEV;
}
static inline void iommu_put_dma_cookie(struct iommu_domain *domain) static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{ {
} }
......
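The dma-iommu helper that turns a DMA direction into IOMMU protection flags is renamed from dma_direction_to_prot() to dma_info_to_prot() and now also receives the mapping attributes, so per-mapping attributes can influence the page protections. A minimal sketch of such a helper, assuming it folds DMA_ATTR_PRIVILEGED (introduced further down in this series) into IOMMU_PRIV; the exact body in drivers/iommu/dma-iommu.c may differ:

static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                            unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        /* privileged-only buffers become privileged-only IOMMU mappings */
        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}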
...@@ -62,6 +62,13 @@ ...@@ -62,6 +62,13 @@
*/ */
#define DMA_ATTR_NO_WARN (1UL << 8) #define DMA_ATTR_NO_WARN (1UL << 8)
/*
* DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
* accessible at an elevated privilege level (and ideally inaccessible or
* at least read-only at lesser-privileged levels).
*/
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/* /*
* A dma_addr_t can hold any valid DMA or bus address for the platform. * A dma_addr_t can hold any valid DMA or bus address for the platform.
* It can be given to a device to use as a DMA source or target. A CPU cannot * It can be given to a device to use as a DMA source or target. A CPU cannot
......
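On the caller side, a driver that wants a buffer reachable only by the device's privileged ("supervisor") context passes the new attribute to dma_alloc_attrs(). A hedged sketch; the helper name and usage are hypothetical:

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate a descriptor ring that should only be
 * accessible from the device's privileged execution level. */
static void *alloc_priv_ring(struct device *dev, size_t size,
                             dma_addr_t *dma_handle)
{
        return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
                               DMA_ATTR_PRIVILEGED);
}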
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <linux/dma_remapping.h> #include <linux/dma_remapping.h>
#include <linux/mmu_notifier.h> #include <linux/mmu_notifier.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/iommu.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/iommu.h> #include <asm/iommu.h>
...@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) ...@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60) #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 7) #define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) #define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49) #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
...@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) ...@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/* INVALID_DESC */ /* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET 61 #define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) #define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) #define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) #define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
...@@ -316,8 +317,8 @@ enum { ...@@ -316,8 +317,8 @@ enum {
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) #define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_MAX_INVS 32 #define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
...@@ -439,7 +440,7 @@ struct intel_iommu { ...@@ -439,7 +440,7 @@ struct intel_iommu {
struct irq_domain *ir_domain; struct irq_domain *ir_domain;
struct irq_domain *ir_msi_domain; struct irq_domain *ir_msi_domain;
#endif #endif
struct device *iommu_dev; /* IOMMU-sysfs device */ struct iommu_device iommu; /* IOMMU core code handle */
int node; int node;
u32 flags; /* Software defined flags */ u32 flags; /* Software defined flags */
}; };
......
...@@ -31,6 +31,13 @@ ...@@ -31,6 +31,13 @@
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3) #define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
/*
* This is to make the IOMMU API setup privileged
 * mappings accessible by the master only at higher
* privileged execution level and inaccessible at
* less privileged levels.
*/
#define IOMMU_PRIV (1 << 5)
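For users of the raw IOMMU API, the new flag is simply OR'd into the prot argument of iommu_map(). A minimal sketch, assuming domain, iova and phys already exist; the addresses and size are placeholders:

/* Map one page so that only privileged ("supervisor") transactions
 * from the master can reach it. */
ret = iommu_map(domain, iova, phys, SZ_4K,
                IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);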
struct iommu_ops; struct iommu_ops;
struct iommu_group; struct iommu_group;
...@@ -117,18 +124,25 @@ enum iommu_attr { ...@@ -117,18 +124,25 @@ enum iommu_attr {
DOMAIN_ATTR_MAX, DOMAIN_ATTR_MAX,
}; };
/* These are the possible reserved region types */
#define IOMMU_RESV_DIRECT (1 << 0)
#define IOMMU_RESV_RESERVED (1 << 1)
#define IOMMU_RESV_MSI (1 << 2)
/** /**
* struct iommu_dm_region - descriptor for a direct mapped memory region * struct iommu_resv_region - descriptor for a reserved memory region
* @list: Linked list pointers * @list: Linked list pointers
* @start: System physical start address of the region * @start: System physical start address of the region
* @length: Length of the region in bytes * @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...) * @prot: IOMMU Protection flags (READ/WRITE/...)
* @type: Type of the reserved region
*/ */
struct iommu_dm_region { struct iommu_resv_region {
struct list_head list; struct list_head list;
phys_addr_t start; phys_addr_t start;
size_t length; size_t length;
int prot; int prot;
int type;
}; };
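An IOMMU driver reports such regions through the get_resv_regions/put_resv_regions callbacks below, typically by allocating them with iommu_alloc_resv_region() (declared further down in this hunk). A hedged sketch for a made-up IOMMU with a fixed MSI doorbell window; the driver name, doorbell address and size are hypothetical:

#define HYP_MSI_DOORBELL        0x08020000UL    /* made-up doorbell base */

static void hyp_iommu_get_resv_regions(struct device *dev,
                                       struct list_head *head)
{
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(HYP_MSI_DOORBELL, SZ_1M,
                                         IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
                                         IOMMU_RESV_MSI);
        if (!region)
                return;
        list_add_tail(&region->list, head);
}

static void hyp_iommu_put_resv_regions(struct device *dev,
                                       struct list_head *head)
{
        struct iommu_resv_region *entry, *next;

        list_for_each_entry_safe(entry, next, head, list)
                kfree(entry);
}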
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
...@@ -150,9 +164,9 @@ struct iommu_dm_region { ...@@ -150,9 +164,9 @@ struct iommu_dm_region {
* @device_group: find iommu group for a particular device * @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes * @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes * @domain_set_attr: Change domain attributes
* @get_dm_regions: Request list of direct mapping requirements for a device * @get_resv_regions: Request list of reserved regions for a device
* @put_dm_regions: Free list of direct mapping requirements for a device * @put_resv_regions: Free list of reserved regions for a device
* @apply_dm_region: Temporary helper call-back for iova reserved ranges * @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain * @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain * @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain * @domain_set_windows: Set the number of windows for a domain
...@@ -184,11 +198,12 @@ struct iommu_ops { ...@@ -184,11 +198,12 @@ struct iommu_ops {
int (*domain_set_attr)(struct iommu_domain *domain, int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data); enum iommu_attr attr, void *data);
/* Request/Free a list of direct mapping requirements for a device */ /* Request/Free a list of reserved regions for a device */
void (*get_dm_regions)(struct device *dev, struct list_head *list); void (*get_resv_regions)(struct device *dev, struct list_head *list);
void (*put_dm_regions)(struct device *dev, struct list_head *list); void (*put_resv_regions)(struct device *dev, struct list_head *list);
void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain, void (*apply_resv_region)(struct device *dev,
struct iommu_dm_region *region); struct iommu_domain *domain,
struct iommu_resv_region *region);
/* Window handling functions */ /* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
...@@ -204,6 +219,42 @@ struct iommu_ops { ...@@ -204,6 +219,42 @@ struct iommu_ops {
unsigned long pgsize_bitmap; unsigned long pgsize_bitmap;
}; };
/**
* struct iommu_device - IOMMU core representation of one IOMMU hardware
* instance
* @list: Used by the iommu-core to keep a list of registered iommus
* @ops: iommu-ops for talking to this iommu
* @dev: struct device for sysfs handling
*/
struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
struct device dev;
};
int iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
struct device *parent,
const struct attribute_group **groups,
const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
static inline void iommu_device_set_ops(struct iommu_device *iommu,
const struct iommu_ops *ops)
{
iommu->ops = ops;
}
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
struct fwnode_handle *fwnode)
{
iommu->fwnode = fwnode;
}
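The msm and mediatek conversions above follow the same pattern, condensed here as a hedged sketch: embed a struct iommu_device in the driver's per-instance data, register it during probe, and link endpoint devices to it from add_device. All names below are hypothetical, hyp_iommu_ops is assumed to be the driver's struct iommu_ops, and error unwinding is omitted:

struct hyp_iommu {                      /* hypothetical per-instance data */
        void __iomem            *base;
        struct iommu_device     iommu;  /* handle used by the IOMMU core */
};

static int hyp_iommu_probe(struct platform_device *pdev)
{
        struct hyp_iommu *smmu;
        int ret;

        smmu = devm_kzalloc(&pdev->dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return -ENOMEM;

        /* expose the instance under /sys/class/iommu */
        ret = iommu_device_sysfs_add(&smmu->iommu, &pdev->dev, NULL,
                                     "hyp-iommu.%s", dev_name(&pdev->dev));
        if (ret)
                return ret;

        iommu_device_set_ops(&smmu->iommu, &hyp_iommu_ops);
        iommu_device_set_fwnode(&smmu->iommu, &pdev->dev.of_node->fwnode);

        /* hand the instance over to the IOMMU core */
        return iommu_device_register(&smmu->iommu);
}

In the driver's add_device callback, iommu_device_link(&smmu->iommu, dev) then creates the sysfs cross-links between the IOMMU instance and its master devices, and iommu_device_unlink() undoes them in remove_device, mirroring the msm and mediatek hunks earlier in this diff.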
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
...@@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io ...@@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain, extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token); iommu_fault_handler_t handler, void *token);
extern void iommu_get_dm_regions(struct device *dev, struct list_head *list); extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_dm_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev); extern int iommu_request_dm_for_dev(struct device *dev);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
extern int iommu_attach_group(struct iommu_domain *domain, extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group); struct iommu_group *group);
...@@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, ...@@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data); void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data); void *data);
struct device *iommu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...) __printf(4, 5);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
/* Window handling function prototypes */ /* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
...@@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, ...@@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops); const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev); void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
void iommu_register_instance(struct fwnode_handle *fwnode, const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
const struct iommu_ops *ops);
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
#else /* CONFIG_IOMMU_API */ #else /* CONFIG_IOMMU_API */
struct iommu_ops {}; struct iommu_ops {};
struct iommu_group {}; struct iommu_group {};
struct iommu_fwspec {}; struct iommu_fwspec {};
struct iommu_device {};
static inline bool iommu_present(struct bus_type *bus) static inline bool iommu_present(struct bus_type *bus)
{ {
...@@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain, ...@@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{ {
} }
static inline void iommu_get_dm_regions(struct device *dev, static inline void iommu_get_resv_regions(struct device *dev,
struct list_head *list) struct list_head *list)
{ {
} }
static inline void iommu_put_dm_regions(struct device *dev, static inline void iommu_put_resv_regions(struct device *dev,
struct list_head *list) struct list_head *list)
{ {
} }
static inline int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head)
{
return -ENODEV;
}
static inline int iommu_request_dm_for_dev(struct device *dev) static inline int iommu_request_dm_for_dev(struct device *dev)
{ {
return -ENODEV; return -ENODEV;
...@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain, ...@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
return -EINVAL; return -EINVAL;
} }
static inline struct device *iommu_device_create(struct device *parent, static inline int iommu_device_register(struct iommu_device *iommu)
void *drvdata, {
const struct attribute_group **groups, return -ENODEV;
const char *fmt, ...) }
static inline void iommu_device_set_ops(struct iommu_device *iommu,
const struct iommu_ops *ops)
{
}
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
struct fwnode_handle *fwnode)
{
}
static inline void iommu_device_unregister(struct iommu_device *iommu)
{ {
return ERR_PTR(-ENODEV);
} }
static inline void iommu_device_destroy(struct device *dev) static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
struct device *parent,
const struct attribute_group **groups,
const char *fmt, ...)
{
return -ENODEV;
}
static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{ {
} }
...@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, ...@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
return -ENODEV; return -ENODEV;
} }
static inline void iommu_register_instance(struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
}
static inline static inline
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{ {
return NULL; return NULL;
} }
......
...@@ -183,6 +183,12 @@ enum { ...@@ -183,6 +183,12 @@ enum {
/* Irq domain is an IPI domain with single virq */ /* Irq domain is an IPI domain with single virq */
IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
/* Irq domain implements MSIs */
IRQ_DOMAIN_FLAG_MSI = (1 << 4),
/* Irq domain implements MSI remapping */
IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
/* /*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the * for implementation specific purposes and ignored by the
...@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, ...@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
void *host_data); void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token); enum irq_domain_bus_token bus_token);
extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host); extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node, irq_hw_number_t hwirq, int node,
...@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) ...@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
{ {
return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
} }
static inline bool irq_domain_is_msi(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_MSI;
}
static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
}
extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline void irq_domain_activate_irq(struct irq_data *data) { } static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { } static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
...@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) ...@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
{ {
return false; return false;
} }
static inline bool irq_domain_is_msi(struct irq_domain *domain)
{
return false;
}
static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
{
return false;
}
static inline bool
irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
{
return false;
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#else /* CONFIG_IRQ_DOMAIN */ #else /* CONFIG_IRQ_DOMAIN */
......
...@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev, ...@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */ #endif /* CONFIG_OF_IOMMU */
static inline void of_iommu_set_ops(struct device_node *np,
const struct iommu_ops *ops)
{
iommu_register_instance(&np->fwnode, ops);
}
static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
return iommu_get_instance(&np->fwnode);
}
extern struct of_device_id __iommu_of_table; extern struct of_device_id __iommu_of_table;
typedef int (*of_iommu_init_fn)(struct device_node *); typedef int (*of_iommu_init_fn)(struct device_node *);
......
...@@ -277,6 +277,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, ...@@ -277,6 +277,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
} }
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec); EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
/**
* irq_domain_check_msi_remap - Check whether all MSI irq domains implement
* IRQ remapping
*
* Return: false if any MSI irq domain does not support IRQ remapping,
* true otherwise (including if there is no MSI irq domain)
*/
bool irq_domain_check_msi_remap(void)
{
struct irq_domain *h;
bool ret = true;
mutex_lock(&irq_domain_mutex);
list_for_each_entry(h, &irq_domain_list, link) {
if (irq_domain_is_msi(h) &&
!irq_domain_hierarchical_is_msi_remap(h)) {
ret = false;
break;
}
}
mutex_unlock(&irq_domain_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
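Both halves of this interface appear elsewhere in this pull and are condensed here for clarity: an MSI controller whose hardware isolates device MSI writes (such as the GICv3 ITS above) tags its irq domain, and VFIO then accepts that as equivalent to IOMMU interrupt remapping. A condensed sketch of the two call sites:

/* producer: MSI controller with built-in isolation (ITS-style init path) */
inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;

/* consumer: VFIO-style safety gate before assigning a device */
msi_remap = resv_msi ? irq_domain_check_msi_remap() :
                       iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
if (!allow_unsafe_interrupts && !msi_remap)
        return -EPERM;  /* refuse the unsafe configuration */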
/** /**
* irq_set_default_host() - Set a "default" irq domain * irq_set_default_host() - Set a "default" irq domain
* @domain: default domain pointer * @domain: default domain pointer
...@@ -1408,6 +1433,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain) ...@@ -1408,6 +1433,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
if (domain->ops->alloc) if (domain->ops->alloc)
domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
} }
/**
* irq_domain_hierarchical_is_msi_remap - Check if the domain or any
* parent has MSI remapping support
* @domain: domain pointer
*/
bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
{
for (; domain; domain = domain->parent) {
if (irq_domain_is_msi_remap(domain))
return true;
}
return false;
}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
/** /**
* irq_domain_get_irq_data - Get irq_data associated with @virq and @domain * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
......
...@@ -270,8 +270,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, ...@@ -270,8 +270,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
msi_domain_update_chip_ops(info); msi_domain_update_chip_ops(info);
return irq_domain_create_hierarchy(parent, 0, 0, fwnode, return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
&msi_domain_ops, info); fwnode, &msi_domain_ops, info);
} }
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
......