Commit 2f83766d authored by Linus Torvalds

Merge tag 'iommu-updates-v3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "Not much stuff this time.  The only change to the IOMMU core code is
  the addition of a handle to the fault handling code.  A few updates to
  the AMD IOMMU driver to work around new errata.  The other patches are
  mostly fixes and enhancements to the existing ARM IOMMU drivers and
  documentation updates.

  A new IOMMU driver for the Exynos platform was also underway but got
  merged via the Samsung tree and is not part of this tree."

* tag 'iommu-updates-v3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  Documentation: kernel-parameters.txt Add amd_iommu_dump
  iommu/core: pass a user-provided token to fault handlers
  iommu/tegra: gart: Fix register offset correctly
  iommu: OMAP: device detach on domain destroy
  iommu: tegra/gart: Add device tree support
  iommu: tegra/gart: use correct gart_device
  iommu/tegra: smmu: Print device name correctly
  iommu/amd: Add workaround for event log erratum
  iommu/amd: Check for the right TLP prefix bit
  dma-debug: release free_entries_lock before saving stack trace
parents 4523e145 28f8571e
NVIDIA Tegra 20 GART
Required properties:
- compatible: "nvidia,tegra20-gart"
- reg: Two pairs of cells specifying the physical address and size of
the memory controller registers and the GART aperture respectively.
Example:
gart {
compatible = "nvidia,tegra20-gart";
reg = <0x7000f024 0x00000018 /* controller registers */
0x58000000 0x02000000>; /* GART aperture */
};
@@ -335,6 +335,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
requirements as needed. This option
does not override iommu=pt
amd_iommu_dump= [HW,X86-64]
Enable AMD IOMMU driver option to dump the ACPI table
for AMD IOMMU. With this option enabled, AMD IOMMU
driver will print ACPI tables for AMD IOMMU during
IOMMU initialization.
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
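As a usage illustration only (not part of the patch): the new option takes effect when it is appended to the kernel command line, for example in the bootloader entry; everything else on the line below is a placeholder.

	linux /boot/vmlinuz root=/dev/sda1 ro quiet amd_iommu_dump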
@@ -450,12 +450,27 @@ static void dump_command(unsigned long phys_addr)
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
u32 *event = __evt;
int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
u64 address = (u64)(((u64)event[3]) << 32) | event[2];
int type, devid, domid, flags;
volatile u32 *event = __evt;
int count = 0;
u64 address;
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
if (type == 0) {
/* Did we hit the erratum? */
if (++count == LOOP_TIMEOUT) {
pr_err("AMD-Vi: No event written to event log\n");
return;
}
udelay(1);
goto retry;
}
printk(KERN_ERR "AMD-Vi: Event logged [");
@@ -508,6 +523,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
default:
printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
}
memset(__evt, 0, 4 * sizeof(u32));
}
static void iommu_poll_events(struct amd_iommu *iommu)
@@ -2035,20 +2052,20 @@ static int pdev_iommuv2_enable(struct pci_dev *pdev)
}
/* FIXME: Move this to PCI code */
#define PCI_PRI_TLP_OFF (1 << 2)
#define PCI_PRI_TLP_OFF (1 << 15)
bool pci_pri_tlp_required(struct pci_dev *pdev)
{
u16 control;
u16 status;
int pos;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
return (control & PCI_PRI_TLP_OFF) ? true : false;
return (status & PCI_PRI_TLP_OFF) ? true : false;
}
/*
@@ -119,6 +119,7 @@ EXPORT_SYMBOL_GPL(iommu_present);
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
* @handler: fault handler
* @token: user data, will be passed back to the fault handler
*
* This function should be used by IOMMU users which want to be notified
* whenever an IOMMU fault happens.
@@ -127,11 +128,13 @@ EXPORT_SYMBOL_GPL(iommu_present);
* error code otherwise.
*/
void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler)
iommu_fault_handler_t handler,
void *token)
{
BUG_ON(!domain);
domain->handler = handler;
domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
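For illustration, a minimal sketch of how a caller would use the extended interface. The names struct my_ctx, my_fault_handler and ctx are invented; only the iommu_set_fault_handler() signature and the handler prototype come from this patch (compare the remoteproc update further down).

	/* assumes <linux/iommu.h> and <linux/device.h> */
	struct my_ctx {
		unsigned int faults;	/* illustrative private state */
	};

	static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
				    unsigned long iova, int flags, void *token)
	{
		struct my_ctx *ctx = token;	/* the pointer registered below */

		ctx->faults++;
		dev_err(dev, "iommu fault at iova 0x%lx, flags 0x%x\n", iova, flags);
		return -ENOSYS;			/* not handled; let the core continue its error path */
	}

	/* at initialization time, after allocating the domain: */
	iommu_set_fault_handler(domain, my_fault_handler, ctx);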
@@ -41,11 +41,13 @@
* @pgtable: the page table
* @iommu_dev: an omap iommu device attached to this domain. only a single
* iommu device can be attached for now.
* @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
*/
struct omap_iommu_domain {
u32 *pgtable;
struct omap_iommu *iommu_dev;
struct device *dev;
spinlock_t lock;
};
@@ -1081,6 +1083,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
omap_domain->dev = dev;
oiommu->domain = domain;
out:
@@ -1088,19 +1091,16 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return ret;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
spin_lock(&omap_domain->lock);
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
dev_err(dev, "invalid iommu device\n");
goto out;
return;
}
iopgtable_clear_entry_all(oiommu);
@@ -1108,8 +1108,16 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
omap_iommu_detach(oiommu);
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
omap_domain->dev = NULL;
}
out:
static void omap_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
spin_lock(&omap_domain->lock);
_omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
@@ -1148,13 +1156,19 @@ static int omap_iommu_domain_init(struct iommu_domain *domain)
return -ENOMEM;
}
/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain = domain->priv;
domain->priv = NULL;
/*
* An iommu device is still attached
* (currently, only one device can be attached) ?
*/
if (omap_domain->iommu_dev)
_omap_iommu_detach_dev(omap_domain, omap_domain->dev);
kfree(omap_domain->pgtable);
kfree(omap_domain);
}
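To make the behavioural change concrete, here is a hypothetical caller sequence using only generic IOMMU API calls; dev and the bus pointer are placeholders and error handling is omitted.

	struct iommu_domain *d = iommu_domain_alloc(&platform_bus_type);

	iommu_attach_device(d, dev);
	/* ... map, use, unmap ... */

	/*
	 * Previously, destroying the domain assumed the device had already
	 * been detached; skipping iommu_detach_device(d, dev) here left the
	 * OMAP IOMMU attached. With this patch, a still-attached device is
	 * detached as part of domain destruction.
	 */
	iommu_domain_free(d);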
@@ -29,15 +29,17 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES (SZ_4K)
#define GART_CONFIG 0x24
#define GART_ENTRY_ADDR 0x28
#define GART_ENTRY_DATA 0x2c
#define GART_REG_BASE 0x24
#define GART_CONFIG (0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID (1 << 31)
#define GART_PAGE_SHIFT 12
@@ -158,7 +160,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct gart_client *client, *c;
int err = 0;
gart = dev_get_drvdata(dev->parent);
gart = gart_handle;
if (!gart)
return -EINVAL;
domain->priv = gart;
@@ -422,6 +424,14 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
.resume = tegra_gart_resume,
};
#ifdef CONFIG_OF
static struct of_device_id tegra_gart_of_match[] __devinitdata = {
{ .compatible = "nvidia,tegra20-gart", },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
#endif
static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
.remove = tegra_gart_remove,
@@ -429,6 +439,7 @@ static struct platform_driver tegra_gart_driver = {
.owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
.of_match_table = of_match_ptr(tegra_gart_of_match),
},
};
@@ -448,4 +459,5 @@ module_exit(tegra_gart_exit);
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");
@@ -733,7 +733,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
}
dev_dbg(smmu->dev, "%s is attached\n", dev_name(c->dev));
dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
return 0;
err_client:
@@ -78,7 +78,7 @@ typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
* the recovery of the remote processor.
*/
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags)
unsigned long iova, int flags, void *token)
{
dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
@@ -117,7 +117,7 @@ static int rproc_enable_iommu(struct rproc *rproc)
return -ENOMEM;
}
iommu_set_fault_handler(domain, rproc_iommu_fault);
iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
ret = iommu_attach_device(domain, dev);
if (ret) {
@@ -35,12 +35,13 @@ struct iommu_domain;
#define IOMMU_FAULT_WRITE 0x1
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int);
struct device *, unsigned long, int, void *);
struct iommu_domain {
struct iommu_ops *ops;
void *priv;
iommu_fault_handler_t handler;
void *handler_token;
};
#define IOMMU_CAP_CACHE_COHERENCY 0x1
@@ -95,7 +96,7 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler);
iommu_fault_handler_t handler, void *token);
extern int iommu_device_group(struct device *dev, unsigned int *groupid);
/**
@@ -132,7 +133,8 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
* invoke it.
*/
if (domain->handler)
ret = domain->handler(domain, dev, iova, flags);
ret = domain->handler(domain, dev, iova, flags,
domain->handler_token);
return ret;
}
@@ -191,7 +193,7 @@ static inline int domain_has_cap(struct iommu_domain *domain,
}
static inline void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler)
iommu_fault_handler_t handler, void *token)
{
}
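For completeness, the reporting side: a hypothetical IOMMU driver's fault interrupt handler forwards the fault through report_iommu_fault(), which now invokes the user's handler with the token stored at registration time. Everything except report_iommu_fault(), IOMMU_FAULT_READ and the core types is invented for this sketch.

	/* assumes <linux/iommu.h> and <linux/interrupt.h> */
	static irqreturn_t my_iommu_isr(int irq, void *data)
	{
		struct my_iommu *mmu = data;			/* invented driver state */
		unsigned long iova = my_read_fault_addr(mmu);	/* invented register read */

		/* The handler and its token were set via iommu_set_fault_handler(). */
		if (report_iommu_fault(mmu->domain, mmu->dev, iova, IOMMU_FAULT_READ))
			dev_err(mmu->dev, "unhandled fault at iova 0x%lx\n", iova);

		return IRQ_HANDLED;
	}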
@@ -430,7 +430,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
*/
static struct dma_debug_entry *dma_entry_alloc(void)
{
struct dma_debug_entry *entry = NULL;
struct dma_debug_entry *entry;
unsigned long flags;
spin_lock_irqsave(&free_entries_lock, flags);
@@ -438,11 +438,14 @@ static struct dma_debug_entry *dma_entry_alloc(void)
if (list_empty(&free_entries)) {
pr_err("DMA-API: debugging out of memory - disabling\n");
global_disable = true;
goto out;
spin_unlock_irqrestore(&free_entries_lock, flags);
return NULL;
}
entry = __dma_entry_alloc();
spin_unlock_irqrestore(&free_entries_lock, flags);
#ifdef CONFIG_STACKTRACE
entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
entry->stacktrace.entries = entry->st_entries;
@@ -450,9 +453,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
save_stack_trace(&entry->stacktrace);
#endif
out:
spin_unlock_irqrestore(&free_entries_lock, flags);
return entry;
}
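The dma-debug change above follows a common locking pattern: hold free_entries_lock only while manipulating the shared free list, and capture the stack trace after the lock has been dropped. Below is a generic, self-contained sketch of that pattern; the pool names and struct are invented and this is not the dma-debug code itself.

	/* assumes <linux/list.h> and <linux/spinlock.h> */
	struct pool_entry {
		struct list_head list;
	};

	static LIST_HEAD(pool);
	static DEFINE_SPINLOCK(pool_lock);

	static struct pool_entry *pool_alloc(void)
	{
		struct pool_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&pool_lock, flags);
		if (list_empty(&pool)) {
			/* unlock on the error path too, then bail out */
			spin_unlock_irqrestore(&pool_lock, flags);
			return NULL;
		}
		entry = list_first_entry(&pool, struct pool_entry, list);
		list_del(&entry->list);
		spin_unlock_irqrestore(&pool_lock, flags);

		/* anything slow (like save_stack_trace()) runs without the lock held */
		return entry;
	}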