Commit 021bb842 authored by Robin Murphy, committed by Will Deacon

iommu/arm-smmu: Wire up generic configuration support

With everything else now in place, fill in an of_xlate callback and the
appropriate registration to plumb into the generic configuration
machinery, and watch everything just work.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent d0acbb75
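
For context: under the generic binding, a master's DT node points at the SMMU itself, e.g. "iommus = <&smmu 0x400 0x7f>;" (values made up), instead of the SMMU node listing its masters via "mmu-masters". The of_xlate callback added by this patch folds such a specifier into a single 32-bit firmware ID, StreamID in the low halfword and SMR mask in the high one. A standalone C sketch of that packing (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define SMR_MASK_SHIFT 16      /* same split the driver uses */

    /* Fold a two-cell "iommus" specifier into a single firmware ID. */
    static uint32_t pack_fwid(uint16_t sid, uint16_t smr_mask)
    {
            return (uint32_t)sid | ((uint32_t)smr_mask << SMR_MASK_SHIFT);
    }

    int main(void)
    {
            uint32_t fwid = pack_fwid(0x400, 0x7f);

            /* Prints: fwid=0x007f0400 */
            printf("fwid=0x%08x\n", (unsigned int)fwid);
            return 0;
    }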
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -418,6 +418,8 @@ struct arm_smmu_option_prop {
 
 static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
 
+static bool using_legacy_binding, using_generic_binding;
+
 static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
 	{ 0, NULL},
@@ -817,12 +819,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
-	/* We're bypassing these SIDs, so don't allocate an actual context */
-	if (domain->type == IOMMU_DOMAIN_DMA) {
-		smmu_domain->smmu = smmu;
-		goto out_unlock;
-	}
-
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -981,7 +977,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	void __iomem *cb_base;
 	int irq;
 
-	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
+	if (!smmu)
 		return;
 
 	/*
@@ -1015,8 +1011,8 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 	if (!smmu_domain)
 		return NULL;
 
-	if (type == IOMMU_DOMAIN_DMA &&
-	    iommu_get_dma_cookie(&smmu_domain->domain)) {
+	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
+	    iommu_get_dma_cookie(&smmu_domain->domain))) {
 		kfree(smmu_domain);
 		return NULL;
 	}
@@ -1133,19 +1129,22 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
 	mutex_lock(&smmu->stream_map_mutex);
 	/* Figure out a viable stream map entry allocation */
 	for_each_cfg_sme(fwspec, i, idx) {
+		u16 sid = fwspec->ids[i];
+		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
+
 		if (idx != INVALID_SMENDX) {
 			ret = -EEXIST;
 			goto out_err;
 		}
 
-		ret = arm_smmu_find_sme(smmu, fwspec->ids[i], 0);
+		ret = arm_smmu_find_sme(smmu, sid, mask);
 		if (ret < 0)
 			goto out_err;
 
 		idx = ret;
 		if (smrs && smmu->s2crs[idx].count == 0) {
-			smrs[idx].id = fwspec->ids[i];
-			smrs[idx].mask = 0; /* We don't currently share SMRs */
+			smrs[idx].id = sid;
+			smrs[idx].mask = mask;
 			smrs[idx].valid = true;
 		}
 		smmu->s2crs[idx].count++;
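
The hunk above starts programming real SMR masks instead of hard-coding zero. As I read the SMMUv2 stream-matching rules (a hedged model, not code from this patch), a mask bit of 1 means that StreamID bit is ignored on compare, so one SMR entry can claim a whole aligned range of IDs:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Model of SMMUv2 stream matching: mask bits are "don't care". */
    static bool smr_matches(uint16_t smr_id, uint16_t smr_mask, uint16_t sid)
    {
            return ((smr_id ^ sid) & ~smr_mask) == 0;
    }

    int main(void)
    {
            /* SMR {id=0x400, mask=0x7f} covers StreamIDs 0x400..0x47f. */
            assert(smr_matches(0x400, 0x7f, 0x400));
            assert(smr_matches(0x400, 0x7f, 0x47f));
            assert(!smr_matches(0x400, 0x7f, 0x480));
            return 0;
    }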
@@ -1203,15 +1202,6 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 	u8 cbndx = smmu_domain->cfg.cbndx;
 	int i, idx;
 
-	/*
-	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-	 * for all devices behind the SMMU. Note that we need to take
-	 * care configuring SMRs for devices both a platform_device and
-	 * and a PCI device (i.e. a PCI host controller)
-	 */
-	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
-		type = S2CR_TYPE_BYPASS;
-
 	for_each_cfg_sme(fwspec, i, idx) {
 		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
 			continue;
@@ -1373,25 +1363,50 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 	}
 }
 
+static int arm_smmu_match_node(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
+{
+	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
+						np, arm_smmu_match_node);
+	put_device(dev);
+	return dev ? dev_get_drvdata(dev) : NULL;
+}
+
 static int arm_smmu_add_device(struct device *dev)
 {
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_master_cfg *cfg;
-	struct iommu_fwspec *fwspec;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 	int i, ret;
 
-	ret = arm_smmu_register_legacy_master(dev, &smmu);
-	fwspec = dev->iommu_fwspec;
-	if (ret)
-		goto out_free;
+	if (using_legacy_binding) {
+		ret = arm_smmu_register_legacy_master(dev, &smmu);
+		fwspec = dev->iommu_fwspec;
+		if (ret)
+			goto out_free;
+	} else if (fwspec) {
+		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+	} else {
+		return -ENODEV;
+	}
 
 	ret = -EINVAL;
 	for (i = 0; i < fwspec->num_ids; i++) {
 		u16 sid = fwspec->ids[i];
+		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
 
 		if (sid & ~smmu->streamid_mask) {
 			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
-				sid, cfg->smmu->streamid_mask);
+				sid, smmu->streamid_mask);
+			goto out_free;
+		}
+		if (mask & ~smmu->smr_mask_mask) {
+			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
+				sid, smmu->smr_mask_mask);
 			goto out_free;
 		}
 	}
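
A worked round trip of the new validation (standalone sketch; streamid_mask and smr_mask_mask are sized from the hardware at probe time, and the 15-bit limits below are hypothetical stand-ins). Here the StreamID half of the firmware ID exceeds the hypothetical limit, so the device would be rejected:

    #include <stdint.h>
    #include <stdio.h>

    #define SMR_MASK_SHIFT 16

    int main(void)
    {
            uint16_t streamid_mask = 0x7fff;   /* hypothetical h/w limit */
            uint16_t smr_mask_mask = 0x7fff;   /* hypothetical h/w limit */
            uint32_t fwid = 0x007f8400;        /* as packed by of_xlate */
            uint16_t sid  = fwid & 0xffff;
            uint16_t mask = fwid >> SMR_MASK_SHIFT;

            if (sid & ~(uint32_t)streamid_mask)
                    printf("stream ID 0x%x out of range\n", sid);   /* taken */
            else if (mask & ~(uint32_t)smr_mask_mask)
                    printf("SMR mask 0x%x out of range\n", mask);
            else
                    printf("accepted: sid=0x%04x mask=0x%04x\n", sid, mask);
            return 0;
    }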
@@ -1503,6 +1518,19 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 	return ret;
 }
 
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+	u32 fwid = 0;
+
+	if (args->args_count > 0)
+		fwid |= (u16)args->args[0];
+
+	if (args->args_count > 1)
+		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+
+	return iommu_fwspec_add_ids(dev, &fwid, 1);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1517,6 +1545,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.device_group		= arm_smmu_device_group,
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
+	.of_xlate		= arm_smmu_of_xlate,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
@@ -1870,6 +1899,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
 	int num_irqs, i, err;
+	bool legacy_binding;
+
+	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
+	if (legacy_binding && !using_generic_binding) {
+		if (!using_legacy_binding)
+			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+		using_legacy_binding = true;
+	} else if (!legacy_binding && !using_legacy_binding) {
+		using_generic_binding = true;
+	} else {
+		dev_err(dev, "not probing due to mismatched DT properties\n");
+		return -ENODEV;
+	}
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
 	if (!smmu) {
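
The arbitration above means the first SMMU to probe picks the binding style for the whole system, and later instances must agree. A standalone model of the same logic, with both refusal cases exercised (names hypothetical, behavior mirrored from the hunk above):

    #include <stdbool.h>
    #include <stdio.h>

    static bool using_legacy_binding, using_generic_binding;

    static int choose_binding(bool has_mmu_masters)
    {
            if (has_mmu_masters && !using_generic_binding) {
                    using_legacy_binding = true;    /* legacy "mmu-masters" */
                    return 0;
            }
            if (!has_mmu_masters && !using_legacy_binding) {
                    using_generic_binding = true;   /* generic "iommus" */
                    return 0;
            }
            return -1;      /* mismatched bindings: refuse to probe */
    }

    int main(void)
    {
            printf("%d\n", choose_binding(false));  /* 0: generic chosen */
            printf("%d\n", choose_binding(true));   /* -1: mixing refused */
            return 0;
    }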
@@ -1954,6 +1996,20 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
 	platform_set_drvdata(pdev, smmu);
 	arm_smmu_device_reset(smmu);
+
+	/* Oh, for a proper bus abstraction */
+	if (!iommu_present(&platform_bus_type))
+		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
+	if (!iommu_present(&amba_bustype))
+		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
+#ifdef CONFIG_PCI
+	if (!iommu_present(&pci_bus_type)) {
+		pci_request_acs();
+		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+	}
+#endif
 	return 0;
 }
 
@@ -1983,41 +2039,14 @@ static struct platform_driver arm_smmu_driver = {
 
 static int __init arm_smmu_init(void)
 {
-	struct device_node *np;
-	int ret;
-
-	/*
-	 * Play nice with systems that don't have an ARM SMMU by checking that
-	 * an ARM SMMU exists in the system before proceeding with the driver
-	 * and IOMMU bus operation registration.
-	 */
-	np = of_find_matching_node(NULL, arm_smmu_of_match);
-	if (!np)
-		return 0;
-
-	of_node_put(np);
-
-	ret = platform_driver_register(&arm_smmu_driver);
-	if (ret)
-		return ret;
-
-	/* Oh, for a proper bus abstraction */
-	if (!iommu_present(&platform_bus_type))
-		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-
-#ifdef CONFIG_ARM_AMBA
-	if (!iommu_present(&amba_bustype))
-		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-#endif
-
-#ifdef CONFIG_PCI
-	if (!iommu_present(&pci_bus_type)) {
-		pci_request_acs();
-		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
-	}
-#endif
-
-	return 0;
+	static bool registered;
+	int ret = 0;
+
+	if (!registered) {
+		ret = platform_driver_register(&arm_smmu_driver);
+		registered = !ret;
+	}
+	return ret;
 }
 
 static void __exit arm_smmu_exit(void)
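
The rewrite above makes arm_smmu_init() safe to call more than once: with the IOMMU_OF_DECLARE hooks added below, it can now run early from OF init and again later from subsys_initcall. A standalone sketch of the run-once pattern (function names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the run-once guard: both entry points funnel through here. */
    static bool registered;

    static int fake_platform_driver_register(void)
    {
            puts("driver registered");
            return 0;
    }

    static int smmu_init(void)
    {
            int ret = 0;

            if (!registered) {
                    ret = fake_platform_driver_register();
                    registered = !ret;
            }
            return ret;     /* later calls: no-op, returns 0 */
    }

    int main(void)
    {
            smmu_init();    /* e.g. early OF-declared init: registers once */
            smmu_init();    /* e.g. subsys_initcall later: prints nothing */
            return 0;
    }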
@@ -2028,6 +2057,25 @@ static void __exit arm_smmu_exit(void)
 subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);
 
+static int __init arm_smmu_of_init(struct device_node *np)
+{
+	int ret = arm_smmu_init();
+
+	if (ret)
+		return ret;
+
+	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
+		return -ENODEV;
+
+	return 0;
+}
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
+IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
+
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
 MODULE_LICENSE("GPL v2");