Commit f80cd885 authored by Robin Murphy, committed by Will Deacon

iommu/arm-smmu: Refactor mmu-masters handling

To be able to support the generic bindings and handle of_xlate() calls,
we need to be able to associate SMMUs and stream IDs directly with
devices *before* allocating IOMMU groups. Furthermore, to support real
default domains with multi-device groups we also have to handle domain
attach on a per-device basis, as the "whole group at a time" assumption
fails to properly handle subsequent devices added to a group after the
first has already triggered default domain creation and attachment.

To that end, use the now-vacant dev->archdata.iommu field for easy
config and SMMU instance lookup, and unify config management by chopping
down the platform-device-specific tree and probing the "mmu-masters"
property on-demand instead. This may add a bit of one-off overhead to
initially adding a new device, but we're about to deprecate that binding
in favour of the inherently-more-efficient generic ones anyway.

For the sake of simplicity, this patch does temporarily regress the case
of aliasing PCI devices by losing the duplicate stream ID detection that
the previous per-group config had. Stay tuned, because we'll be back to
fix that in a better and more general way momentarily...
Tested-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 8e8b203e
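
For reference, the legacy "mmu-masters" binding that arm_smmu_register_legacy_master() now walks on demand describes masters from the SMMU's side: the SMMU node carries a list of <master-phandle stream-IDs...> tuples in "mmu-masters", and each referenced master node states how many Stream ID cells follow its phandle via "#stream-id-cells". A minimal device-tree sketch of that shape (the node names, unit addresses and Stream ID values below are purely illustrative, not taken from this commit):

	dma0: dma-controller@12340000 {
		/* two Stream ID cells follow &dma0 in each "mmu-masters" entry */
		#stream-id-cells = <2>;
	};

	smmu: iommu@2b400000 {
		compatible = "arm,smmu-v2";
		/* legacy binding: <&master SID SID...>, now matched per device on demand */
		mmu-masters = <&dma0 0xd01d 0xd01e>;
	};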
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -317,18 +317,13 @@ struct arm_smmu_smr {
 };
 
 struct arm_smmu_master_cfg {
+	struct arm_smmu_device		*smmu;
 	int				num_streamids;
 	u16				streamids[MAX_MASTER_STREAMIDS];
 	s16				smendx[MAX_MASTER_STREAMIDS];
 };
 #define INVALID_SMENDX			-1
 
-struct arm_smmu_master {
-	struct device_node		*of_node;
-	struct rb_node			node;
-	struct arm_smmu_master_cfg	cfg;
-};
-
 struct arm_smmu_device {
 	struct device			*dev;
@@ -376,7 +371,6 @@ struct arm_smmu_device {
 	unsigned int			*irqs;
 
 	struct list_head		list;
-	struct rb_root			masters;
 
 	u32				cavium_id_base; /* Specific to Cavium */
 };
@@ -415,12 +409,6 @@ struct arm_smmu_domain {
 	struct iommu_domain		domain;
 };
 
-struct arm_smmu_phandle_args {
-	struct device_node *np;
-	int args_count;
-	uint32_t args[MAX_MASTER_STREAMIDS];
-};
-
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
@@ -462,132 +450,89 @@ static struct device_node *dev_get_dev_node(struct device *dev)
 
 		while (!pci_is_root_bus(bus))
 			bus = bus->parent;
-		return bus->bridge->parent->of_node;
+		return of_node_get(bus->bridge->parent->of_node);
 	}
 
-	return dev->of_node;
+	return of_node_get(dev->of_node);
 }
 
-static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
-						struct device_node *dev_node)
+static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
 {
-	struct rb_node *node = smmu->masters.rb_node;
-
-	while (node) {
-		struct arm_smmu_master *master;
-
-		master = container_of(node, struct arm_smmu_master, node);
-
-		if (dev_node < master->of_node)
-			node = node->rb_left;
-		else if (dev_node > master->of_node)
-			node = node->rb_right;
-		else
-			return master;
-	}
-
-	return NULL;
+	*((__be32 *)data) = cpu_to_be32(alias);
+	return 0; /* Continue walking */
 }
 
-static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct device *dev)
+static int __find_legacy_master_phandle(struct device *dev, void *data)
 {
-	struct arm_smmu_master_cfg *cfg = NULL;
-	struct iommu_group *group = iommu_group_get(dev);
-
-	if (group) {
-		cfg = iommu_group_get_iommudata(group);
-		iommu_group_put(group);
-	}
-
-	return cfg;
+	struct of_phandle_iterator *it = *(void **)data;
+	struct device_node *np = it->node;
+	int err;
+
+	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
+			    "#stream-id-cells", 0)
+		if (it->node == np) {
+			*(void **)data = dev;
+			return 1;
+		}
+	it->node = np;
+	return err == -ENOENT ? 0 : err;
 }
 
-static int insert_smmu_master(struct arm_smmu_device *smmu,
-			      struct arm_smmu_master *master)
+static int arm_smmu_register_legacy_master(struct device *dev)
 {
-	struct rb_node **new, *parent;
-
-	new = &smmu->masters.rb_node;
-	parent = NULL;
-	while (*new) {
-		struct arm_smmu_master *this
-			= container_of(*new, struct arm_smmu_master, node);
-
-		parent = *new;
-		if (master->of_node < this->of_node)
-			new = &((*new)->rb_left);
-		else if (master->of_node > this->of_node)
-			new = &((*new)->rb_right);
-		else
-			return -EEXIST;
-	}
-
-	rb_link_node(&master->node, parent, new);
-	rb_insert_color(&master->node, &smmu->masters);
-	return 0;
-}
-
-static int register_smmu_master(struct arm_smmu_device *smmu,
-				struct device *dev,
-				struct arm_smmu_phandle_args *masterspec)
-{
-	int i;
-	struct arm_smmu_master *master;
-
-	master = find_smmu_master(smmu, masterspec->np);
-	if (master) {
-		dev_err(dev,
-			"rejecting multiple registrations for master device %s\n",
-			masterspec->np->name);
-		return -EBUSY;
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_master_cfg *cfg;
+	struct device_node *np;
+	struct of_phandle_iterator it;
+	void *data = &it;
+	__be32 pci_sid;
+	int err;
+
+	np = dev_get_dev_node(dev);
+	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
+		of_node_put(np);
+		return -ENODEV;
+	}
+
+	it.node = np;
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(smmu, &arm_smmu_devices, list) {
+		err = __find_legacy_master_phandle(smmu->dev, &data);
+		if (err)
+			break;
 	}
+	spin_unlock(&arm_smmu_devices_lock);
+	of_node_put(np);
+	if (err == 0)
+		return -ENODEV;
+	if (err < 0)
+		return err;
 
-	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
-		dev_err(dev,
+	if (it.cur_count > MAX_MASTER_STREAMIDS) {
+		dev_err(smmu->dev,
 			"reached maximum number (%d) of stream IDs for master device %s\n",
-			MAX_MASTER_STREAMIDS, masterspec->np->name);
+			MAX_MASTER_STREAMIDS, dev_name(dev));
 		return -ENOSPC;
 	}
+	if (dev_is_pci(dev)) {
+		/* "mmu-masters" assumes Stream ID == Requester ID */
+		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
+				       &pci_sid);
+		it.cur = &pci_sid;
+		it.cur_count = 1;
+	}
 
-	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
-	if (!master)
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg)
 		return -ENOMEM;
 
-	master->of_node			= masterspec->np;
-	master->cfg.num_streamids	= masterspec->args_count;
-
-	for (i = 0; i < master->cfg.num_streamids; ++i) {
-		u16 streamid = masterspec->args[i];
-
-		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
-		     (streamid >= smmu->num_mapping_groups)) {
-			dev_err(dev,
-				"stream ID for master device %s greater than maximum allowed (%d)\n",
-				masterspec->np->name, smmu->num_mapping_groups);
-			return -ERANGE;
-		}
-		master->cfg.streamids[i] = streamid;
-		master->cfg.smendx[i] = INVALID_SMENDX;
-	}
-	return insert_smmu_master(smmu, master);
-}
-
-static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
-{
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master *master = NULL;
-	struct device_node *dev_node = dev_get_dev_node(dev);
-
-	spin_lock(&arm_smmu_devices_lock);
-	list_for_each_entry(smmu, &arm_smmu_devices, list) {
-		master = find_smmu_master(smmu, dev_node);
-		if (master)
-			break;
-	}
-	spin_unlock(&arm_smmu_devices_lock);
+	cfg->smmu = smmu;
+	dev->archdata.iommu = cfg;
 
-	return master ? smmu : NULL;
+	while (it.cur_count--)
+		cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++);
+
+	return 0;
 }
 
 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -1119,8 +1064,7 @@ static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx)
 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 {
 	struct arm_smmu_smr *smr = smmu->smrs + idx;
-	u32 reg = (smr->id & smmu->streamid_mask) << SMR_ID_SHIFT |
-		  (smr->mask & smmu->smr_mask_mask) << SMR_MASK_SHIFT;
+	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
 
 	if (smr->valid)
 		reg |= SMR_VALID;
@@ -1189,9 +1133,9 @@ static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu,
 	return -ENOSPC;
 }
 
-static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu,
-				      struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg)
 {
+	struct arm_smmu_device *smmu = cfg->smmu;
 	int i;
 
 	/*
@@ -1262,17 +1206,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_cfg *cfg;
+	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
 
-	smmu = find_smmu_for_device(dev);
-	if (!smmu) {
+	if (!cfg) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
 		return -ENXIO;
 	}
 
 	/* Ensure that the domain is finalised */
-	ret = arm_smmu_init_domain_context(domain, smmu);
+	ret = arm_smmu_init_domain_context(domain, cfg->smmu);
 	if (ret < 0)
 		return ret;
@@ -1280,18 +1222,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	/*
 	 * Sanity check the domain. We don't support domains across
 	 * different SMMUs.
 	 */
-	if (smmu_domain->smmu != smmu) {
+	if (smmu_domain->smmu != cfg->smmu) {
 		dev_err(dev,
 			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
-			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
+			dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev));
 		return -EINVAL;
 	}
 
 	/* Looks ok, so add the device to the domain */
-	cfg = find_smmu_master_cfg(dev);
-	if (!cfg)
-		return -ENODEV;
-
 	return arm_smmu_domain_add_master(smmu_domain, cfg);
 }
@@ -1411,120 +1349,65 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 	}
 }
 
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
-{
-	*((u16 *)data) = alias;
-	return 0; /* Continue walking */
-}
-
-static void __arm_smmu_release_pci_iommudata(void *data)
-{
-	kfree(data);
-}
-
-static int arm_smmu_init_pci_device(struct pci_dev *pdev,
-				    struct iommu_group *group)
+static int arm_smmu_add_device(struct device *dev)
 {
 	struct arm_smmu_master_cfg *cfg;
-	u16 sid;
-	int i;
+	struct iommu_group *group;
+	int i, ret;
 
-	cfg = iommu_group_get_iommudata(group);
-	if (!cfg) {
-		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-		if (!cfg)
-			return -ENOMEM;
-
-		iommu_group_set_iommudata(group, cfg,
-					  __arm_smmu_release_pci_iommudata);
-	}
-
-	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
-		return -ENOSPC;
+	ret = arm_smmu_register_legacy_master(dev);
+	cfg = dev->archdata.iommu;
+	if (ret)
+		goto out_free;
 
-	/*
-	 * Assume Stream ID == Requester ID for now.
-	 * We need a way to describe the ID mappings in FDT.
-	 */
-	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
-	for (i = 0; i < cfg->num_streamids; ++i)
-		if (cfg->streamids[i] == sid)
-			break;
-
-	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
-	if (i == cfg->num_streamids) {
-		cfg->streamids[i] = sid;
+	ret = -EINVAL;
+	for (i = 0; i < cfg->num_streamids; i++) {
+		u16 sid = cfg->streamids[i];
+
+		if (sid & ~cfg->smmu->streamid_mask) {
+			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
+				sid, cfg->smmu->streamid_mask);
+			goto out_free;
+		}
 		cfg->smendx[i] = INVALID_SMENDX;
-		cfg->num_streamids++;
 	}
 
-	return 0;
-}
-
-static int arm_smmu_init_platform_device(struct device *dev,
-					 struct iommu_group *group)
-{
-	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
-	struct arm_smmu_master *master;
-
-	if (!smmu)
-		return -ENODEV;
-
-	master = find_smmu_master(smmu, dev->of_node);
-	if (!master)
-		return -ENODEV;
-
-	iommu_group_set_iommudata(group, &master->cfg, NULL);
-
-	return 0;
-}
-
-static int arm_smmu_add_device(struct device *dev)
-{
-	struct iommu_group *group;
-
 	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
+	if (IS_ERR(group)) {
+		ret = PTR_ERR(group);
+		goto out_free;
+	}
 
 	iommu_group_put(group);
 	return 0;
+
+out_free:
+	kfree(cfg);
+	dev->archdata.iommu = NULL;
+	return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
-	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
-	struct arm_smmu_master_cfg *cfg = find_smmu_master_cfg(dev);
+	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
 
-	if (smmu && cfg)
-		arm_smmu_master_free_smes(smmu, cfg);
+	if (!cfg)
+		return;
 
+	arm_smmu_master_free_smes(cfg);
 	iommu_group_remove_device(dev);
+	kfree(cfg);
+	dev->archdata.iommu = NULL;
 }
 
 static struct iommu_group *arm_smmu_device_group(struct device *dev)
 {
 	struct iommu_group *group;
-	int ret;
 
 	if (dev_is_pci(dev))
 		group = pci_device_group(dev);
 	else
 		group = generic_device_group(dev);
 
-	if (IS_ERR(group))
-		return group;
-
-	if (dev_is_pci(dev))
-		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
-	else
-		ret = arm_smmu_init_platform_device(dev, group);
-
-	if (ret) {
-		iommu_group_put(group);
-		group = ERR_PTR(ret);
-	}
-
 	return group;
 }
@@ -1938,9 +1821,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
-	struct rb_node *node;
-	struct of_phandle_iterator it;
-	struct arm_smmu_phandle_args *masterspec;
 	int num_irqs, i, err;
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@@ -2001,37 +1881,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
-	i = 0;
-	smmu->masters = RB_ROOT;
-	err = -ENOMEM;
-	/* No need to zero the memory for masterspec */
-	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
-	if (!masterspec)
-		goto out_put_masters;
-
-	of_for_each_phandle(&it, err, dev->of_node,
-			    "mmu-masters", "#stream-id-cells", 0) {
-		int count = of_phandle_iterator_args(&it, masterspec->args,
-						     MAX_MASTER_STREAMIDS);
-		masterspec->np		= of_node_get(it.node);
-		masterspec->args_count	= count;
-
-		err = register_smmu_master(smmu, dev, masterspec);
-		if (err) {
-			dev_err(dev, "failed to add master %s\n",
-				masterspec->np->name);
-			kfree(masterspec);
-			goto out_put_masters;
-		}
-
-		i++;
-	}
-
-	dev_notice(dev, "registered %d master devices\n", i);
-	kfree(masterspec);
-
 	parse_driver_options(smmu);
 
 	if (smmu->version == ARM_SMMU_V2 &&
@@ -2039,8 +1888,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		dev_err(dev,
 			"found only %d context interrupt(s) but %d required\n",
 			smmu->num_context_irqs, smmu->num_context_banks);
-		err = -ENODEV;
-		goto out_put_masters;
+		return -ENODEV;
 	}
 
 	for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -2052,7 +1900,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		if (err) {
 			dev_err(dev, "failed to request global IRQ %d (%u)\n",
 				i, smmu->irqs[i]);
-			goto out_put_masters;
+			return err;
 		}
 	}
@@ -2063,22 +1911,12 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	arm_smmu_device_reset(smmu);
 	return 0;
-
-out_put_masters:
-	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
-		struct arm_smmu_master *master
-			= container_of(node, struct arm_smmu_master, node);
-		of_node_put(master->of_node);
-	}
-	return err;
 }
 
 static int arm_smmu_device_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct arm_smmu_device *curr, *smmu = NULL;
-	struct rb_node *node;
 
 	spin_lock(&arm_smmu_devices_lock);
 	list_for_each_entry(curr, &arm_smmu_devices, list) {
@@ -2093,12 +1931,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 	if (!smmu)
 		return -ENODEV;
 
-	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
-		struct arm_smmu_master *master
-			= container_of(node, struct arm_smmu_master, node);
-		of_node_put(master->of_node);
-	}
-
 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
 		dev_err(dev, "removing device with active domains!\n");