Commit 52da6d51 authored by Jordan Crouse, committed by Rob Clark

drm/msm: Attach the IOMMU device during initialization

Everywhere an IOMMU object is created by msm_gpu_create_address_space() the
IOMMU device is attached immediately afterwards. Instead of carrying the
attach infrastructure around in the device-specific code, do the attach
directly in msm_iommu_new(), where the MMU object is created. This gets it
out of the way for the more aggressive cleanups that follow.
Reviewed-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
[squash in rebase fixups and fix for unused fxn]
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 7d4eedb0
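The net effect, condensed here into a single sketch assembled from the msm_iommu.c hunks below (the iommu->domain assignment is an unchanged context line the diff does not show and is assumed from the surrounding code), is that msm_iommu_new() now performs the attach itself, so callers only have to check the returned pointer:

struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;
	int ret;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;	/* unchanged context line, assumed */
	msm_mmu_init(&iommu->base, dev, &funcs);
	iommu_set_fault_handler(domain, msm_fault_handler, iommu);

	/* attach here instead of in every caller */
	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}

Because the attach can change the domain's reported aperture, msm_gem_address_space_create() now caches aperture_start and the size before calling msm_iommu_new() and passes the cached values to drm_mm_init(), as the comment in the msm_gem_vma.c hunk notes.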
@@ -1114,7 +1114,6 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
 {
 	struct iommu_domain *domain;
-	int ret;
 
 	domain = iommu_domain_alloc(&platform_bus_type);
 	if (!domain)
@@ -1129,12 +1128,6 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
 		return PTR_ERR(gmu->aspace);
 	}
 
-	ret = gmu->aspace->mmu->funcs->attach(gmu->aspace->mmu);
-	if (ret) {
-		msm_gem_address_space_put(gmu->aspace);
-		return ret;
-	}
-
 	return 0;
 }
...
@@ -794,7 +794,6 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 {
 	struct iommu_domain *domain;
 	struct msm_gem_address_space *aspace;
-	int ret;
 
 	domain = iommu_domain_alloc(&platform_bus_type);
 	if (!domain)
@@ -810,13 +809,6 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 		return PTR_ERR(aspace);
 	}
 
-	ret = aspace->mmu->funcs->attach(aspace->mmu);
-	if (ret) {
-		DPU_ERROR("failed to attach iommu %d\n", ret);
-		msm_gem_address_space_put(aspace);
-		return ret;
-	}
-
 	dpu_kms->base.aspace = aspace;
 	return 0;
 }
...
@@ -518,10 +518,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		}
 
 		kms->aspace = aspace;
-
-		ret = aspace->mmu->funcs->attach(aspace->mmu);
-		if (ret)
-			goto fail;
 	} else {
 		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
 				"contig buffers for scanout\n");
...
@@ -644,13 +644,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		}
 
 		kms->aspace = aspace;
-
-		ret = aspace->mmu->funcs->attach(aspace->mmu);
-		if (ret) {
-			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
-				ret);
-			goto fail;
-		}
 	} else {
 		DRM_DEV_INFO(&pdev->dev,
 			     "no iommu, fallback to phys contig buffers for scanout\n");
...
@@ -133,8 +133,8 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name)
 {
 	struct msm_gem_address_space *aspace;
-	u64 size = domain->geometry.aperture_end -
-		domain->geometry.aperture_start;
+	u64 start = domain->geometry.aperture_start;
+	u64 size = domain->geometry.aperture_end - start;
 
 	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
 	if (!aspace)
@@ -143,9 +143,18 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = msm_iommu_new(dev, domain);
+	if (IS_ERR(aspace->mmu)) {
+		int ret = PTR_ERR(aspace->mmu);
+
+		kfree(aspace);
+		return ERR_PTR(ret);
+	}
 
-	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
-		size >> PAGE_SHIFT);
+	/*
+	 * Attaching the IOMMU device changes the aperture values so use the
+	 * cached values instead
+	 */
+	drm_mm_init(&aspace->mm, start >> PAGE_SHIFT, size >> PAGE_SHIFT);
 
 	kref_init(&aspace->kref);
 
@@ -166,6 +175,12 @@ msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
 	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = msm_gpummu_new(dev, gpu);
+	if (IS_ERR(aspace->mmu)) {
+		int ret = PTR_ERR(aspace->mmu);
+
+		kfree(aspace);
+		return ERR_PTR(ret);
+	}
 
 	drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
 		size >> PAGE_SHIFT);
...
@@ -826,7 +826,6 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
 		uint64_t va_start, uint64_t va_end)
 {
 	struct msm_gem_address_space *aspace;
-	int ret;
 
 	/*
 	 * Setup IOMMU.. eventually we will (I think) do this once per context
@@ -851,17 +850,9 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
 			va_start, va_end);
 	}
 
-	if (IS_ERR(aspace)) {
+	if (IS_ERR(aspace))
 		DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
 			PTR_ERR(aspace));
-		return ERR_CAST(aspace);
-	}
-
-	ret = aspace->mmu->funcs->attach(aspace->mmu);
-	if (ret) {
-		msm_gem_address_space_put(aspace);
-		return ERR_PTR(ret);
-	}
 
 	return aspace;
 }
...
@@ -21,11 +21,6 @@ struct msm_gpummu {
 #define GPUMMU_PAGE_SIZE SZ_4K
 #define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
 
-static int msm_gpummu_attach(struct msm_mmu *mmu)
-{
-	return 0;
-}
-
 static void msm_gpummu_detach(struct msm_mmu *mmu)
 {
 }
@@ -85,7 +80,6 @@ static void msm_gpummu_destroy(struct msm_mmu *mmu)
 }
 
 static const struct msm_mmu_funcs funcs = {
-		.attach = msm_gpummu_attach,
 		.detach = msm_gpummu_detach,
 		.map = msm_gpummu_map,
 		.unmap = msm_gpummu_unmap,
...
@@ -23,13 +23,6 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
 	return 0;
 }
 
-static int msm_iommu_attach(struct msm_mmu *mmu)
-{
-	struct msm_iommu *iommu = to_msm_iommu(mmu);
-
-	return iommu_attach_device(iommu->domain, mmu->dev);
-}
-
 static void msm_iommu_detach(struct msm_mmu *mmu)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -66,7 +59,6 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
 }
 
 static const struct msm_mmu_funcs funcs = {
-		.attach = msm_iommu_attach,
 		.detach = msm_iommu_detach,
 		.map = msm_iommu_map,
 		.unmap = msm_iommu_unmap,
@@ -76,6 +68,7 @@ static const struct msm_mmu_funcs funcs = {
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 {
 	struct msm_iommu *iommu;
+	int ret;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -85,5 +78,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 	msm_mmu_init(&iommu->base, dev, &funcs);
 	iommu_set_fault_handler(domain, msm_fault_handler, iommu);
 
+	ret = iommu_attach_device(iommu->domain, dev);
+	if (ret) {
+		kfree(iommu);
+		return ERR_PTR(ret);
+	}
+
 	return &iommu->base;
 }
...
@@ -10,7 +10,6 @@
 #include <linux/iommu.h>
 
 struct msm_mmu_funcs {
-	int (*attach)(struct msm_mmu *mmu);
 	void (*detach)(struct msm_mmu *mmu);
 	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
 			unsigned len, int prot);
...