Commit ccac7ce3 authored by Jordan Crouse, committed by Rob Clark

drm/msm: Refactor address space initialization

Refactor how address space initialization works. Instead of having the
address space function create the MMU object (and thus requiring separate
but equal functions for gpummu and iommu), use a single function and pass
the MMU struct in. Make the generic code cleaner by using target-specific
functions to create the address space so a2xx can do its own thing in its
own space. For all the other targets, use a generic helper to initialize
the IOMMU, but leave the door open for newer targets to customize it if
they need to.
Reviewed-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
[squash in rebase fixups]
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 52da6d51
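
For orientation before the diff, here is a minimal sketch of the interface this commit moves to: each GPU generation supplies a create_address_space callback in msm_gpu_funcs, builds its own msm_mmu, and hands it to msm_gem_address_space_create() together with an explicit VA start and size. The aNxx_create_address_space() name and the VA range below are illustrative assumptions, not code from the patch; most targets simply set the callback to the generic adreno_iommu_create_address_space() helper added here.

/*
 * Illustrative sketch only (not part of this commit): the shape of a
 * per-target hook under the refactored interface. The MMU object is created
 * first and then passed to msm_gem_address_space_create() along with the
 * VA range the target wants.
 */
static struct msm_gem_address_space *
aNxx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
	/* IOMMU-backed MMU, as the a3xx-a6xx targets use */
	struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
	struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
	struct msm_gem_address_space *aspace;

	/* the start/size here are placeholders, not a real target's map */
	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, 0xfffffff);

	/* the address space takes ownership of the MMU only on success */
	if (IS_ERR(aspace) && !IS_ERR(mmu))
		mmu->funcs->destroy(mmu);

	return aspace;
}

A target opts in by setting .create_address_space = aNxx_create_address_space (or the generic helper) in its adreno_gpu_funcs; with that in place, msm_gpu_init() no longer needs va_start/va_end in msm_gpu_config or an adreno_is_a2xx() special case, as the diff below removes.
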
@@ -401,6 +401,21 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
static struct msm_gem_address_space *
a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
SZ_16M + 0xfff * SZ_64K);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
return aspace;
}
/* Register offset defines for A2XX - copy of A3XX */
static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -429,6 +444,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = a2xx_create_address_space,
},
};
@@ -441,6 +441,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
},
};
@@ -583,6 +583,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a4xx_get_timestamp,
};
@@ -1445,6 +1445,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a5xx_get_timestamp,
};
@@ -1114,15 +1114,14 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
struct iommu_domain *domain;
struct msm_mmu *mmu;
domain = iommu_domain_alloc(&platform_bus_type);
if (!domain)
return -ENODEV;
domain->geometry.aperture_start = 0x00000000;
domain->geometry.aperture_end = 0x7fffffff;
gmu->aspace = msm_gem_address_space_create(gmu->dev, domain, "gmu");
mmu = msm_iommu_new(gmu->dev, domain);
gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
if (IS_ERR(gmu->aspace)) {
iommu_domain_free(domain);
return PTR_ERR(gmu->aspace);
@@ -893,6 +893,7 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
#endif
},
.get_timestamp = a6xx_get_timestamp,
@@ -185,6 +185,23 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
0xfffffff);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
return aspace;
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -988,12 +1005,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
/* maximum range of a2xx mmu */
if (adreno_is_a2xx(adreno_gpu))
adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
adreno_gpu_config.nr_rings = nr_rings;
adreno_get_pwrlevels(&pdev->dev, gpu);
@@ -287,6 +287,14 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state);
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
/*
* Common helper function to initialize the default address space for arm-smmu
* attached targets
*/
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev);
/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode
@@ -794,18 +794,18 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
struct iommu_domain *domain;
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
domain = iommu_domain_alloc(&platform_bus_type);
if (!domain)
return 0;
domain->geometry.aperture_start = 0x1000;
domain->geometry.aperture_end = 0xffffffff;
mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
aspace = msm_gem_address_space_create(mmu, "dpu1",
0x1000, 0xfffffff);
aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
domain, "dpu1");
if (IS_ERR(aspace)) {
iommu_domain_free(domain);
mmu->funcs->destroy(mmu);
return PTR_ERR(aspace);
}
@@ -510,9 +510,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
aspace = msm_gem_address_space_create(&pdev->dev,
config->iommu, "mdp4");
struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
config->iommu);
aspace = msm_gem_address_space_create(mmu,
"mdp4", 0x1000, 0xffffffff);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
@@ -565,10 +571,6 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
if (config.iommu) {
config.iommu->geometry.aperture_start = 0x1000;
config.iommu->geometry.aperture_end = 0xffffffff;
}
return &config;
}
@@ -1017,10 +1017,6 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
static struct mdp5_cfg_platform config = {};
config.iommu = iommu_domain_alloc(&platform_bus_type);
if (config.iommu) {
config.iommu->geometry.aperture_start = 0x1000;
config.iommu->geometry.aperture_end = 0xffffffff;
}
return &config;
}
@@ -632,13 +632,20 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
struct msm_mmu *mmu;
iommu_dev = &pdev->dev;
if (!dev_iommu_fwspec_get(iommu_dev))
iommu_dev = iommu_dev->parent;
aspace = msm_gem_address_space_create(iommu_dev,
config->platform.iommu, "mdp5");
mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
aspace = msm_gem_address_space_create(mmu, "mdp5",
0x1000, 0xffffffff);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
@@ -252,12 +252,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
struct msm_gem_address_space *
msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
const char *name, uint64_t va_start, uint64_t va_end);
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
u64 va_start, u64 size);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
@@ -127,46 +127,14 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
return 0;
}
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
u64 va_start, u64 size)
{
struct msm_gem_address_space *aspace;
u64 start = domain->geometry.aperture_start;
u64 size = domain->geometry.aperture_end - start;
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
return ERR_PTR(-ENOMEM);
spin_lock_init(&aspace->lock);
aspace->name = name;
aspace->mmu = msm_iommu_new(dev, domain);
if (IS_ERR(aspace->mmu)) {
int ret = PTR_ERR(aspace->mmu);
kfree(aspace);
return ERR_PTR(ret);
}
/*
* Attaching the IOMMU device changes the aperture values so use the
* cached values instead
*/
drm_mm_init(&aspace->mm, start >> PAGE_SHIFT, size >> PAGE_SHIFT);
kref_init(&aspace->kref);
return aspace;
}
struct msm_gem_address_space *
msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
const char *name, uint64_t va_start, uint64_t va_end)
{
struct msm_gem_address_space *aspace;
u64 size = va_end - va_start;
if (IS_ERR(mmu))
return ERR_CAST(mmu);
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
@@ -174,16 +142,9 @@ msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
spin_lock_init(&aspace->lock);
aspace->name = name;
aspace->mmu = msm_gpummu_new(dev, gpu);
if (IS_ERR(aspace->mmu)) {
int ret = PTR_ERR(aspace->mmu);
kfree(aspace);
return ERR_PTR(ret);
}
aspace->mmu = mmu;
drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
size >> PAGE_SHIFT);
drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
kref_init(&aspace->kref);
@@ -821,42 +821,6 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
uint64_t va_start, uint64_t va_end)
{
struct msm_gem_address_space *aspace;
/*
* Setup IOMMU.. eventually we will (I think) do this once per context
* and have separate page tables per context. For now, to keep things
* simple and to get something working, just use a single address space:
*/
if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
if (!iommu)
return NULL;
iommu->geometry.aperture_start = va_start;
iommu->geometry.aperture_end = va_end;
DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
if (IS_ERR(aspace))
iommu_domain_free(iommu);
} else {
aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
va_start, va_end);
}
if (IS_ERR(aspace))
DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
PTR_ERR(aspace));
return aspace;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
@@ -929,8 +893,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
msm_devfreq_init(gpu);
gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
config->va_start, config->va_end);
gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
if (gpu->aspace == NULL)
DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
@@ -21,8 +21,6 @@ struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
uint64_t va_start;
uint64_t va_end;
unsigned int nr_rings;
};
@@ -64,6 +62,8 @@ struct msm_gpu_funcs {
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
struct msm_gem_address_space *(*create_address_space)
(struct msm_gpu *gpu, struct platform_device *pdev);
};
struct msm_gpu {
@@ -70,6 +70,9 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
struct msm_iommu *iommu;
int ret;
if (!domain)
return ERR_PTR(-ENODEV);
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return ERR_PTR(-ENOMEM);
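
For completeness, a caller-side sketch of the display (KMS) pattern the dpu/mdp4/mdp5 hunks above switch to, assuming a hypothetical helper name, address space name, and VA range: the iommu_domain is wrapped with msm_iommu_new() and the aperture is passed explicitly instead of being programmed into the domain geometry.

/*
 * Illustrative sketch only (not from this commit). Mirrors the new KMS-side
 * pattern: wrap the domain with msm_iommu_new(), pass an explicit VA range,
 * and destroy the MMU if address space creation fails (the diff replaces the
 * old explicit iommu_domain_free() with mmu->funcs->destroy()).
 */
static struct msm_gem_address_space *
example_kms_create_aspace(struct device *dev)
{
	struct iommu_domain *domain;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return NULL;	/* no IOMMU available */

	mmu = msm_iommu_new(dev, domain);
	/* placeholder range; real drivers pick their own start/size */
	aspace = msm_gem_address_space_create(mmu, "example", 0x1000, 0xffffffff);

	/* the address space did not take ownership, so clean up the MMU */
	if (IS_ERR(aspace) && !IS_ERR(mmu))
		mmu->funcs->destroy(mmu);

	return aspace;
}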