Commit 667ce33e authored by Rob Clark

drm/msm: support multiple address spaces

We can have various combinations of 64b and 32b address space, i.e. 64b
CPU but 32b display and GPU, or 64b CPU and GPU but 32b display.  So it is
best to decouple the device iovas from the mmap offset.
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 394da4b8
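The core idea, as a standalone sketch (illustrative only, not code from the patch): rather than reusing the GEM mmap offset as the device iova, each address space owns a page-granular drm_mm allocator sized to that device's aperture, so a 32b display or GPU hands out iovas independently of the 64b CPU-side mmap offset space:

#include <drm/drm_mm.h>

/* Illustrative helper mirroring what msm_gem_map_vma() below does:
 * carve npages of iova out of a per-device, page-granular drm_mm.
 * Nothing here depends on the CPU mmap offset anymore.
 */
static int example_alloc_iova(struct drm_mm *mm, struct drm_mm_node *node,
		int npages, uint64_t *iova)
{
	/* node->start comes back in pages, not bytes */
	int ret = drm_mm_insert_node(mm, node, npages, 0,
			DRM_MM_SEARCH_DEFAULT);
	if (ret)
		return ret;

	*iova = (uint64_t)node->start << PAGE_SHIFT;
	return 0;
}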
drivers/gpu/drm/msm/Makefile
@@ -48,6 +48,7 @@ msm-y := \
 	msm_gem_prime.o \
 	msm_gem_shrinker.o \
 	msm_gem_submit.o \
+	msm_gem_vma.o \
 	msm_gpu.o \
 	msm_iommu.o \
 	msm_perf.o \
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -583,7 +583,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 #endif
 	}

-	if (!gpu->mmu) {
+	if (!gpu->aspace) {
 		/* TODO we think it is possible to configure the GPU to
 		 * restrict access to VRAM carveout.  But the required
 		 * registers are unknown.  For now just bail out and
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -672,7 +672,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 #endif
 	}

-	if (!gpu->mmu) {
+	if (!gpu->aspace) {
 		/* TODO we think it is possible to configure the GPU to
 		 * restrict access to VRAM carveout.  But the required
 		 * registers are unknown.  For now just bail out and
drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -381,7 +381,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}

-	mmu = gpu->mmu;
+	mmu = gpu->aspace->mmu;
 	if (mmu) {
 		ret = mmu->funcs->attach(mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp4_kms.h"
@@ -159,17 +160,18 @@ static void mdp4_destroy(struct msm_kms *kms)
 {
 	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
 	struct device *dev = mdp4_kms->dev->dev;
-	struct msm_mmu *mmu = mdp4_kms->mmu;
-	if (mmu) {
-		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
-		mmu->funcs->destroy(mmu);
-	}
+	struct msm_gem_address_space *aspace = mdp4_kms->aspace;

 	if (mdp4_kms->blank_cursor_iova)
 		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
 	drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);

+	if (aspace) {
+		aspace->mmu->funcs->detach(aspace->mmu,
+				iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(aspace);
+	}

 	if (mdp4_kms->rpm_enabled)
 		pm_runtime_disable(dev);
@@ -440,7 +442,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	struct mdp4_platform_config *config = mdp4_get_config(pdev);
 	struct mdp4_kms *mdp4_kms;
 	struct msm_kms *kms = NULL;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
 	int irq, ret;

 	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -531,24 +533,26 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	mdelay(16);

 	if (config->iommu) {
-		mmu = msm_iommu_new(&pdev->dev, config->iommu);
-		if (IS_ERR(mmu)) {
-			ret = PTR_ERR(mmu);
+		aspace = msm_gem_address_space_create(&pdev->dev,
+				config->iommu, "mdp4");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
 			goto fail;
 		}
-		ret = mmu->funcs->attach(mmu, iommu_ports,
+
+		mdp4_kms->aspace = aspace;
+
+		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret)
 			goto fail;
-
-		mdp4_kms->mmu = mmu;
 	} else {
 		dev_info(dev->dev, "no iommu, fallback to phys "
 				"contig buffers for scanout\n");
-		mmu = NULL;
+		aspace = NULL;
 	}

-	mdp4_kms->id = msm_register_mmu(dev, mmu);
+	mdp4_kms->id = msm_register_address_space(dev, aspace);
 	if (mdp4_kms->id < 0) {
 		ret = mdp4_kms->id;
 		dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
@@ -598,6 +602,10 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
 	/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
 	config.max_clk = 266667000;
 	config.iommu = iommu_domain_alloc(&platform_bus_type);
+	if (config.iommu) {
+		config.iommu->geometry.aperture_start = 0x1000;
+		config.iommu->geometry.aperture_end = 0xffffffff;
+	}

 	return &config;
 }
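These aperture bounds feed drm_mm_init() in msm_gem_address_space_create() (added below), which works in page units. A rough sketch of the conversion, assuming 4K pages (illustrative, not part of the patch):

/* Illustrative only: the aperture above, in the page units used by the
 * per-aspace drm_mm.  Starting at 0x1000 keeps page 0 out of the
 * allocator, preserving iova == 0 as the "not mapped" sentinel that
 * msm_gem_unmap_vma() tests for.
 */
static void example_aperture_pages(void)
{
	unsigned long first = 0x1000UL >> PAGE_SHIFT;	  /* page 1 */
	unsigned long last = 0xffffffffUL >> PAGE_SHIFT;  /* just under 4GB */

	pr_debug("allocatable pages: %lu..%lu\n", first, last);
}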
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -43,7 +43,7 @@ struct mdp4_kms {
 	struct clk *pclk;
 	struct clk *lut_clk;
 	struct clk *axi_clk;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;

 	struct mdp_irq error_handler;
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -550,6 +550,10 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
 	static struct mdp5_cfg_platform config = {};

 	config.iommu = iommu_domain_alloc(&platform_bus_type);
+	if (config.iommu) {
+		config.iommu->geometry.aperture_start = 0x1000;
+		config.iommu->geometry.aperture_end = 0xffffffff;
+	}

 	return &config;
 }
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -19,6 +19,7 @@
 #include <linux/of_irq.h>

 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
@@ -117,11 +118,12 @@ static int mdp5_set_split_display(struct msm_kms *kms,
 static void mdp5_kms_destroy(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	struct msm_mmu *mmu = mdp5_kms->mmu;
+	struct msm_gem_address_space *aspace = mdp5_kms->aspace;

-	if (mmu) {
-		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
-		mmu->funcs->destroy(mmu);
+	if (aspace) {
+		aspace->mmu->funcs->detach(aspace->mmu,
+				iommu_ports, ARRAY_SIZE(iommu_ports));
+		msm_gem_address_space_destroy(aspace);
 	}
 }
@@ -564,7 +566,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	struct mdp5_kms *mdp5_kms;
 	struct mdp5_cfg *config;
 	struct msm_kms *kms;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;
 	int irq, i, ret;

 	/* priv->kms would have been populated by the MDP5 driver */
@@ -606,30 +608,29 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	mdelay(16);

 	if (config->platform.iommu) {
-		mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
-		if (IS_ERR(mmu)) {
-			ret = PTR_ERR(mmu);
-			dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
-			iommu_domain_free(config->platform.iommu);
+		aspace = msm_gem_address_space_create(&pdev->dev,
+				config->platform.iommu, "mdp5");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
 			goto fail;
 		}
-		ret = mmu->funcs->attach(mmu, iommu_ports,
+
+		mdp5_kms->aspace = aspace;
+
+		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret) {
 			dev_err(&pdev->dev, "failed to attach iommu: %d\n",
 					ret);
-			mmu->funcs->destroy(mmu);
 			goto fail;
 		}
 	} else {
 		dev_info(&pdev->dev,
 				"no iommu, fallback to phys contig buffers for scanout\n");
-		mmu = NULL;
+		aspace = NULL;
 	}
-	mdp5_kms->mmu = mmu;

-	mdp5_kms->id = msm_register_mmu(dev, mmu);
+	mdp5_kms->id = msm_register_address_space(dev, aspace);
 	if (mdp5_kms->id < 0) {
 		ret = mdp5_kms->id;
 		dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -39,7 +39,7 @@ struct mdp5_kms {
 	/* mapper-id used to request GEM buffer mapped for scanout: */
 	int id;
-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;

 	struct mdp5_smp *smp;
 	struct mdp5_ctl_manager *ctlm;
drivers/gpu/drm/msm/msm_drv.c
@@ -48,15 +48,16 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
 	.atomic_commit = msm_atomic_commit,
 };

-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+int msm_register_address_space(struct drm_device *dev,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	int idx = priv->num_mmus++;
+	int idx = priv->num_aspaces++;

-	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
+	if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
 		return -EINVAL;

-	priv->mmus[idx] = mmu;
+	priv->aspace[idx] = aspace;

 	return idx;
 }
drivers/gpu/drm/msm/msm_drv.h
@@ -52,6 +52,8 @@ struct msm_perf_state;
 struct msm_gem_submit;
 struct msm_fence_context;
 struct msm_fence_cb;
+struct msm_gem_address_space;
+struct msm_gem_vma;

 #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -121,9 +123,13 @@ struct msm_drm_private {
 	uint32_t pending_crtcs;
 	wait_queue_head_t pending_crtcs_event;

-	/* registered MMUs: */
-	unsigned int num_mmus;
-	struct msm_mmu *mmus[NUM_DOMAINS];
+	/* Registered address spaces.. currently this is fixed per # of
+	 * iommu's.  Ie. one for display block and one for gpu block.
+	 * Eventually, to do per-process gpu pagetables, we'll want one
+	 * of these per-process.
+	 */
+	unsigned int num_aspaces;
+	struct msm_gem_address_space *aspace[NUM_DOMAINS];

 	unsigned int num_planes;
 	struct drm_plane *planes[8];
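As the new comment notes, address spaces live in a small fixed table for now; the id returned by msm_register_address_space() doubles as the index into each object's domain[] array of per-aspace vmas. A hypothetical helper (not in the patch) makes the pairing explicit:

/* Hypothetical (not in the patch): the aspace id pairs priv->aspace[id]
 * with the msm_gem_vma stored at the same index in the object.
 */
static struct msm_gem_vma *lookup_vma(struct msm_drm_private *priv,
		struct msm_gem_object *msm_obj, int id,
		struct msm_gem_address_space **aspace)
{
	*aspace = priv->aspace[id];
	return &msm_obj->domain[id];
}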
@@ -174,7 +180,18 @@ int msm_atomic_check(struct drm_device *dev,
 int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool nonblock);

-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+int msm_register_address_space(struct drm_device *dev,
+		struct msm_gem_address_space *aspace);
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+
+void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+		const char *name);

 void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
drivers/gpu/drm/msm/msm_gem.c
@@ -296,12 +296,8 @@ put_iova(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

 	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		struct msm_mmu *mmu = priv->mmus[id];
-		if (mmu && msm_obj->domain[id].iova) {
-			uint32_t offset = msm_obj->domain[id].iova;
-			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
-			msm_obj->domain[id].iova = 0;
-		}
+		msm_gem_unmap_vma(priv->aspace[id],
+				&msm_obj->domain[id], msm_obj->sgt);
 	}
 }
@@ -326,16 +322,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 			return PTR_ERR(pages);

 		if (iommu_present(&platform_bus_type)) {
-			struct msm_mmu *mmu = priv->mmus[id];
-			uint32_t offset;
-
-			if (WARN_ON(!mmu))
-				return -EINVAL;
-
-			offset = (uint32_t)mmap_offset(obj);
-			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
-					obj->size, IOMMU_READ | IOMMU_WRITE);
-			msm_obj->domain[id].iova = offset;
+			ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
+					msm_obj->sgt, obj->size >> PAGE_SHIFT);
 		} else {
 			msm_obj->domain[id].iova = physaddr(obj);
 		}
@@ -631,9 +619,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object *robj = msm_obj->resv;
 	struct reservation_object_list *fobj;
+	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct dma_fence *fence;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
+	unsigned id;

 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -650,10 +640,15 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		break;
 	}

-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, obj->refcount.refcount.counter,
-			off, msm_obj->vaddr, obj->size, madv);
+			off, msm_obj->vaddr);
+
+	for (id = 0; id < priv->num_aspaces; id++)
+		seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+	seq_printf(m, " %zu%s\n", obj->size, madv);

 	rcu_read_lock();
 	fobj = rcu_dereference(robj->fence);
@@ -761,7 +756,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	unsigned sz;
 	bool use_vram = false;

 	switch (flags & MSM_BO_CACHE_MASK) {
@@ -783,16 +777,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	if (WARN_ON(use_vram && !priv->vram.size))
 		return -EINVAL;

-	sz = sizeof(*msm_obj);
-	if (use_vram)
-		sz += sizeof(struct drm_mm_node);
-
-	msm_obj = kzalloc(sz, GFP_KERNEL);
+	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;

 	if (use_vram)
-		msm_obj->vram_node = (void *)&msm_obj[1];
+		msm_obj->vram_node = &msm_obj->domain[0].node;

 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
drivers/gpu/drm/msm/msm_gem.h
@@ -24,6 +24,20 @@
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000 /* try to use stolen/splash memory */

+struct msm_gem_address_space {
+	const char *name;
+	/* NOTE: mm managed at the page level, size is in # of pages
+	 * and position mm_node->start is in # of pages:
+	 */
+	struct drm_mm mm;
+	struct msm_mmu *mmu;
+};
+
+struct msm_gem_vma {
+	struct drm_mm_node node;
+	uint64_t iova;
+};
+
 struct msm_gem_object {
 	struct drm_gem_object base;
@@ -61,10 +75,7 @@ struct msm_gem_object {
 	struct sg_table *sgt;
 	void *vaddr;

-	struct {
-		// XXX
-		uint32_t iova;
-	} domain[NUM_DOMAINS];
+	struct msm_gem_vma domain[NUM_DOMAINS];

 	/* normally (resv == &_resv) except for imported bo's */
 	struct reservation_object *resv;
new file: drivers/gpu/drm/msm/msm_gem_vma.c
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+void
+msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt)
+{
+	if (!vma->iova)
+		return;
+
+	if (aspace->mmu) {
+		unsigned size = vma->node.size << PAGE_SHIFT;
+		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
+	}
+
+	drm_mm_remove_node(&vma->node);
+
+	vma->iova = 0;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+	int ret;
+
+	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+		return 0;
+
+	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
+			0, DRM_MM_SEARCH_DEFAULT);
+	if (ret)
+		return ret;
+
+	vma->iova = vma->node.start << PAGE_SHIFT;
+
+	if (aspace->mmu) {
+		unsigned size = npages << PAGE_SHIFT;
+		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+				size, IOMMU_READ | IOMMU_WRITE);
+	}
+
+	return ret;
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+	drm_mm_takedown(&aspace->mm);
+	if (aspace->mmu)
+		aspace->mmu->funcs->destroy(aspace->mmu);
+	kfree(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+		const char *name)
+{
+	struct msm_gem_address_space *aspace;
+
+	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+	if (!aspace)
+		return ERR_PTR(-ENOMEM);
+
+	aspace->name = name;
+	aspace->mmu = msm_iommu_new(dev, domain);
+
+	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
+			(domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+
+	return aspace;
+}
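Putting the new file together, a buffer's map/unmap lifecycle now looks roughly like this (a sketch under the patch's conventions, with error handling trimmed):

/* Sketch: map one object into one address space, then tear it down.
 * vma->iova == 0 doubles as the "not mapped" marker, which is one
 * reason the IOMMU apertures above start at 0x1000 rather than 0.
 */
static void example_map_cycle(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, size_t size)
{
	if (msm_gem_map_vma(aspace, vma, sgt, size >> PAGE_SHIFT))
		return;

	/* the device may now DMA within [vma->iova, vma->iova + size) */

	msm_gem_unmap_vma(aspace, vma, sgt);
}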
drivers/gpu/drm/msm/msm_gpu.c
@@ -656,12 +656,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	 */
 	iommu = iommu_domain_alloc(&platform_bus_type);
 	if (iommu) {
+		/* TODO 32b vs 64b address space.. */
+		iommu->geometry.aperture_start = 0x1000;
+		iommu->geometry.aperture_end = 0xffffffff;
+
 		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
-		if (IS_ERR(gpu->mmu)) {
-			ret = PTR_ERR(gpu->mmu);
+		gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+				iommu, "gpu");
+		if (IS_ERR(gpu->aspace)) {
+			ret = PTR_ERR(gpu->aspace);
 			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
-			gpu->mmu = NULL;
+			gpu->aspace = NULL;
 			iommu_domain_free(iommu);
 			goto fail;
 		}
@@ -669,7 +674,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	} else {
 		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	}

-	gpu->id = msm_register_mmu(drm, gpu->mmu);
+	gpu->id = msm_register_address_space(drm, gpu->aspace);

 	/* Create ringbuffer: */
@@ -705,8 +710,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 		msm_ringbuffer_destroy(gpu->rb);
 	}

-	if (gpu->mmu)
-		gpu->mmu->funcs->destroy(gpu->mmu);
+	if (gpu->aspace)
+		msm_gem_address_space_destroy(gpu->aspace);

 	if (gpu->fctx)
 		msm_fence_context_free(gpu->fctx);
drivers/gpu/drm/msm/msm_gpu.h
@@ -98,7 +98,7 @@ struct msm_gpu {
 	void __iomem *mmio;
 	int irq;

-	struct msm_mmu *mmu;
+	struct msm_gem_address_space *aspace;

 	int id;

 	/* Power Control: */