Commit af9b3547, authored by Jonathan Marek, committed by Rob Clark

drm/msm: use the right pgprot when mapping BOs in the kernel

Use the same logic as the userspace mapping.

This fixes msm_rd with cached BOs.
Signed-off-by: Jonathan Marek <jonathan@marek.ca>
Acked-by: Jordan Crouse <jordan@cosmicpenguin.net>
Link: https://lore.kernel.org/r/20210423190833.25319-4-jonathan@marek.ca
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent a5fc7aa9
......@@ -211,6 +211,15 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
msm_gem_unlock(obj);
}
/*
 * msm_gem_pgprot() - derive the page protection for a BO mapping.
 * @msm_obj: the GEM object whose caching flags are consulted
 * @prot:    the base protection bits to adjust
 *
 * Applies the object's caching-mode flag (write-combine or uncached)
 * on top of @prot; a BO with neither flag keeps @prot unchanged
 * (i.e. a cached mapping). Shared by the userspace-mmap and kernel-vmap
 * paths so both use the same attributes.
 */
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	uint32_t flags = msm_obj->flags;

	if (flags & MSM_BO_WC)
		prot = pgprot_writecombine(prot);
	else if (flags & MSM_BO_UNCACHED)
		prot = pgprot_noncached(prot);

	return prot;
}
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
......@@ -218,13 +227,7 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
if (msm_obj->flags & MSM_BO_WC)
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else if (msm_obj->flags & MSM_BO_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
else
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
}
......@@ -644,7 +647,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment