Commit e0106ac9 authored by Thomas Zimmermann

Revert "drm/shmem-helper: Switch to reservation lock"

This reverts commit 67b7836d.

The locking appears incomplete. A caller of the SHMEM helper's pin
function never acquires the dma-buf reservation lock, so we get
  WARNING: CPU: 3 PID: 967 at drivers/gpu/drm/drm_gem_shmem_helper.c:243 drm_gem_shmem_pin+0x42/0x90 [drm_shmem_helper]
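
For context, the pin path introduced by the reverted commit expects callers to already hold the object's reservation lock. A minimal sketch of that contract (hypothetical code; the exact check at drm_gem_shmem_helper.c:243 may differ, though dma_resv_is_locked() and the function names are real):

	int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
	{
		/* Fires for callers that pin without locking shmem->base.resv */
		WARN_ON(!dma_resv_is_locked(shmem->base.resv));

		return drm_gem_shmem_get_pages(shmem);
	}

A caller that satisfies the contract would wrap the call:

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_pin(shmem);
	dma_resv_unlock(shmem->base.resv);

A pin caller that never takes the lock trips the WARN_ON above, which is what the revert addresses.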
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230228152612.19971-1-tzimmermann@suse.de
parent f9b9297b
This diff is collapsed. (It covers the shmem helper itself, drivers/gpu/drm/drm_gem_shmem_helper.c.)
drivers/gpu/drm/lima/lima_gem.c:

@@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
 	new_size = min(new_size, bo->base.base.size);
 
-	dma_resv_lock(bo->base.base.resv, NULL);
+	mutex_lock(&bo->base.pages_lock);
 	if (bo->base.pages) {
 		pages = bo->base.pages;
@@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
 		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
 				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
 		if (!pages) {
-			dma_resv_unlock(bo->base.base.resv);
+			mutex_unlock(&bo->base.pages_lock);
 			return -ENOMEM;
 		}
@@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
 		struct page *page = shmem_read_mapping_page(mapping, i);
 
 		if (IS_ERR(page)) {
-			dma_resv_unlock(bo->base.base.resv);
+			mutex_unlock(&bo->base.pages_lock);
 			return PTR_ERR(page);
 		}
 		pages[i] = page;
 	}
-	dma_resv_unlock(bo->base.base.resv);
+	mutex_unlock(&bo->base.pages_lock);
 
 	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
 					new_size, GFP_KERNEL);
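
Note the shape this hunk restores: the pages array is populated under pages_lock, and the lock is dropped before the sg_table is built. A condensed sketch of the resulting control flow, using the names from the hunk above:

	mutex_lock(&bo->base.pages_lock);
	/* fill bo->base.pages[] via shmem_read_mapping_page(),
	 * unlocking before bailing out on any error */
	mutex_unlock(&bo->base.pages_lock);

	/* runs unlocked; it only reads the now-populated array */
	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);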
drivers/gpu/drm/panfrost/panfrost_drv.c:

@@ -407,10 +407,6 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 	bo = to_panfrost_bo(gem_obj);
 
-	ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
-	if (ret)
-		goto out_put_object;
-
 	mutex_lock(&pfdev->shrinker_lock);
 	mutex_lock(&bo->mappings.lock);
 	if (args->madv == PANFROST_MADV_DONTNEED) {
@@ -448,8 +444,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 out_unlock_mappings:
 	mutex_unlock(&bo->mappings.lock);
 	mutex_unlock(&pfdev->shrinker_lock);
-	dma_resv_unlock(bo->base.base.resv);
 
-out_put_object:
 	drm_gem_object_put(gem_obj);
 	return ret;
 }
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c:

@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
 	if (!mutex_trylock(&bo->mappings.lock))
 		return false;
 
-	if (!dma_resv_trylock(shmem->base.resv))
+	if (!mutex_trylock(&shmem->pages_lock))
 		goto unlock_mappings;
 
 	panfrost_gem_teardown_mappings_locked(bo);
-	drm_gem_shmem_purge(&bo->base);
+	drm_gem_shmem_purge_locked(&bo->base);
 	ret = true;
 
-	dma_resv_unlock(shmem->base.resv);
+	mutex_unlock(&shmem->pages_lock);
 
 unlock_mappings:
 	mutex_unlock(&bo->mappings.lock);
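
The shrinker already holds pages_lock here, so it calls the _locked purge variant directly. For the unlocked drm_gem_shmem_purge() whose bool return is restored in the header changes below, a plausible implementation, inferred from the naming convention (the helper's own diff is collapsed above, so this is a sketch, not the verbatim code):

	bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
	{
		if (!mutex_trylock(&shmem->pages_lock))
			return false;	/* contended; let reclaim move on */
		drm_gem_shmem_purge_locked(shmem);
		mutex_unlock(&shmem->pages_lock);
		return true;
	}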
drivers/gpu/drm/panfrost/panfrost_mmu.c:

@@ -443,7 +443,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	struct panfrost_gem_mapping *bomapping;
 	struct panfrost_gem_object *bo;
 	struct address_space *mapping;
-	struct drm_gem_object *obj;
 	pgoff_t page_offset;
 	struct sg_table *sgt;
 	struct page **pages;
@@ -466,16 +465,15 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	page_offset = addr >> PAGE_SHIFT;
 	page_offset -= bomapping->mmnode.start;
 
-	obj = &bo->base.base;
-	dma_resv_lock(obj->resv, NULL);
+	mutex_lock(&bo->base.pages_lock);
 
 	if (!bo->base.pages) {
 		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
 				sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
 		if (!bo->sgts) {
+			mutex_unlock(&bo->base.pages_lock);
 			ret = -ENOMEM;
-			goto err_unlock;
+			goto err_bo;
 		}
 
 		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -483,8 +481,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 		if (!pages) {
 			kvfree(bo->sgts);
 			bo->sgts = NULL;
+			mutex_unlock(&bo->base.pages_lock);
 			ret = -ENOMEM;
-			goto err_unlock;
+			goto err_bo;
 		}
 		bo->base.pages = pages;
 		bo->base.pages_use_count = 1;
@@ -492,6 +491,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 		pages = bo->base.pages;
 		if (pages[page_offset]) {
 			/* Pages are already mapped, bail out. */
+			mutex_unlock(&bo->base.pages_lock);
 			goto out;
 		}
 	}
@@ -502,11 +502,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
 		pages[i] = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(pages[i])) {
+			mutex_unlock(&bo->base.pages_lock);
 			ret = PTR_ERR(pages[i]);
 			goto err_pages;
 		}
 	}
 
+	mutex_unlock(&bo->base.pages_lock);
+
 	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
 	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
 					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
@@ -525,8 +528,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
 out:
-	dma_resv_unlock(obj->resv);
-
 	panfrost_gem_mapping_put(bomapping);
 	return 0;
@@ -535,8 +536,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 	sg_free_table(sgt);
 err_pages:
 	drm_gem_shmem_put_pages(&bo->base);
-err_unlock:
-	dma_resv_unlock(obj->resv);
 err_bo:
 	panfrost_gem_mapping_put(bomapping);
 	return ret;
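
The sgt indexing above gives each 2 MiB chunk of the heap object its own sg_table. A worked example, assuming 4 KiB pages (so NUM_FAULT_PAGES == SZ_2M / PAGE_SIZE == 512) and a fault address already rounded down to a 2 MiB boundary earlier in the handler:

	/* fault at offset 0x600000 within the mapping */
	page_offset = 0x600000 >> PAGE_SHIFT;	/* 0x600 == 1536 */
	sgt = &bo->sgts[1536 / 512];		/* -> bo->sgts[3] */
	/* pages [1536, 2048) are read in and mapped in one go */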
include/drm/drm_gem_shmem_helper.h:

@@ -26,6 +26,11 @@ struct drm_gem_shmem_object {
 	 */
 	struct drm_gem_object base;
 
+	/**
+	 * @pages_lock: Protects the page table and use count
+	 */
+	struct mutex pages_lock;
+
 	/**
 	 * @pages: Page table
 	 */
@@ -60,6 +65,11 @@ struct drm_gem_shmem_object {
 	 */
 	struct sg_table *sgt;
 
+	/**
+	 * @vmap_lock: Protects the vmap address and use count
+	 */
+	struct mutex vmap_lock;
+
 	/**
 	 * @vaddr: Kernel virtual address of the backing memory
 	 */
@@ -99,6 +109,7 @@ struct drm_gem_shmem_object {
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
 void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
 
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
@@ -117,7 +128,8 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
 		!shmem->base.dma_buf && !shmem->base.import_attach;
 }
 
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
+bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
 
 struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
 struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
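
With the bool return restored, a shrinker-style caller can fold the purgeability test and the purge attempt into one expression. A hypothetical usage sketch (freed counts reclaimed pages; not taken from this patch):

	if (drm_gem_shmem_is_purgeable(shmem) && drm_gem_shmem_purge(shmem))
		freed += shmem->base.size >> PAGE_SHIFT;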