Commit 0cdbe8ac authored by David Herrmann

drm/gem: remove misleading gfp parameter to get_pages()

drm_gem_get_pages() currently allows passing a 'gfp' parameter that is
passed to shmem combined with mapping_gfp_mask(). Given that the default
mapping_gfp_mask() is GFP_HIGHUSER, it is _very_ unlikely that anyone will
ever make use of that parameter. In fact, all drivers currently pass
redundant flags or 0.

This patch removes the 'gfp' parameter. The only reason to keep it is to
remove flags like __GFP_WAIT. But in its current form, it can only be used
to add flags. So to remove __GFP_WAIT, you'd have to drop it from the
mapping_gfp_mask, which again is stupid as this mask is used by shmem-core
for other allocations, too.

If any driver ever requires that parameter, we can introduce a new helper
that takes the raw 'gfp' parameter. The caller'd be responsible to combine
it with mapping_gfp_mask() in a suitable way. The current
drm_gem_get_pages() helper would then simply use mapping_gfp_mask() and
call the new helper. This is what shmem_read_mapping_pages{_gfp,} does
right now.

Moreover, the gfp-zone flag-usage is not obvious: If you pass a modified
zone, shmem core will WARN() or even BUG(). In other words, the following
must be true for 'gfp' passed to shmem_read_mapping_pages_gfp():
    gfp_zone(mapping_gfp_mask(mapping)) == gfp_zone(gfp)
Add a comment to drm_gem_get_pages() explaining that constraint.
Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
parent ab5a60c3
...@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset); ...@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
* drm_gem_get_pages - helper to allocate backing pages for a GEM object * drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem * from shmem
* @obj: obj in question * @obj: obj in question
* @gfpmask: gfp mask of requested pages *
* This reads the page-array of the shmem-backing storage of the given gem
* object. An array of pages is returned. If a page is not allocated or
* swapped-out, this will allocate/swap-in the required pages. Note that the
* whole object is covered by the page-array and pinned in memory.
*
* Use drm_gem_put_pages() to release the array and unpin all pages.
*
* This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
* If you require other GFP-masks, you have to do those allocations yourself.
*
* Note that you are not allowed to change gfp-zones during runtime. That is,
* shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
* set during initialization. If you have special zone constraints, set them
* after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
* to keep pages in the required zone during swap-in.
*/ */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{ {
struct inode *inode;
struct address_space *mapping; struct address_space *mapping;
struct page *p, **pages; struct page *p, **pages;
int i, npages; int i, npages;
/* This is the shared memory object that backs the GEM resource */ /* This is the shared memory object that backs the GEM resource */
inode = file_inode(obj->filp); mapping = file_inode(obj->filp)->i_mapping;
mapping = inode->i_mapping;
/* We already BUG_ON() for non-page-aligned sizes in /* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless * drm_gem_object_init(), so we should never hit this unless
...@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) ...@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
if (pages == NULL) if (pages == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
gfpmask |= mapping_gfp_mask(mapping);
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p)) if (IS_ERR(p))
goto fail; goto fail;
pages[i] = p; pages[i] = p;
...@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) ...@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping) * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required. * so shmem can relocate pages during swapin if required.
*/ */
BUG_ON((gfpmask & __GFP_DMA32) && BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL)); (page_to_pfn(p) >= 0x00100000UL));
} }
......
...@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt) ...@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
WARN_ON(gt->pages); WARN_ON(gt->pages);
pages = drm_gem_get_pages(&gt->gem, 0); pages = drm_gem_get_pages(&gt->gem);
if (IS_ERR(pages)) if (IS_ERR(pages))
return PTR_ERR(pages); return PTR_ERR(pages);
......
...@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj) ...@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
int npages = obj->size >> PAGE_SHIFT; int npages = obj->size >> PAGE_SHIFT;
if (iommu_present(&platform_bus_type)) if (iommu_present(&platform_bus_type))
p = drm_gem_get_pages(obj, 0); p = drm_gem_get_pages(obj);
else else
p = get_pages_vram(obj, npages); p = get_pages_vram(obj, npages);
......
...@@ -233,7 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) ...@@ -233,7 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
WARN_ON(omap_obj->pages); WARN_ON(omap_obj->pages);
pages = drm_gem_get_pages(obj, GFP_KERNEL); pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) { if (IS_ERR(pages)) {
dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
return PTR_ERR(pages); return PTR_ERR(pages);
......
...@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} }
} }
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) static int udl_gem_get_pages(struct udl_gem_object *obj)
{ {
struct page **pages; struct page **pages;
if (obj->pages) if (obj->pages)
return 0; return 0;
pages = drm_gem_get_pages(&obj->base, gfpmask); pages = drm_gem_get_pages(&obj->base);
if (IS_ERR(pages)) if (IS_ERR(pages))
return PTR_ERR(pages); return PTR_ERR(pages);
...@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj) ...@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj)
return 0; return 0;
} }
ret = udl_gem_get_pages(obj, GFP_KERNEL); ret = udl_gem_get_pages(obj);
if (ret) if (ret)
return ret; return ret;
...@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, ...@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
} }
gobj = to_udl_bo(obj); gobj = to_udl_bo(obj);
ret = udl_gem_get_pages(gobj, GFP_KERNEL); ret = udl_gem_get_pages(gobj);
if (ret) if (ret)
goto out; goto out;
ret = drm_gem_create_mmap_offset(obj); ret = drm_gem_create_mmap_offset(obj);
......
...@@ -1574,7 +1574,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj); ...@@ -1574,7 +1574,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed); bool dirty, bool accessed);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment