Commit fb4b4927 authored by Kuo-Hsin Yang, committed by Chris Wilson

drm/gem: Mark pinned pages as unevictable

The gem drivers use shmemfs to allocate backing storage for gem objects.
On Samsung Chromebook Plus, the drm/rockchip driver may call
rockchip_gem_get_pages -> drm_gem_get_pages -> shmem_read_mapping_page
to pin a lot of pages, breaking the page reclaim mechanism and causing
oom-killer invocation.

E.g. when the size of a zone is 3.9 GiB, the inactive_ratio is 5. If
active_anon / inactive_anon < 5 and all pages on the inactive_anon LRU
are pinned, page reclaim keeps scanning the inactive_anon LRU without
reclaiming any memory. Reclaim therefore breaks down even though the
rockchip driver has pinned only about 1/6 of the anon LRU pages.
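
For reference, the ratio quoted here comes from the reclaim heuristic in
mm/vmscan.c of this era, which scales inactive_ratio with the square root of
the list size in GiB. Below is a minimal sketch of that calculation only; the
helper name is illustrative, and the real inactive_list_is_low() takes lruvec
and scan_control arguments and applies further conditions.

#include <linux/kernel.h>       /* int_sqrt() */
#include <linux/mm.h>           /* PAGE_SHIFT */

/* Illustrative approximation of the inactive_ratio heuristic. */
static unsigned long example_inactive_ratio(unsigned long inactive,
                                            unsigned long active)
{
        /* Anon LRU size in whole GiB; a 3.9 GiB zone gives gb = 3. */
        unsigned long gb = (inactive + active) >> (30 - PAGE_SHIFT);

        /* int_sqrt(10 * 3) = int_sqrt(30) = 5, the figure quoted above. */
        return gb ? int_sqrt(10 * gb) : 1;
}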

Mark these pinned pages as unevictable to avoid premature oom-killer
invocations. See also the similar patch for the i915 driver [1].

[1]: https://patchwork.freedesktop.org/patch/msgid/20181106132324.17390-1-chris@chris-wilson.co.uk

Signed-off-by: Kuo-Hsin Yang <vovoy@chromium.org>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190108074517.209860-1-vovoy@chromium.org
parent 2513147d
drivers/gpu/drm/drm_gem.c

@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/mem_encrypt.h>
+#include <linux/pagevec.h>
 #include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/drm_gem.h>
@@ -526,6 +527,17 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+{
+        check_move_unevictable_pages(pvec);
+        __pagevec_release(pvec);
+        cond_resched();
+}
+
 /**
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
@@ -551,6 +563,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
         struct address_space *mapping;
         struct page *p, **pages;
+        struct pagevec pvec;
         int i, npages;
 
         /* This is the shared memory object that backs the GEM resource */
@@ -568,6 +581,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
         if (pages == NULL)
                 return ERR_PTR(-ENOMEM);
 
+        mapping_set_unevictable(mapping);
+
         for (i = 0; i < npages; i++) {
                 p = shmem_read_mapping_page(mapping, i);
                 if (IS_ERR(p))
@@ -586,8 +601,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
         return pages;
 
 fail:
-        while (i--)
-                put_page(pages[i]);
+        mapping_clear_unevictable(mapping);
+        pagevec_init(&pvec);
+        while (i--) {
+                if (!pagevec_add(&pvec, pages[i]))
+                        drm_gem_check_release_pagevec(&pvec);
+        }
+        if (pagevec_count(&pvec))
+                drm_gem_check_release_pagevec(&pvec);
 
         kvfree(pages);
         return ERR_CAST(p);
@@ -605,6 +626,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                 bool dirty, bool accessed)
 {
         int i, npages;
+        struct address_space *mapping;
+        struct pagevec pvec;
+
+        mapping = file_inode(obj->filp)->i_mapping;
+        mapping_clear_unevictable(mapping);
 
         /* We already BUG_ON() for non-page-aligned sizes in
          * drm_gem_object_init(), so we should never hit this unless
@@ -614,6 +640,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 
         npages = obj->size >> PAGE_SHIFT;
 
+        pagevec_init(&pvec);
         for (i = 0; i < npages; i++) {
                 if (dirty)
                         set_page_dirty(pages[i]);
@@ -622,8 +649,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                         mark_page_accessed(pages[i]);
 
                 /* Undo the reference we took when populating the table */
-                put_page(pages[i]);
+                if (!pagevec_add(&pvec, pages[i]))
+                        drm_gem_check_release_pagevec(&pvec);
         }
+        if (pagevec_count(&pvec))
+                drm_gem_check_release_pagevec(&pvec);
 
         kvfree(pages);
 }
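
For context, drivers consume these helpers in pairs: drm_gem_get_pages() pins
the shmem pages and now marks the backing mapping unevictable, while
drm_gem_put_pages() clears the flag and moves the pages back onto an evictable
LRU as it drops the references. A minimal sketch of such a driver path is
shown below; the example_* names are hypothetical, and the real rockchip path
referenced in the commit message carries additional state, locking and error
handling.

#include <linux/err.h>          /* PTR_ERR_OR_ZERO() */
#include <drm/drm_gem.h>

/* Hypothetical driver object wrapping a GEM object and its pinned pages. */
struct example_gem_object {
        struct drm_gem_object base;
        struct page **pages;
};

static int example_gem_pin_pages(struct example_gem_object *obj)
{
        /* Pins every shmem page; the backing mapping is now unevictable. */
        obj->pages = drm_gem_get_pages(&obj->base);
        return PTR_ERR_OR_ZERO(obj->pages);
}

static void example_gem_unpin_pages(struct example_gem_object *obj)
{
        /*
         * Clears the unevictable flag and releases the pages in
         * pagevec-sized batches via drm_gem_check_release_pagevec().
         */
        drm_gem_put_pages(&obj->base, obj->pages, true, true);
        obj->pages = NULL;
}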