Commit 8593e9b8 authored by Christian König, committed by Alex Deucher

drm/ttm: move more logic into ttm_page_pool_get_pages

Make it easier to add a huge page pool.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0284f1ea
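For orientation, the sketch below (not part of the patch) illustrates the calling convention this refactor introduces: ttm_page_pool_get_pages() now clears the pages itself when TTM_PAGE_FLAG_ZERO_ALLOC is set, allocates any shortfall via ttm_alloc_new_pages(), and returns zero or a negative error code instead of the number of pages still missing. The hypothetical helper my_fill_pages() mirrors how ttm_get_pages() consumes it in the hunks below; it is assumed to live inside ttm_page_alloc.c, where ttm_page_pool_get_pages(), ttm_put_pages() and struct ttm_page_pool are file-local.

/* Hypothetical helper, assumed to sit in ttm_page_alloc.c next to the
 * static functions it calls; an illustration only, not part of the patch.
 */
static int my_fill_pages(struct ttm_page_pool *pool, struct page **pages,
			 unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct list_head plist;
	struct page *p;
	unsigned count = 0;
	int r;

	/* The pool getter now clears pages (when TTM_PAGE_FLAG_ZERO_ALLOC is
	 * set) and tops up the list from the system allocator itself; it
	 * returns 0 or a negative errno rather than a remaining page count.
	 */
	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);

	/* Collect whatever landed on the list, even on failure, so a partial
	 * allocation can be handed back to the pool below.
	 */
	list_for_each_entry(p, &plist, lru)
		pages[count++] = p;

	if (r) {
		ttm_put_pages(pages, count, flags, cstate);
		return r;
	}

	return 0;
}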
@@ -627,11 +627,11 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 }
 
 /**
- * Cut 'count' number of pages from the pool and put them on the return list.
+ * Allocate pages from the pool and put them on the return list.
  *
- * @return count of pages still required to fulfill the request.
+ * @return zero for success or negative error code.
  */
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 					struct list_head *pages,
 					int ttm_flags,
 					enum ttm_caching_state cstate,
@@ -640,6 +640,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 	unsigned long irq_flags;
 	struct list_head *p;
 	unsigned i;
+	int r = 0;
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
 	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
@@ -672,7 +673,35 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 	count = 0;
 out:
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	return count;
+
+	/* clear the pages coming from the pool if requested */
+	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+		struct page *page;
+
+		list_for_each_entry(page, pages, lru) {
+			if (PageHighMem(page))
+				clear_highpage(page);
+			else
+				clear_page(page_address(page));
+		}
+	}
+
+	/* If pool didn't have enough pages allocate new one. */
+	if (count) {
+		gfp_t gfp_flags = pool->gfp_flags;
+
+		/* set zero flag for page allocation if required */
+		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+			gfp_flags |= __GFP_ZERO;
+
+		/* ttm_alloc_new_pages doesn't reference pool so we can run
+		 * multiple requests in parallel.
+		 **/
+		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
+					count);
+	}
+
+	return r;
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
@@ -742,18 +771,18 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct list_head plist;
 	struct page *p = NULL;
-	gfp_t gfp_flags = GFP_USER;
 	unsigned count;
 	int r;
 
-	/* set zero flag for page allocation if required */
-	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
 	/* No pool for cached pages */
 	if (pool == NULL) {
+		gfp_t gfp_flags = GFP_USER;
 		unsigned i, j;
 
+		/* set zero flag for page allocation if required */
+		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+			gfp_flags |= __GFP_ZERO;
+
 		if (flags & TTM_PAGE_FLAG_DMA32)
 			gfp_flags |= GFP_DMA32;
 		else
@@ -792,45 +821,22 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		return 0;
 	}
 
-	/* combine zero flag to pool flags */
-	gfp_flags |= pool->gfp_flags;
-
 	/* First we take pages from the pool */
 	INIT_LIST_HEAD(&plist);
-	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+
 	count = 0;
-	list_for_each_entry(p, &plist, lru) {
+	list_for_each_entry(p, &plist, lru)
 		pages[count++] = p;
-	}
 
-	/* clear the pages coming from the pool if requested */
-	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-		list_for_each_entry(p, &plist, lru) {
-			if (PageHighMem(p))
-				clear_highpage(p);
-			else
-				clear_page(page_address(p));
-		}
-	}
-
-	/* If pool didn't have enough pages allocate new one. */
-	if (npages > 0) {
-		/* ttm_alloc_new_pages doesn't reference pool so we can run
-		 * multiple requests in parallel.
-		 **/
-		INIT_LIST_HEAD(&plist);
-		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
-		list_for_each_entry(p, &plist, lru) {
-			pages[count++] = p;
-		}
-		if (r) {
-			/* If there is any pages in the list put them back to
-			 * the pool. */
-			pr_err("Failed to allocate extra pages for large request\n");
-			ttm_put_pages(pages, count, flags, cstate);
-			return r;
-		}
+	if (r) {
+		/* If there is any pages in the list put them back to
+		 * the pool.
+		 */
+		pr_err("Failed to allocate extra pages for large request\n");
+		ttm_put_pages(pages, count, flags, cstate);
+		return r;
	}
 
 	return 0;
 }
......