Commit 822c4d9a authored by Jerome Glisse, committed by Dave Airlie

drm/ttm: page allocation use page array instead of list

Use the ttm_tt pages array for page allocations, and move the list
unwinding into the page allocation functions.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
parent f9517e63
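The caller-visible effect, distilled from the diff below: ttm_get_pages() and ttm_put_pages() now take a struct page ** array rather than a struct list_head, and unwinding on failure happens inside the allocator. A minimal before/after sketch of the single-page path in __ttm_tt_get_page() (error handling trimmed):

	/* Before: allocate one page through a temporary list. */
	struct list_head h;
	INIT_LIST_HEAD(&h);
	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
			    &ttm->dma_address[index]);
	p = list_first_entry(&h, struct page, lru);

	/* After: the page pointer is filled in directly. */
	ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
			    &ttm->dma_address[index]);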
drivers/gpu/drm/ttm/ttm_page_alloc.c

@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
  * @return count of pages still required to fulfill the request.
  */
 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
-		struct list_head *pages, int ttm_flags,
-		enum ttm_caching_state cstate, unsigned count)
+					struct list_head *pages,
+					int ttm_flags,
+					enum ttm_caching_state cstate,
+					unsigned count)
 {
 	unsigned long irq_flags;
 	struct list_head *p;
@@ -664,13 +666,15 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
  * On success pages list will hold count number of correctly
  * cached pages.
  */
-int ttm_get_pages(struct list_head *pages, int flags,
-		  enum ttm_caching_state cstate, unsigned count,
+int ttm_get_pages(struct page **pages, int flags,
+		  enum ttm_caching_state cstate, unsigned npages,
 		  dma_addr_t *dma_address)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct list_head plist;
 	struct page *p = NULL;
 	gfp_t gfp_flags = GFP_USER;
+	unsigned count;
 	int r;
 
 	/* set zero flag for page allocation if required */
@@ -684,7 +688,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		else
 			gfp_flags |= GFP_HIGHUSER;
 
-		for (r = 0; r < count; ++r) {
+		for (r = 0; r < npages; ++r) {
 			p = alloc_page(gfp_flags);
 			if (!p) {
@@ -693,85 +697,100 @@ int ttm_get_pages(struct list_head *pages, int flags,
 				return -ENOMEM;
 			}
 
-			list_add(&p->lru, pages);
+			pages[r] = p;
 		}
 		return 0;
 	}
 
-
 	/* combine zero flag to pool flags */
 	gfp_flags |= pool->gfp_flags;
 
 	/* First we take pages from the pool */
-	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+	INIT_LIST_HEAD(&plist);
+	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	count = 0;
+	list_for_each_entry(p, &plist, lru) {
+		pages[count++] = p;
+	}
 
 	/* clear the pages coming from the pool if requested */
 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-		list_for_each_entry(p, pages, lru) {
+		list_for_each_entry(p, &plist, lru) {
 			clear_page(page_address(p));
 		}
 	}
 
 	/* If pool didn't have enough pages allocate new one. */
-	if (count > 0) {
+	if (npages > 0) {
 		/* ttm_alloc_new_pages doesn't reference pool so we can run
 		 * multiple requests in parallel.
 		 **/
-		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+		INIT_LIST_HEAD(&plist);
+		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+		list_for_each_entry(p, &plist, lru) {
+			pages[count++] = p;
+		}
 		if (r) {
 			/* If there is any pages in the list put them back to
 			 * the pool. */
 			printk(KERN_ERR TTM_PFX
 			       "Failed to allocate extra pages "
 			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate, NULL);
+			ttm_put_pages(pages, count, flags, cstate, NULL);
 			return r;
 		}
 	}
 
-
 	return 0;
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
 {
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
-	struct page *p, *tmp;
+	unsigned i;
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
-
-		list_for_each_entry_safe(p, tmp, pages, lru) {
-			__free_page(p);
+		for (i = 0; i < npages; i++) {
+			if (pages[i]) {
+				if (page_count(pages[i]) != 1)
+					printk(KERN_ERR TTM_PFX
+					       "Erroneous page count. "
+					       "Leaking pages.\n");
+				__free_page(pages[i]);
+				pages[i] = NULL;
+			}
 		}
-		/* Make the pages list empty */
-		INIT_LIST_HEAD(pages);
 		return;
 	}
-	if (page_count == 0) {
-		list_for_each_entry_safe(p, tmp, pages, lru) {
-			++page_count;
-		}
-	}
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
-	list_splice_init(pages, &pool->list);
-	pool->npages += page_count;
+	for (i = 0; i < npages; i++) {
+		if (pages[i]) {
+			if (page_count(pages[i]) != 1)
+				printk(KERN_ERR TTM_PFX
+				       "Erroneous page count. "
+				       "Leaking pages.\n");
+			list_add_tail(&pages[i]->lru, &pool->list);
+			pages[i] = NULL;
+			pool->npages++;
+		}
+	}
 	/* Check that we don't go over the pool limit */
-	page_count = 0;
+	npages = 0;
 	if (pool->npages > _manager->options.max_size) {
-		page_count = pool->npages - _manager->options.max_size;
+		npages = pool->npages - _manager->options.max_size;
 		/* free at least NUM_PAGES_TO_ALLOC number of pages
 		 * to reduce calls to set_memory_wb */
-		if (page_count < NUM_PAGES_TO_ALLOC)
-			page_count = NUM_PAGES_TO_ALLOC;
+		if (npages < NUM_PAGES_TO_ALLOC)
+			npages = NUM_PAGES_TO_ALLOC;
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	if (page_count)
-		ttm_page_pool_free(pool, page_count);
+	if (npages)
+		ttm_page_pool_free(pool, npages);
 }
 
 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
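A property worth noting in the reworked ttm_put_pages(): every array slot it consumes is set back to NULL, so a caller may hand it a sparsely populated array, and handing back the same array twice cannot double-free. A user-space toy model of that contract (put_pages() and the use of malloc()/free() are illustrative stand-ins, not kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy model: free every non-NULL slot and clear it, mirroring how
	 * ttm_put_pages() walks pages[0..npages) and NULLs what it takes. */
	static void put_pages(void **pages, unsigned npages)
	{
		unsigned i;

		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				free(pages[i]);
				pages[i] = NULL;
			}
		}
	}

	int main(void)
	{
		void *pages[4] = { malloc(16), NULL, malloc(16), NULL };

		put_pages(pages, 4);	/* frees slots 0 and 2, clears them */
		put_pages(pages, 4);	/* safe: every slot is already NULL */
		printf("all slots returned\n");
		return 0;
	}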
drivers/gpu/drm/ttm/ttm_tt.c
@@ -66,22 +66,16 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
-	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	if (NULL == (p = ttm->pages[index])) {
 
-		INIT_LIST_HEAD(&h);
-
-		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+		ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
 				    &ttm->dma_address[index]);
-
 		if (ret != 0)
 			return NULL;
 
-		p = list_first_entry(&h, struct page, lru);
-
 		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 		if (unlikely(ret != 0))
 			goto out_err;
@@ -90,9 +84,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 	}
 	return p;
 out_err:
-	INIT_LIST_HEAD(&h);
-	list_add(&p->lru, &h);
-	ttm_put_pages(&h, 1, ttm->page_flags,
+	ttm_put_pages(&p, 1, ttm->page_flags,
 		      ttm->caching_state, &ttm->dma_address[index]);
 	return NULL;
 }
@@ -243,33 +235,19 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
-	int i;
-	unsigned count = 0;
-	struct list_head h;
-	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
+	unsigned i;
 
-	INIT_LIST_HEAD(&h);
-
 	if (be)
 		be->func->clear(be);
 	for (i = 0; i < ttm->num_pages; ++i) {
-		cur_page = ttm->pages[i];
-		ttm->pages[i] = NULL;
-		if (cur_page) {
-			if (page_count(cur_page) != 1)
-				printk(KERN_ERR TTM_PFX
-				       "Erroneous page count. "
-				       "Leaking pages.\n");
+		if (ttm->pages[i]) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 cur_page);
-			list_add(&cur_page->lru, &h);
-			count++;
+						 ttm->pages[i]);
+			ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
+				      ttm->caching_state, &ttm->dma_address[i]);
 		}
 	}
-	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-		      ttm->dma_address);
 	ttm->state = tt_unpopulated;
 }
include/drm/ttm/ttm_page_alloc.h
@@ -38,10 +38,10 @@
  * @count: number of pages to allocate.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
  */
-int ttm_get_pages(struct list_head *pages,
+int ttm_get_pages(struct page **pages,
 		  int flags,
 		  enum ttm_caching_state cstate,
-		  unsigned count,
+		  unsigned npages,
 		  dma_addr_t *dma_address);
 /**
  * Put linked list of pages to pool.
@@ -53,8 +53,8 @@ int ttm_get_pages(struct list_head *pages,
  * @cstate: ttm caching state.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
  */
-void ttm_put_pages(struct list_head *pages,
-		   unsigned page_count,
+void ttm_put_pages(struct page **pages,
+		   unsigned npages,
 		   int flags,
 		   enum ttm_caching_state cstate,
 		   dma_addr_t *dma_address);
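For illustration, a hypothetical driver-side caller under the new prototypes (example_alloc_release() is invented for this sketch, not code from this commit). On failure the pool path of ttm_get_pages() hands partially allocated pages back itself via ttm_put_pages(), so the caller simply propagates the error:

	/* Hypothetical caller (not part of the commit): allocate count
	 * pages into a caller-owned array, use them, then hand the same
	 * array back. */
	static int example_alloc_release(struct page **pages, unsigned count,
					 int flags, enum ttm_caching_state cstate,
					 dma_addr_t *dma_address)
	{
		int r;

		r = ttm_get_pages(pages, flags, cstate, count, dma_address);
		if (r)
			return r;	/* pool path has already unwound */

		/* ... map or bind the pages ... */

		/* ttm_put_pages() clears each slot, so the array is ready
		 * for reuse afterwards. */
		ttm_put_pages(pages, count, flags, cstate, dma_address);
		return 0;
	}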