Commit 735c4664 authored by Christian König's avatar Christian König

drm/ttm: optimize pool allocations a bit v2

If we got a page pool use it as much as possible.

If we can't get more pages from the pool allocate as much as possible.

Only if that still doesn't work reduce the order and try again.

v2: minor cleanups
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221107195808.1873-1-christian.koenig@amd.com
parent 611fc22c
...@@ -344,6 +344,28 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p) ...@@ -344,6 +344,28 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
return p->private; return p->private;
} }
/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	unsigned int nr = 1 << order;	/* pages covered by this allocation */
	unsigned int i;
	int r;

	/* Set up a DMA mapping first when the caller asked for one. */
	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	/*
	 * Account for the whole compound allocation and hand every
	 * constituent page to the caller's output array.
	 */
	*num_pages -= nr;
	for (i = 0; i < nr; i++)
		(*pages)[i] = p + i;
	*pages += nr;

	return 0;
}
/** /**
* ttm_pool_alloc - Fill a ttm_tt object * ttm_pool_alloc - Fill a ttm_tt object
* *
...@@ -385,45 +407,57 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, ...@@ -385,45 +407,57 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages)); for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
num_pages; num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) { order = min_t(unsigned int, order, __fls(num_pages))) {
bool apply_caching = false;
struct ttm_pool_type *pt; struct ttm_pool_type *pt;
pt = ttm_pool_select_type(pool, tt->caching, order); pt = ttm_pool_select_type(pool, tt->caching, order);
p = pt ? ttm_pool_type_take(pt) : NULL; p = pt ? ttm_pool_type_take(pt) : NULL;
if (p) { if (p) {
apply_caching = true;
} else {
p = ttm_pool_alloc_page(pool, gfp_flags, order);
if (p && PageHighMem(p))
apply_caching = true;
}
if (!p) {
if (order) {
--order;
continue;
}
r = -ENOMEM;
goto error_free_all;
}
if (apply_caching) {
r = ttm_pool_apply_caching(caching, pages, r = ttm_pool_apply_caching(caching, pages,
tt->caching); tt->caching);
if (r) if (r)
goto error_free_page; goto error_free_page;
caching = pages + (1 << order);
do {
r = ttm_pool_page_allocated(pool, order, p,
&dma_addr,
&num_pages,
&pages);
if (r)
goto error_free_page;
if (num_pages < (1 << order))
break;
p = ttm_pool_type_take(pt);
} while (p);
caching = pages;
} }
if (dma_addr) { while (num_pages >= (1 << order) &&
r = ttm_pool_map(pool, order, p, &dma_addr); (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
if (PageHighMem(p)) {
r = ttm_pool_apply_caching(caching, pages,
tt->caching);
if (r)
goto error_free_page;
}
r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
&num_pages, &pages);
if (r) if (r)
goto error_free_page; goto error_free_page;
if (PageHighMem(p))
caching = pages;
} }
num_pages -= 1 << order; if (!p) {
for (i = 1 << order; i; --i) if (order) {
*(pages++) = p++; --order;
continue;
}
r = -ENOMEM;
goto error_free_all;
}
} }
r = ttm_pool_apply_caching(caching, pages, tt->caching); r = ttm_pool_apply_caching(caching, pages, tt->caching);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment