Commit d188bfa5 authored by Christian König, committed by Alex Deucher

drm/ttm: add support for different pool sizes

Correctly handle different page sizes in the memory accounting.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f9ebec52
drivers/gpu/drm/ttm/ttm_memory.c
@@ -546,7 +546,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 EXPORT_SYMBOL(ttm_mem_global_alloc);
 
 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-			      struct page *page)
+			      struct page *page, uint64_t size)
 {
 	struct ttm_mem_zone *zone = NULL;
@@ -563,10 +563,11 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
 		zone = glob->zone_kernel;
 #endif
-	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, false, false);
+	return ttm_mem_global_alloc_zone(glob, zone, size, false, false);
 }
 
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
+			      uint64_t size)
 {
 	struct ttm_mem_zone *zone = NULL;
@@ -577,10 +578,9 @@ void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
 		zone = glob->zone_kernel;
 #endif
-	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+	ttm_mem_global_free_zone(glob, zone, size);
 }
 
 size_t ttm_round_pot(size_t size)
 {
 	if ((size & (size - 1)) == 0)
drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -882,7 +882,8 @@ int ttm_pool_populate(struct ttm_tt *ttm)
 			return -ENOMEM;
 		}
 
-		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i]);
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						PAGE_SIZE);
 		if (unlikely(ret != 0)) {
 			ttm_pool_unpopulate(ttm);
 			return -ENOMEM;
@@ -909,7 +910,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 	for (i = 0; i < ttm->num_pages; ++i) {
 		if (ttm->pages[i]) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i]);
+						 ttm->pages[i], PAGE_SIZE);
 			ttm_put_pages(&ttm->pages[i], 1,
 				      ttm->page_flags,
 				      ttm->caching_state);
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -902,7 +902,8 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 			return -ENOMEM;
 		}
 
-		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i]);
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						pool->size);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
@@ -967,13 +968,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	if (is_cached) {
 		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 d_page->p);
+						 d_page->p, pool->size);
 			ttm_dma_page_put(pool, d_page);
 		}
 	} else {
 		for (i = 0; i < count; i++) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i]);
+						 ttm->pages[i], pool->size);
 		}
 	}
include/drm/ttm/ttm_memory.h
@@ -150,9 +150,9 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 extern void ttm_mem_global_free(struct ttm_mem_global *glob,
 				uint64_t amount);
 extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-				     struct page *page);
+				     struct page *page, uint64_t size);
 extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
-				     struct page *page);
+				     struct page *page, uint64_t size);
 extern size_t ttm_round_pot(size_t size);
 extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
 #endif
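
In short: ttm_mem_global_alloc_page()/ttm_mem_global_free_page() now take the number of bytes being accounted instead of hard-coding PAGE_SIZE, so the regular page allocator keeps reporting PAGE_SIZE while the DMA pool reports its own pool->size. The sketch below is a minimal userspace model of that idea only; the names (toy_accounting, account_alloc, account_free, TOY_PAGE_SIZE) are made up for illustration and are not part of the TTM API.

/*
 * Toy userspace model of the accounting change above -- not kernel code.
 * account_alloc()/account_free() stand in for the TTM alloc/free helpers:
 * the caller reports how many bytes an allocation actually represents,
 * so pools whose chunks are larger than one page no longer under-count.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096ULL

struct toy_accounting {
	uint64_t used;   /* bytes currently accounted */
	uint64_t limit;  /* simple zone-style limit */
};

/* Analogue of ttm_mem_global_alloc_page(): caller supplies the size. */
static bool account_alloc(struct toy_accounting *acc, uint64_t size)
{
	if (acc->used + size > acc->limit)
		return false;          /* would exceed the limit */
	acc->used += size;
	return true;
}

/* Analogue of ttm_mem_global_free_page(): the same size is passed back. */
static void account_free(struct toy_accounting *acc, uint64_t size)
{
	acc->used -= size;
}

int main(void)
{
	struct toy_accounting acc = { .used = 0, .limit = 1ULL << 20 };

	/* A plain page pool still accounts one page at a time. */
	account_alloc(&acc, TOY_PAGE_SIZE);

	/* A pool handing out larger chunks reports its real per-chunk size
	 * rather than being counted as a single page. */
	uint64_t pool_size = 16 * TOY_PAGE_SIZE;
	account_alloc(&acc, pool_size);

	printf("accounted: %llu bytes\n", (unsigned long long)acc.used);

	account_free(&acc, pool_size);
	account_free(&acc, TOY_PAGE_SIZE);
	printf("accounted after free: %llu bytes\n",
	       (unsigned long long)acc.used);
	return 0;
}

The key property is simply that allocation and free must report the same per-chunk byte count, which a pool with entries larger than a single page could not do while PAGE_SIZE was hard-coded inside the accounting helpers.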