Commit 9de2fb99 authored by Roger He, committed by Alex Deucher

drm/ttm: use an operation ctx for ttm_mem_global_alloc_page

Forward the operation context to ttm_mem_global_alloc_page as well; the
ultimate goal is to enable swapout for reserved BOs.

Here, reserved BOs are all the BOs which share the same reservation object.
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 279c01f6
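
For context: after this change, callers build the struct ttm_operation_ctx themselves and forward it to ttm_mem_global_alloc_page(), instead of the callee hard-coding one internally. The sketch below only illustrates that calling convention; the helper populate_pages_sketch() is hypothetical and simply mirrors what ttm_pool_populate() does in this patch.

/*
 * Hypothetical caller-side sketch (not part of this patch): the operation
 * context is created once by the caller and forwarded to
 * ttm_mem_global_alloc_page() for every page.
 */
#include <linux/mm.h>			/* PAGE_SIZE */
#include <drm/ttm/ttm_bo_api.h>		/* struct ttm_operation_ctx */
#include <drm/ttm/ttm_bo_driver.h>	/* struct ttm_tt */
#include <drm/ttm/ttm_memory.h>		/* ttm_mem_global_alloc_page() */

static int populate_pages_sketch(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,	/* same defaults the callee used before */
		.no_wait_gpu = false
	};
	unsigned i;
	int ret;

	for (i = 0; i < ttm->num_pages; ++i) {
		/* ctx is forwarded instead of being rebuilt inside the callee */
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						PAGE_SIZE, &ctx);
		if (unlikely(ret != 0))
			return ret;	/* a real caller would unpopulate here */
	}
	return 0;
}
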
drivers/gpu/drm/ttm/ttm_memory.c

@@ -539,14 +539,10 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 EXPORT_SYMBOL(ttm_mem_global_alloc);
 
 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-			      struct page *page, uint64_t size)
+			      struct page *page, uint64_t size,
+			      struct ttm_operation_ctx *ctx)
 {
 	struct ttm_mem_zone *zone = NULL;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false
-	};
 
 	/**
 	 * Page allocations may be registed in a single zone
@@ -560,7 +556,7 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
 		zone = glob->zone_kernel;
 #endif
-	return ttm_mem_global_alloc_zone(glob, zone, size, &ctx);
+	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
 }
 
 void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
drivers/gpu/drm/ttm/ttm_page_alloc.c

@@ -1061,6 +1061,10 @@ void ttm_page_alloc_fini(void)
 int ttm_pool_populate(struct ttm_tt *ttm)
 {
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	unsigned i;
 	int ret;
@@ -1076,7 +1080,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
 	for (i = 0; i < ttm->num_pages; ++i) {
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						PAGE_SIZE);
+						PAGE_SIZE, &ctx);
 		if (unlikely(ret != 0)) {
 			ttm_pool_unpopulate(ttm);
 			return -ENOMEM;
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c

@@ -927,6 +927,10 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
 	enum pool_type type;
@@ -962,7 +966,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 			break;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						pool->size);
+						pool->size, &ctx);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
@@ -998,7 +1002,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 		}
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						pool->size);
+						pool->size, &ctx);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
include/drm/ttm/ttm_memory.h

@@ -84,7 +84,8 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 extern void ttm_mem_global_free(struct ttm_mem_global *glob,
 				uint64_t amount);
 extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-				      struct page *page, uint64_t size);
+				      struct page *page, uint64_t size,
+				      struct ttm_operation_ctx *ctx);
 extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
 				     struct page *page, uint64_t size);
 extern size_t ttm_round_pot(size_t size);