Commit 811ee9df authored by Christian König

drm/ttm: make sure pool pages are cleared

The old implementation wasn't consistent on this.

But it looks like we depend on this, so better bring it back.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reported-and-tested-by: Mike Galbraith <efault@gmx.de>
Fixes: d099fc8f ("drm/ttm: new TT backend allocation pool v3")
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210210160549.1462-1-christian.koenig@amd.com
parent 1926a050
@@ -33,6 +33,7 @@
 
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
@@ -218,6 +219,15 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
 /* Give pages into a specific pool_type */
 static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 {
+	unsigned int i, num_pages = 1 << pt->order;
+
+	for (i = 0; i < num_pages; ++i) {
+		if (PageHighMem(p))
+			clear_highpage(p + i);
+		else
+			clear_page(page_address(p + i));
+	}
+
 	spin_lock(&pt->lock);
 	list_add(&p->lru, &pt->pages);
 	spin_unlock(&pt->lock);