Commit 5a893fc2 authored by Konrad Rzeszutek Wilk

ttm: Include the 'struct dev' when using the DMA API.

With CONFIG_DMA_API_DEBUG=y, this makes the accounting reported by
'debug_dma_dump_mappings()' be assigned to the correct device
instead of 'fallback'.

No functional change - just cosmetic.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent e0138c26
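For readers unfamiliar with the dma-debug bookkeeping, the sketch below (not part of
this patch; the helper names are made up for illustration) shows why the device pointer
matters: CONFIG_DMA_API_DEBUG records each coherent allocation against the struct device
passed to dma_alloc_coherent(), so passing NULL files everything under the 'fallback'
entry in debug_dma_dump_mappings().

	/*
	 * Minimal sketch (hypothetical helpers, not from this patch): the device
	 * that owns the mapping is passed down to the DMA API so dma-debug can
	 * attribute the allocation to it instead of filing it under 'fallback'.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static void *example_alloc_dma32_page(struct device *dev,
					      dma_addr_t *dma_handle)
	{
		/* 'dev' (rather than NULL) is what dma-debug accounts against. */
		return dma_alloc_coherent(dev, PAGE_SIZE, dma_handle,
					  GFP_USER | GFP_DMA32);
	}

	static void example_free_dma32_page(struct device *dev, void *vaddr,
					    dma_addr_t dma_handle)
	{
		/* Free with the same device so the debug entry is released. */
		dma_free_coherent(dev, PAGE_SIZE, vaddr, dma_handle);
	}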
@@ -559,6 +559,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	if (ret)
 		return ret;
+	dev_priv->ttm.bdev.dev = dev->dev;
 	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
@@ -513,6 +513,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	if (r) {
 		return r;
 	}
+	rdev->mman.bdev.dev = rdev->dev;
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
@@ -664,7 +664,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
  */
 int ttm_get_pages(struct list_head *pages, int flags,
		  enum ttm_caching_state cstate, unsigned count,
-		  dma_addr_t *dma_address)
+		  dma_addr_t *dma_address, struct device *dev)
 {
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p = NULL;
@@ -685,7 +685,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
	for (r = 0; r < count; ++r) {
		if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
			void *addr;
-			addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+			addr = dma_alloc_coherent(dev, PAGE_SIZE,
						  &dma_address[r],
						  gfp_flags);
			if (addr == NULL)
@@ -730,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
			printk(KERN_ERR TTM_PFX
			       "Failed to allocate extra pages "
			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate, NULL);
+			ttm_put_pages(pages, 0, flags, cstate, NULL, NULL);
			return r;
		}
	}
@@ -741,7 +741,8 @@ int ttm_get_pages(struct list_head *pages, int flags,
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
+		   enum ttm_caching_state cstate, dma_addr_t *dma_address,
+		   struct device *dev)
 {
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
@@ -757,7 +758,7 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
			void *addr = page_address(p);
			WARN_ON(!addr || !dma_address[r]);
			if (addr)
-				dma_free_coherent(NULL, PAGE_SIZE,
+				dma_free_coherent(dev, PAGE_SIZE,
						  addr,
						  dma_address[r]);
			dma_address[r] = 0;
@@ -110,7 +110,7 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
		INIT_LIST_HEAD(&h);
		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
-				    &ttm->dma_address[index]);
+				    &ttm->dma_address[index], ttm->be->bdev->dev);
		if (ret != 0)
			return NULL;
@@ -304,7 +304,7 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
		}
	}
	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
-		      ttm->dma_address);
+		      ttm->dma_address, ttm->be->bdev->dev);
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
@@ -322,7 +322,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;
-
+	dev_priv->bdev.dev = dev->dev;
	ret = ttm_bo_device_init(&dev_priv->bdev,
			      dev_priv->bo_global_ref.ref.object,
			      &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
@@ -533,6 +533,7 @@ struct ttm_bo_device {
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
+	struct device *dev;
	rwlock_t vm_lock;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	/*
@@ -37,12 +37,14 @@
  * @cstate: ttm caching state for the page.
  * @count: number of pages to allocate.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * @dev: struct device for appropiate DMA accounting.
  */
 int ttm_get_pages(struct list_head *pages,
		  int flags,
		  enum ttm_caching_state cstate,
		  unsigned count,
-		  dma_addr_t *dma_address);
+		  dma_addr_t *dma_address,
+		  struct device *dev);
 /**
  * Put linked list of pages to pool.
  *
@@ -52,12 +54,14 @@ int ttm_get_pages(struct list_head *pages,
  * @flags: ttm flags for page allocation.
  * @cstate: ttm caching state.
  * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * @dev: struct device for appropiate DMA accounting.
  */
 void ttm_put_pages(struct list_head *pages,
		   unsigned page_count,
		   int flags,
		   enum ttm_caching_state cstate,
-		   dma_addr_t *dma_address);
+		   dma_addr_t *dma_address,
+		   struct device *dev);
 /**
  * Initialize pool allocator.
  */