Commit de1e7cd6 authored by Dave Airlie

Merge branch 'stable/ttm.pci-api.v5' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen into drm-next

* 'stable/ttm.pci-api.v5' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  ttm: Include the 'struct dev' when using the DMA API.
  nouveau/ttm/PCIe: Use dma_addr if TTM has set it.
  radeon/ttm/PCIe: Use dma_addr if TTM has set it.
  ttm: Expand (*populate) to support an array of DMA addresses.
  ttm: Utilize the DMA API for pages that have TTM_PAGE_FLAG_DMA32 set.
  ttm: Introduce a placeholder for DMA (bus) addresses.
parents 7811bddb 5a893fc2
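
The common thread of the series: TTM's page allocator can now return a per-page array of DMA (bus) addresses, and every backend's (*populate) hook grows a dma_addr_t * argument. A backend compares each entry against DMA_ERROR_CODE to decide whether TTM already allocated that page through the DMA API (reuse the bus address, and skip the unmap at teardown) or whether it still has to map the page itself. The sketch below condenses that contract for illustration only; struct my_backend and its fields are invented and appear nowhere in this merge.

/* Illustrative sketch, not code from this merge: how a backend is
 * expected to consume the new dma_addrs argument to (*populate). */
static int my_backend_populate(struct ttm_backend *be,
			       unsigned long num_pages, struct page **pages,
			       struct page *dummy_read_page,
			       dma_addr_t *dma_addrs)
{
	struct my_backend *mb = container_of(be, struct my_backend, backend);
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		if (dma_addrs[i] != DMA_ERROR_CODE) {
			/* TTM got this page from dma_alloc_coherent();
			 * reuse the bus address and remember that we must
			 * not pci_unmap_page() it on teardown. */
			mb->bus_addrs[i] = dma_addrs[i];
			mb->ttm_alloced[i] = true;
		} else {
			/* Legacy path: create a streaming mapping. */
			mb->bus_addrs[i] = pci_map_page(mb->pdev, pages[i], 0,
							PAGE_SIZE,
							PCI_DMA_BIDIRECTIONAL);
			mb->ttm_alloced[i] = false;
		}
	}
	mb->num_pages = num_pages;
	return 0;
}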

drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -409,6 +409,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	dev_priv->ttm.bdev.dev = dev->dev;
 	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
 				 dev_priv->ttm.bo_global_ref.ref.object,
 				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,

drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
+	bool *ttm_alloced;
 	unsigned nr_pages;
 
 	u64 offset;
@@ -20,7 +21,8 @@ struct nouveau_sgdma_be {
 
 static int
 nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-		       struct page **pages, struct page *dummy_read_page)
+		       struct page **pages, struct page *dummy_read_page,
+		       dma_addr_t *dma_addrs)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_device *dev = nvbe->dev;
@@ -34,8 +36,17 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 	if (!nvbe->pages)
 		return -ENOMEM;
 
+	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+	if (!nvbe->ttm_alloced)
+		return -ENOMEM;
+
 	nvbe->nr_pages = 0;
 	while (num_pages--) {
+		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+			nvbe->pages[nvbe->nr_pages] =
+					dma_addrs[nvbe->nr_pages];
+			nvbe->ttm_alloced[nvbe->nr_pages] = true;
+		} else {
 		nvbe->pages[nvbe->nr_pages] =
 			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
 				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
@@ -44,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 			be->func->clear(be);
 			return -EFAULT;
 		}
+		}
 
 		nvbe->nr_pages++;
 	}
@@ -65,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 			be->func->unbind(be);
 
 		while (nvbe->nr_pages--) {
+			if (!nvbe->ttm_alloced[nvbe->nr_pages])
 			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 		kfree(nvbe->pages);
+		kfree(nvbe->ttm_alloced);
 		nvbe->pages = NULL;
+		nvbe->ttm_alloced = NULL;
 		nvbe->nr_pages = 0;
 	}
 }

drivers/gpu/drm/radeon/radeon.h
@@ -328,6 +328,7 @@ struct radeon_gart {
 	union radeon_gart_table		table;
 	struct page			**pages;
 	dma_addr_t			*pages_addr;
+	bool				*ttm_alloced;
 	bool				ready;
 };
@@ -340,7 +341,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			int pages);
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist);
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr);
 
 /*

drivers/gpu/drm/radeon/radeon_gart.c
@@ -149,6 +149,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
+			if (!rdev->gart.ttm_alloced[p])
 			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 }
 
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
 {
 	unsigned t;
 	unsigned p;
@@ -180,6 +181,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
+		/* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+		 * is requested. */
+		if (dma_addr[i] != DMA_ERROR_CODE) {
+			rdev->gart.ttm_alloced[p] = true;
+			rdev->gart.pages_addr[p] = dma_addr[i];
+		} else {
 		/* we need to support large memory configurations */
 		/* assume that unbind have already been call on the range */
 		rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
@@ -190,6 +197,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 			radeon_gart_unbind(rdev, offset, pages);
 			return -ENOMEM;
 		}
+		}
 		rdev->gart.pages[p] = pagelist[i];
 		page_base = rdev->gart.pages_addr[p];
 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
+	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+					 rdev->gart.num_cpu_pages, GFP_KERNEL);
+	if (rdev->gart.ttm_alloced == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
+	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
+	rdev->gart.ttm_alloced = NULL;
 }

drivers/gpu/drm/radeon/radeon_ttm.c
@@ -513,6 +513,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	if (r) {
 		return r;
 	}
+	rdev->mman.bdev.dev = rdev->dev;
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
 			       rdev->mman.bo_global_ref.ref.object,
@@ -647,6 +648,7 @@ struct radeon_ttm_backend {
 	unsigned long			num_pages;
 	struct page			**pages;
 	struct page			*dummy_read_page;
+	dma_addr_t			*dma_addrs;
 	bool				populated;
 	bool				bound;
 	unsigned			offset;
@@ -655,12 +657,14 @@ struct radeon_ttm_backend {
 static int radeon_ttm_backend_populate(struct ttm_backend *backend,
 				       unsigned long num_pages,
 				       struct page **pages,
-				       struct page *dummy_read_page)
+				       struct page *dummy_read_page,
+				       dma_addr_t *dma_addrs)
 {
 	struct radeon_ttm_backend *gtt;
 
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->pages = pages;
+	gtt->dma_addrs = dma_addrs;
 	gtt->num_pages = num_pages;
 	gtt->dummy_read_page = dummy_read_page;
 	gtt->populated = true;
@@ -673,6 +677,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->pages = NULL;
+	gtt->dma_addrs = NULL;
 	gtt->num_pages = 0;
 	gtt->dummy_read_page = NULL;
 	gtt->populated = false;
@@ -693,7 +698,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
 			  gtt->num_pages, bo_mem, backend);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     gtt->num_pages, gtt->pages);
+			     gtt->num_pages, gtt->pages, gtt->dma_addrs);
 	if (r) {
 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
 			  gtt->num_pages, gtt->offset);

drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -47,7 +47,8 @@ struct ttm_agp_backend {
 
 static int ttm_agp_populate(struct ttm_backend *backend,
 			    unsigned long num_pages, struct page **pages,
-			    struct page *dummy_read_page)
+			    struct page *dummy_read_page,
+			    dma_addr_t *dma_addrs)
 {
 	struct ttm_agp_backend *agp_be =
 	    container_of(backend, struct ttm_agp_backend, backend);

drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -38,6 +38,7 @@
 #include <linux/mm.h>
 #include <linux/seq_file.h> /* for seq_printf */
 #include <linux/slab.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/atomic.h>
@@ -662,7 +663,8 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
  * cached pages.
  */
 int ttm_get_pages(struct list_head *pages, int flags,
-		  enum ttm_caching_state cstate, unsigned count)
+		  enum ttm_caching_state cstate, unsigned count,
+		  dma_addr_t *dma_address, struct device *dev)
 {
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p = NULL;
@@ -681,6 +683,15 @@ int ttm_get_pages(struct list_head *pages, int flags,
 			gfp_flags |= GFP_HIGHUSER;
 
 		for (r = 0; r < count; ++r) {
+			if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+				void *addr;
+				addr = dma_alloc_coherent(dev, PAGE_SIZE,
+							  &dma_address[r],
+							  gfp_flags);
+				if (addr == NULL)
+					return -ENOMEM;
+				p = virt_to_page(addr);
+			} else
 			p = alloc_page(gfp_flags);
 			if (!p) {
@@ -688,7 +699,6 @@ int ttm_get_pages(struct list_head *pages, int flags,
 				       "Unable to allocate page.");
 				return -ENOMEM;
 			}
-
 			list_add(&p->lru, pages);
 		}
 		return 0;
@@ -720,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 			printk(KERN_ERR TTM_PFX
 			       "Failed to allocate extra pages "
 			       "for large request.");
-			ttm_put_pages(pages, 0, flags, cstate);
+			ttm_put_pages(pages, 0, flags, cstate, NULL, NULL);
 			return r;
 		}
 	}
@@ -731,17 +741,30 @@ int ttm_get_pages(struct list_head *pages, int flags,
 
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
-		   enum ttm_caching_state cstate)
+		   enum ttm_caching_state cstate, dma_addr_t *dma_address,
+		   struct device *dev)
 {
 	unsigned long irq_flags;
 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	struct page *p, *tmp;
+	unsigned r;
 
 	if (pool == NULL) {
 		/* No pool for this memory type so free the pages */
 
+		r = page_count-1;
 		list_for_each_entry_safe(p, tmp, pages, lru) {
+			if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+				void *addr = page_address(p);
+				WARN_ON(!addr || !dma_address[r]);
+				if (addr)
+					dma_free_coherent(dev, PAGE_SIZE,
+							  addr,
+							  dma_address[r]);
+				dma_address[r] = 0;
+			} else
 			__free_page(p);
+			r--;
 		}
 		/* Make the pages list empty */
 		INIT_LIST_HEAD(pages);
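
The two ttm_page_alloc.c hunks above change the pool's contract: with TTM_PAGE_FLAG_DMA32 set and a dma_address array supplied, pages are obtained with dma_alloc_coherent() and must be returned through dma_free_coherent(), so a caller has to hand the same array and struct device to both sides. A hypothetical caller, for illustration only (MY_NUM_PAGES and my_alloc_dma32_pages are invented):

/* Illustrative pairing of the reworked pool API. */
#define MY_NUM_PAGES 4

static int my_alloc_dma32_pages(struct ttm_bo_device *bdev)
{
	struct list_head pages;
	dma_addr_t dma_address[MY_NUM_PAGES];
	int ret;

	INIT_LIST_HEAD(&pages);
	ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_DMA32, tt_cached,
			    MY_NUM_PAGES, dma_address, bdev->dev);
	if (ret)
		return ret;

	/* ... use the pages ... */

	ttm_put_pages(&pages, MY_NUM_PAGES, TTM_PAGE_FLAG_DMA32, tt_cached,
		      dma_address, bdev->dev);
	return 0;
}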

drivers/gpu/drm/ttm/ttm_tt.c
@@ -49,12 +49,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
 	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+	ttm->dma_address = drm_calloc_large(ttm->num_pages,
+					    sizeof(*ttm->dma_address));
 }
 
 static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 {
 	drm_free_large(ttm->pages);
 	ttm->pages = NULL;
+	drm_free_large(ttm->dma_address);
+	ttm->dma_address = NULL;
 }
 
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
@@ -105,7 +109,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 
 	INIT_LIST_HEAD(&h);
 
-	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+	ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+			    &ttm->dma_address[index], ttm->be->bdev->dev);
 
 	if (ret != 0)
 		return NULL;
@@ -164,7 +169,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
 	}
 
 	be->func->populate(be, ttm->num_pages, ttm->pages,
-			   ttm->dummy_read_page);
+			   ttm->dummy_read_page, ttm->dma_address);
 	ttm->state = tt_unbound;
 	return 0;
 }
@@ -298,7 +303,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 			count++;
 		}
 	}
-	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
+	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+		      ttm->dma_address, ttm->be->bdev->dev);
 	ttm->state = tt_unpopulated;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;

drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -102,7 +102,8 @@ struct vmw_ttm_backend {
 
 static int vmw_ttm_populate(struct ttm_backend *backend,
 			    unsigned long num_pages, struct page **pages,
-			    struct page *dummy_read_page)
+			    struct page *dummy_read_page,
+			    dma_addr_t *dma_addrs)
 {
 	struct vmw_ttm_backend *vmw_be =
 	    container_of(backend, struct vmw_ttm_backend, backend);

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -322,7 +322,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-
+	dev_priv->bdev.dev = dev->dev;
 	ret = ttm_bo_device_init(&dev_priv->bdev,
 				 dev_priv->bo_global_ref.ref.object,
 				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,

include/drm/ttm/ttm_bo_driver.h
@@ -50,13 +50,15 @@ struct ttm_backend_func {
 	 * @pages: Array of pointers to ttm pages.
 	 * @dummy_read_page: Page to be used instead of NULL pages in the
 	 * array @pages.
+	 * @dma_addrs: Array of DMA (bus) addresses of the ttm pages.
 	 *
 	 * Populate the backend with ttm pages. Depending on the backend,
 	 * it may or may not copy the @pages array.
 	 */
 	int (*populate) (struct ttm_backend *backend,
 			 unsigned long num_pages, struct page **pages,
-			 struct page *dummy_read_page);
+			 struct page *dummy_read_page,
+			 dma_addr_t *dma_addrs);
 
 	/**
 	 * struct ttm_backend_func member clear
 	 *
@@ -149,6 +151,7 @@ enum ttm_caching_state {
  * @swap_storage: Pointer to shmem struct file for swap storage.
  * @caching_state: The current caching state of the pages.
  * @state: The current binding state of the pages.
+ * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -173,6 +176,7 @@ struct ttm_tt {
 		tt_unbound,
 		tt_unpopulated,
 	} state;
+	dma_addr_t *dma_address;
 };
 
 #define TTM_MEMTYPE_FLAG_FIXED	(1 << 0) /* Fixed (on-card) PCI memory */
@@ -547,6 +551,7 @@ struct ttm_bo_device {
 	struct list_head device_list;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
+	struct device *dev;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 	spinlock_t fence_lock;
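
The struct device pointer added to struct ttm_bo_device above is what the nouveau, radeon, and vmwgfx hunks earlier wire up: each driver records the underlying device before ttm_bo_device_init(), because ttm_tt now reaches it through ttm->be->bdev->dev when allocating and freeing pages. A condensed sketch of that driver-load pattern, with invented names (my_driver_priv, my_bo_driver, MY_FILE_PAGE_OFFSET):

/* Illustrative driver-load pattern; not from any one driver here. */
static int my_driver_load(struct drm_device *dev)
{
	struct my_driver_priv *priv = dev->dev_private;

	priv->bdev.dev = dev->dev;	/* the underlying struct device */
	return ttm_bo_device_init(&priv->bdev,
				  priv->bo_global_ref.ref.object,
				  &my_bo_driver, MY_FILE_PAGE_OFFSET,
				  true);
}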

include/drm/ttm/ttm_page_alloc.h
@@ -36,11 +36,15 @@
 * @flags: ttm flags for page allocation.
 * @cstate: ttm caching state for the page.
 * @count: number of pages to allocate.
+ * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * @dev: struct device for appropriate DMA accounting.
 */
 int ttm_get_pages(struct list_head *pages,
 		  int flags,
 		  enum ttm_caching_state cstate,
-		  unsigned count);
+		  unsigned count,
+		  dma_addr_t *dma_address,
+		  struct device *dev);
 
/**
 * Put linked list of pages to pool.
 *
@@ -49,11 +53,15 @@ int ttm_get_pages(struct list_head *pages,
 * count.
 * @flags: ttm flags for page allocation.
 * @cstate: ttm caching state.
+ * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
+ * @dev: struct device for appropriate DMA accounting.
 */
 void ttm_put_pages(struct list_head *pages,
 		   unsigned page_count,
 		   int flags,
-		   enum ttm_caching_state cstate);
+		   enum ttm_caching_state cstate,
+		   dma_addr_t *dma_address,
+		   struct device *dev);
 
/**
 * Initialize pool allocator.
 */