Commit 3230cfc3 authored by Konrad Rzeszutek Wilk, committed by Dave Airlie

drm/nouveau: enable the ttm dma pool when swiotlb is active V3

If the card is capable of more than 32-bit addressing, then use the
default TTM page pool code, which allocates from anywhere in memory.

Note: If the 'ttm.no_dma' parameter is set, the override is ignored
and the default TTM pool is used.

V2: use pci_set_consistent_dma_mask
V3: rebase on top of the no-memory-accounting changes (where/when is my
    DeLorean when I need it?)

CC: Ben Skeggs <bskeggs@redhat.com>
CC: Francisco Jerez <currojerez@riseup.net>
CC: Dave Airlie <airlied@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
parent c52494f6
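For context, the check that decides whether the DMA pool is needed at all sits just above the nouveau_mem_vram_init() hunk further down. A minimal sketch of that pre-existing logic, reconstructed from the nouveau driver of this era (the NV_50 check and the 40-bit mask are recalled context, not part of this diff):

/* Pre-existing mask selection in nouveau_mem_vram_init(): cards that
 * can address more than 32 bits get a 40-bit DMA mask, so the plain
 * TTM page pool suffices; 32-bit-only cards may need the DMA pool or
 * SWIOTLB bounce buffering.
 */
int dma_bits = 32;

if (dev_priv->card_type >= NV_50 &&
    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
        dma_bits = 40;

ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
        return ret;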
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1049,10 +1049,79 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 	nouveau_fence_unref(&old_fence);
 }
 
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct drm_device *dev;
+	unsigned i;
+	int r;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	dev_priv = nouveau_bdev(ttm->bdev);
+	dev = dev_priv->dev;
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		return ttm_dma_populate(ttm, dev->dev);
+	}
+#endif
+
+	r = ttm_pool_populate(ttm);
+	if (r) {
+		return r;
+	}
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+						   0, PAGE_SIZE,
+						   PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
+			while (--i) {
+				pci_unmap_page(dev->pdev, ttm->dma_address[i],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				ttm->dma_address[i] = 0;
+			}
+			ttm_pool_unpopulate(ttm);
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct drm_device *dev;
+	unsigned i;
+
+	dev_priv = nouveau_bdev(ttm->bdev);
+	dev = dev_priv->dev;
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(ttm, dev->dev);
+		return;
+	}
+#endif
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (ttm->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm->dma_address[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+	}
+
+	ttm_pool_unpopulate(ttm);
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
-	.ttm_tt_populate = &ttm_pool_populate,
-	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
+	.ttm_tt_populate = &nouveau_ttm_tt_populate,
+	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
 	.evict_flags = nouveau_bo_evict_flags,
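One subtlety in the error path of nouveau_ttm_tt_populate() above: "while (--i)" pre-decrements, so dma_address[0] is never unmapped, and a failure at i == 0 wraps the unsigned counter before the test. A sketch of a fuller unwind (an editorial suggestion, not part of this commit):

/* Unwind sketch: if mapping page i failed, pages 0..i-1 were mapped;
 * while (i--) visits exactly those, including index 0, and runs zero
 * times when the very first mapping fails.
 */
while (i--) {
        pci_unmap_page(dev->pdev, ttm->dma_address[i],
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        ttm->dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
return -EFAULT;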
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
 	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
 	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
 	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+	{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
 };
 
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
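The new entry reuses the existing drm_info_list plumbing, so the DMA pool statistics show up next to the plain pool's. A minimal sketch of how nouveau hands such a table to the DRM core (drm_debugfs_create_files() is the DRM helper of this era; the surrounding function is paraphrased, not taken from this diff):

/* Sketch: the table above is registered wholesale at debugfs init, so
 * "ttm_dma_page_pool" appears under /sys/kernel/debug/dri/<minor>/
 * alongside the existing "ttm_page_pool" statistics.
 */
int
nouveau_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(nouveau_debugfs_list,
                                        NOUVEAU_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}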
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
 	if (ret)
 		return ret;
+	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+	if (ret) {
+		/* Reset to default value. */
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+	}
+
 	ret = nouveau_ttm_global_init(dev_priv);
 	if (ret)
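The consistent (coherent) mask governs dma_alloc_coherent() allocations, which is what the TTM DMA pool draws from, while pci_set_dma_mask() covers streaming mappings; hence the separate call with a 32-bit fallback rather than a hard failure. On later kernels the same intent is usually written with the combined helper (a sketch using the newer dma_* API, not code from this commit):

/* Sketch: modern equivalent of the hunk above, setting both the
 * streaming and coherent masks at once and falling back to 32 bits.
 */
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
        dev_warn(&pdev->dev, "no %d-bit DMA, falling back to 32-bit\n",
                 dma_bits);
        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}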
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -13,41 +13,6 @@ struct nouveau_sgdma_be {
 	u64 offset;
 };
 
-static int
-nouveau_sgdma_dma_map(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	int i;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		ttm->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
-						   0, PAGE_SIZE,
-						   PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, ttm->dma_address[i])) {
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void
-nouveau_sgdma_dma_unmap(struct ttm_tt *ttm)
-{
-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct drm_device *dev = nvbe->dev;
-	int i;
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm->dma_address[i]) {
-			pci_unmap_page(dev->pdev, ttm->dma_address[i],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		}
-		ttm->dma_address[i] = 0;
-	}
-}
-
 static void
 nouveau_sgdma_destroy(struct ttm_tt *ttm)
 {
@@ -67,13 +32,8 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;
-	int r;
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
@@ -110,7 +70,6 @@ nv04_sgdma_unbind(struct ttm_tt *ttm)
 		nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
@@ -141,13 +100,8 @@ nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2;
 	u32 cnt = ttm->num_pages;
-	int r;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	while (cnt--) {
 		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
@@ -173,7 +127,6 @@ nv41_sgdma_unbind(struct ttm_tt *ttm)
 	}
 
 	nv41_sgdma_flush(nvbe);
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
@@ -256,13 +209,9 @@ nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	dma_addr_t *list = ttm->dma_address;
 	u32 pte = mem->start << 2, tmp[4];
 	u32 cnt = ttm->num_pages;
-	int i, r;
+	int i;
 
 	nvbe->offset = mem->start << PAGE_SHIFT;
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 
 	if (pte & 0x0000000c) {
 		u32 max = 4 - ((pte >> 2) & 0x3);
@@ -321,7 +270,6 @@ nv44_sgdma_unbind(struct ttm_tt *ttm)
 		nv44_sgdma_fill(pgt, NULL, pte, cnt);
 
 	nv44_sgdma_flush(ttm);
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
@@ -335,13 +283,8 @@ static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
 	struct nouveau_mem *node = mem->mm_node;
-	int r;
 
 	/* noop: bound in move_notify() */
-	r = nouveau_sgdma_dma_map(ttm);
-	if (r) {
-		return r;
-	}
 	node->pages = ttm->dma_address;
 	return 0;
 }
@@ -350,7 +293,6 @@ static int
 nv50_sgdma_unbind(struct ttm_tt *ttm)
 {
 	/* noop: unbound in move_notify() */
-	nouveau_sgdma_dma_unmap(ttm);
 	return 0;
 }
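The sgdma deletions above are the flip side of the new populate/unpopulate hooks: with mapping moved into nouveau_ttm_tt_populate(), the per-generation bind paths no longer need to map pages themselves, and every bind/unbind cycle stops paying the mapping cost. The resulting lifecycle, summarized:

/*
 * Lifecycle after this commit; DMA mapping happens once per object
 * instead of on every GART bind:
 *
 *   nouveau_ttm_tt_populate()   - allocate pages, pci_map_page() each
 *                                 (or ttm_dma_populate() under SWIOTLB)
 *   nvXX_sgdma_bind()           - write ttm->dma_address[] into the
 *                                 GART page tables only
 *   nvXX_sgdma_unbind()         - clear the page tables only
 *   nouveau_ttm_tt_unpopulate() - pci_unmap_page() each, free pages
 */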