Commit d5f42394 authored by Ben Skeggs

drm/nouveau: rename nouveau_vram to nouveau_mem

This structure will also be used for GART in the near future.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent b5e2f076
...@@ -509,7 +509,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, ...@@ -509,7 +509,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
src_offset = old_mem->start << PAGE_SHIFT; src_offset = old_mem->start << PAGE_SHIFT;
if (old_mem->mem_type == TTM_PL_VRAM) { if (old_mem->mem_type == TTM_PL_VRAM) {
struct nouveau_vram *node = old_mem->mm_node; struct nouveau_mem *node = old_mem->mm_node;
src_offset = node->tmp_vma.offset; src_offset = node->tmp_vma.offset;
} else { } else {
src_offset += dev_priv->gart_info.aper_base; src_offset += dev_priv->gart_info.aper_base;
...@@ -562,7 +562,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, ...@@ -562,7 +562,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
src_offset = old_mem->start << PAGE_SHIFT; src_offset = old_mem->start << PAGE_SHIFT;
if (old_mem->mem_type == TTM_PL_VRAM) { if (old_mem->mem_type == TTM_PL_VRAM) {
struct nouveau_vram *node = old_mem->mm_node; struct nouveau_mem *node = old_mem->mm_node;
src_offset = node->tmp_vma.offset; src_offset = node->tmp_vma.offset;
} else { } else {
src_offset += dev_priv->gart_info.aper_base; src_offset += dev_priv->gart_info.aper_base;
...@@ -729,7 +729,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, ...@@ -729,7 +729,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
* up after ttm destroys the ttm_mem_reg * up after ttm destroys the ttm_mem_reg
*/ */
if (dev_priv->card_type >= NV_50 && old_mem->mem_type == TTM_PL_VRAM) { if (dev_priv->card_type >= NV_50 && old_mem->mem_type == TTM_PL_VRAM) {
struct nouveau_vram *node = old_mem->mm_node; struct nouveau_mem *node = old_mem->mm_node;
ret = nouveau_vm_get(chan->vm, old_mem->num_pages << PAGE_SHIFT, ret = nouveau_vm_get(chan->vm, old_mem->num_pages << PAGE_SHIFT,
nvbo->vma.node->type, NV_MEM_ACCESS_RO, nvbo->vma.node->type, NV_MEM_ACCESS_RO,
...@@ -972,7 +972,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ...@@ -972,7 +972,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break; break;
case TTM_PL_VRAM: case TTM_PL_VRAM:
{ {
struct nouveau_vram *vram = mem->mm_node; struct nouveau_mem *node = mem->mm_node;
u8 page_shift; u8 page_shift;
if (!dev_priv->bar1_vm) { if (!dev_priv->bar1_vm) {
...@@ -983,23 +983,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ...@@ -983,23 +983,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
} }
if (dev_priv->card_type == NV_C0) if (dev_priv->card_type == NV_C0)
page_shift = vram->page_shift; page_shift = node->page_shift;
else else
page_shift = 12; page_shift = 12;
ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
page_shift, NV_MEM_ACCESS_RW, page_shift, NV_MEM_ACCESS_RW,
&vram->bar_vma); &node->bar_vma);
if (ret) if (ret)
return ret; return ret;
nouveau_vm_map(&vram->bar_vma, vram); nouveau_vm_map(&node->bar_vma, node);
if (ret) { if (ret) {
nouveau_vm_put(&vram->bar_vma); nouveau_vm_put(&node->bar_vma);
return ret; return ret;
} }
mem->bus.offset = vram->bar_vma.offset; mem->bus.offset = node->bar_vma.offset;
if (dev_priv->card_type == NV_50) /*XXX*/ if (dev_priv->card_type == NV_50) /*XXX*/
mem->bus.offset -= 0x0020000000ULL; mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.base = pci_resource_start(dev->pdev, 1);
...@@ -1016,16 +1016,16 @@ static void ...@@ -1016,16 +1016,16 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{ {
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct nouveau_vram *vram = mem->mm_node; struct nouveau_mem *node = mem->mm_node;
if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM) if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
return; return;
if (!vram->bar_vma.node) if (!node->bar_vma.node)
return; return;
nouveau_vm_unmap(&vram->bar_vma); nouveau_vm_unmap(&node->bar_vma);
nouveau_vm_put(&vram->bar_vma); nouveau_vm_put(&node->bar_vma);
} }
static int static int
......
...@@ -57,7 +57,7 @@ struct nouveau_fpriv { ...@@ -57,7 +57,7 @@ struct nouveau_fpriv {
#include "nouveau_util.h" #include "nouveau_util.h"
struct nouveau_grctx; struct nouveau_grctx;
struct nouveau_vram; struct nouveau_mem;
#include "nouveau_vm.h" #include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16 #define MAX_NUM_DCB_ENTRIES 16
...@@ -65,7 +65,7 @@ struct nouveau_vram; ...@@ -65,7 +65,7 @@ struct nouveau_vram;
#define NOUVEAU_MAX_CHANNEL_NR 128 #define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15 #define NOUVEAU_MAX_TILE_NR 15
struct nouveau_vram { struct nouveau_mem {
struct drm_device *dev; struct drm_device *dev;
struct nouveau_vma bar_vma; struct nouveau_vma bar_vma;
...@@ -510,8 +510,8 @@ struct nouveau_crypt_engine { ...@@ -510,8 +510,8 @@ struct nouveau_crypt_engine {
struct nouveau_vram_engine { struct nouveau_vram_engine {
int (*init)(struct drm_device *); int (*init)(struct drm_device *);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
u32 type, struct nouveau_vram **); u32 type, struct nouveau_mem **);
void (*put)(struct drm_device *, struct nouveau_vram **); void (*put)(struct drm_device *, struct nouveau_mem **);
bool (*flags_valid)(struct drm_device *, u32 tile_flags); bool (*flags_valid)(struct drm_device *, u32 tile_flags);
}; };
......
...@@ -710,7 +710,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, ...@@ -710,7 +710,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{ {
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
struct nouveau_vram *node = mem->mm_node; struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
if (node->tmp_vma.node) { if (node->tmp_vma.node) {
...@@ -718,7 +718,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, ...@@ -718,7 +718,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
nouveau_vm_put(&node->tmp_vma); nouveau_vm_put(&node->tmp_vma);
} }
vram->put(dev, (struct nouveau_vram **)&mem->mm_node); vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
} }
static int static int
...@@ -731,7 +731,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, ...@@ -731,7 +731,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vram *node; struct nouveau_mem *node;
u32 size_nc = 0; u32 size_nc = 0;
int ret; int ret;
......
...@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); ...@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *); int nv50_vram_init(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_vram **); u32 memtype, struct nouveau_mem **);
void nv50_vram_del(struct drm_device *, struct nouveau_vram **); void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags); bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
int nvc0_vram_init(struct drm_device *); int nvc0_vram_init(struct drm_device *);
int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin, int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
u32 memtype, struct nouveau_vram **); u32 memtype, struct nouveau_mem **);
bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags); bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
#endif #endif
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include "nouveau_vm.h" #include "nouveau_vm.h"
void void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{ {
struct nouveau_vm *vm = vma->vm; struct nouveau_vm *vm = vma->vm;
struct nouveau_mm_node *r; struct nouveau_mm_node *r;
...@@ -40,7 +40,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) ...@@ -40,7 +40,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
u32 max = 1 << (vm->pgt_bits - bits); u32 max = 1 << (vm->pgt_bits - bits);
u32 end, len; u32 end, len;
list_for_each_entry(r, &vram->regions, rl_entry) { list_for_each_entry(r, &node->regions, rl_entry) {
u64 phys = (u64)r->offset << 12; u64 phys = (u64)r->offset << 12;
u32 num = r->length >> bits; u32 num = r->length >> bits;
...@@ -52,7 +52,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) ...@@ -52,7 +52,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
end = max; end = max;
len = end - pte; len = end - pte;
vm->map(vma, pgt, vram, pte, len, phys); vm->map(vma, pgt, node, pte, len, phys);
num -= len; num -= len;
pte += len; pte += len;
...@@ -67,9 +67,9 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) ...@@ -67,9 +67,9 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
} }
/* Map the whole allocation into @vma, starting at offset zero. */
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}
void void
......
...@@ -67,7 +67,7 @@ struct nouveau_vm { ...@@ -67,7 +67,7 @@ struct nouveau_vm {
void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde, void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]); struct nouveau_gpuobj *pgt[2]);
void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
u32 pte, dma_addr_t *, u32 cnt); u32 pte, dma_addr_t *, u32 cnt);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
...@@ -82,8 +82,8 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, ...@@ -82,8 +82,8 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
u32 access, struct nouveau_vma *); u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *); void nouveau_vm_put(struct nouveau_vma *);
void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *); void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *); void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *); void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
...@@ -93,7 +93,7 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, ...@@ -93,7 +93,7 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]); struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
u32 pte, dma_addr_t *, u32 cnt); u32 pte, dma_addr_t *, u32 cnt);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
...@@ -104,7 +104,7 @@ void nv50_vm_flush_engine(struct drm_device *, int engine); ...@@ -104,7 +104,7 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]); struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
u32 pte, dma_addr_t *, u32 cnt); u32 pte, dma_addr_t *, u32 cnt);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
......
...@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev) ...@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev)
} }
/* Backing-store bookkeeping for an NV50 instmem gpuobj. */
struct nv50_gpuobj_node {
	struct nouveau_mem *vram;	/* VRAM allocation backing the object */
	struct nouveau_vma  chan_vma;	/* channel VM mapping — presumably valid
					 * only while mapped; confirm with users */
	u32 align;			/* requested alignment of the allocation */
};
......
...@@ -84,7 +84,7 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, ...@@ -84,7 +84,7 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys) struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
{ {
u32 block; u32 block;
int i; int i;
......
...@@ -48,42 +48,42 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags) ...@@ -48,42 +48,42 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
} }
void void
nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram) nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv; struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *this; struct nouveau_mm_node *this;
struct nouveau_vram *vram; struct nouveau_mem *mem;
vram = *pvram; mem = *pmem;
*pvram = NULL; *pmem = NULL;
if (unlikely(vram == NULL)) if (unlikely(mem == NULL))
return; return;
mutex_lock(&mm->mutex); mutex_lock(&mm->mutex);
while (!list_empty(&vram->regions)) { while (!list_empty(&mem->regions)) {
this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry); this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
list_del(&this->rl_entry); list_del(&this->rl_entry);
nouveau_mm_put(mm, this); nouveau_mm_put(mm, this);
} }
mutex_unlock(&mm->mutex); mutex_unlock(&mm->mutex);
kfree(vram); kfree(mem);
} }
int int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc, nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
u32 type, struct nouveau_vram **pvram) u32 type, struct nouveau_mem **pmem)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv; struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r; struct nouveau_mm_node *r;
struct nouveau_vram *vram; struct nouveau_mem *mem;
int ret; int ret;
if (!types[type]) if (!types[type])
...@@ -92,32 +92,32 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc, ...@@ -92,32 +92,32 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
align >>= 12; align >>= 12;
size_nc >>= 12; size_nc >>= 12;
vram = kzalloc(sizeof(*vram), GFP_KERNEL); mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!vram) if (!mem)
return -ENOMEM; return -ENOMEM;
INIT_LIST_HEAD(&vram->regions); INIT_LIST_HEAD(&mem->regions);
vram->dev = dev_priv->dev; mem->dev = dev_priv->dev;
vram->memtype = type; mem->memtype = type;
vram->size = size; mem->size = size;
mutex_lock(&mm->mutex); mutex_lock(&mm->mutex);
do { do {
ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r); ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
if (ret) { if (ret) {
mutex_unlock(&mm->mutex); mutex_unlock(&mm->mutex);
nv50_vram_del(dev, &vram); nv50_vram_del(dev, &mem);
return ret; return ret;
} }
list_add_tail(&r->rl_entry, &vram->regions); list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length; size -= r->length;
} while (size); } while (size);
mutex_unlock(&mm->mutex); mutex_unlock(&mm->mutex);
r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry); r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
vram->offset = (u64)r->offset << 12; mem->offset = (u64)r->offset << 12;
*pvram = vram; *pmem = mem;
return 0; return 0;
} }
......
...@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) ...@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
void void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys) struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
{ {
u32 next = 1 << (vma->node->type - 8); u32 next = 1 << (vma->node->type - 8);
......
...@@ -58,46 +58,46 @@ nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags) ...@@ -58,46 +58,46 @@ nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
/*
 * Allocate @size bytes of VRAM on NVC0, possibly as multiple regions of at
 * least @ncmin bytes each.  All byte quantities are converted to 4KiB pages
 * (>> 12) before talking to the mm allocator.  On success *pmem holds the
 * new allocation; on failure everything acquired so far is released via
 * nv50_vram_del() and a negative errno is returned.
 */
int
nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
	      u32 type, struct nouveau_mem **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	struct nouveau_mem *mem;
	int ret;

	/* Work in units of 4KiB pages from here on. */
	size >>= 12;
	align >>= 12;
	ncmin >>= 12;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem->regions);
	mem->dev = dev_priv->dev;
	mem->memtype = type;
	mem->size = size;

	/* Grab regions until the request is fully satisfied. */
	mutex_lock(&mm->mutex);
	do {
		ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
		if (ret) {
			mutex_unlock(&mm->mutex);
			nv50_vram_del(dev, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&mm->mutex);

	/* Report the start of the first region as the allocation offset. */
	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	*pmem = mem;
	return 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment