Commit c706d7b9 authored by Dave Airlie

Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-core-next

* 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6: (58 commits)
  drm/nouveau: fix off-by-one
  drm/nouveau/temp: Add default calibration values for nv67
  drm/nouveau/temp: Fix signed/unsigned int logic
  drm/nvc0: push prunk140 irq messages to debug loglevel
  drm/nouveau: un-blacklist nvce accel
  drm/nouveau: fix null pointer deref on pre-nv50 chipsets
  drm/nouveau: rework vram init/fini ordering a little
  drm/nouveau: shut lockdep up if last vm ref needs to destroy pgd
  drm/nouveau: fix display takedown order to match reverse init order
  drm/nvc0: enable per-client address spaces
  drm/nouveau: add some debug output if nouveau_mm busy at destroy time
  drm/nv50: enable use of per-client gpu address space
  drm/nouveau: remove implicit mapping of every bo into chan_vm
  drm/nouveau: remove 'chan' argument from nouveau_bo_new
  drm/nouveau: fixup gem_info ioctl to return client-specific bo virtual
  drm/nvc0: explicitly map PDISP semaphore buffer into each channel's vm
  drm/nv50-nvc0: lookup pushbuf virtual address on dma_push
  drm/nv84-nvc0: explicitly map semaphore buffer into channel vm
  drm/nv50-nvc0: explicitly map pushbuf bo into channel vm
  drm/nv50-nvc0: explicitly map notifier bo into channel vm
  ...
parents 033b5650 9a11dd65
@@ -5186,7 +5186,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	load_table_ptr = ROM16(bios->data[bitentry->offset]);
 
 	if (load_table_ptr == 0x0) {
-		NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
+		NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n");
 		return -EINVAL;
 	}
@@ -6377,6 +6377,37 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 		}
 	}
 
+	/* Some other twisted XFX board (rhbz#694914)
+	 *
+	 * The DVI/VGA encoder combo that's supposed to represent the
+	 * DVI-I connector actually point at two different ones, and
+	 * the HDMI connector ends up paired with the VGA instead.
+	 *
+	 * Connector table is missing anything for VGA at all, pointing it
+	 * an invalid conntab entry 2 so we figure it out ourself.
+	 */
+	if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
+		if (idx == 0) {
+			*conn = 0x02002300; /* VGA, connector 2 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 1) {
+			*conn = 0x01010312; /* DVI, connector 0 */
+			*conf = 0x00020030;
+		} else
+		if (idx == 2) {
+			*conn = 0x04020310; /* VGA, connector 0 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 3) {
+			*conn = 0x02021322; /* HDMI, connector 1 */
+			*conf = 0x00020010;
+		} else {
+			*conn = 0x0000000e; /* EOL */
+			*conf = 0x00000000;
+		}
+	}
+
 	return true;
 }
@@ -49,16 +49,12 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-	if (nvbo->vma.node) {
-		nouveau_vm_unmap(&nvbo->vma);
-		nouveau_vm_put(&nvbo->vma);
-	}
 	kfree(nvbo);
 }
 
 static void
 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
-		       int *align, int *size, int *page_shift)
+		       int *align, int *size)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@@ -82,67 +78,51 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 			}
 		}
 	} else {
-		if (likely(dev_priv->chan_vm)) {
-			if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
-				*page_shift = dev_priv->chan_vm->lpg_shift;
-			else
-				*page_shift = dev_priv->chan_vm->spg_shift;
-		} else {
-			*page_shift = 12;
-		}
-
-		*size = roundup(*size, (1 << *page_shift));
-		*align = max((1 << *page_shift), *align);
+		*size = roundup(*size, (1 << nvbo->page_shift));
+		*align = max((1 << nvbo->page_shift), *align);
 	}
 
 	*size = roundup(*size, PAGE_SIZE);
 }
 
 int
-nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
-	       int size, int align, uint32_t flags, uint32_t tile_mode,
-	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
+nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+	       struct nouveau_bo **pnvbo)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret = 0, page_shift = 0;
+	int ret;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
 		return -ENOMEM;
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
+	INIT_LIST_HEAD(&nvbo->vma_list);
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
-	align >>= PAGE_SHIFT;
-
-	if (dev_priv->chan_vm) {
-		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
-				     NV_MEM_ACCESS_RW, &nvbo->vma);
-		if (ret) {
-			kfree(nvbo);
-			return ret;
-		}
+	nvbo->page_shift = 12;
+	if (dev_priv->bar1_vm) {
+		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
+			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
 	}
 
+	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
-	nvbo->channel = chan;
 
 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
-			  ttm_bo_type_device, &nvbo->placement, align, 0,
-			  false, NULL, size, nouveau_bo_del_ttm);
+			  ttm_bo_type_device, &nvbo->placement,
+			  align >> PAGE_SHIFT, 0, false, NULL, size,
+			  nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
-	nvbo->channel = NULL;
 
-	if (nvbo->vma.node)
-		nvbo->bo.offset = nvbo->vma.offset;
 	*pnvbo = nvbo;
 	return 0;
 }
@@ -312,8 +292,6 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 	if (ret)
 		return ret;
 
-	if (nvbo->vma.node)
-		nvbo->bo.offset = nvbo->vma.offset;
-
 	return 0;
 }
@@ -440,7 +418,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			    TTM_MEMTYPE_FLAG_CMA;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
-		man->gpu_offset = dev_priv->gart_info.aper_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unknown GART type: %d\n",
@@ -501,19 +478,12 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *old_node = old_mem->mm_node;
-	struct nouveau_mem *new_node = new_mem->mm_node;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
 	u32 page_count = new_mem->num_pages;
-	u64 src_offset, dst_offset;
 	int ret;
 
-	src_offset = old_node->tmp_vma.offset;
-	if (new_node->tmp_vma.node)
-		dst_offset = new_node->tmp_vma.offset;
-	else
-		dst_offset = nvbo->vma.offset;
-
 	page_count = new_mem->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -547,19 +517,13 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *old_node = old_mem->mm_node;
-	struct nouveau_mem *new_node = new_mem->mm_node;
+	struct nouveau_mem *node = old_mem->mm_node;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
-	u64 src_offset, dst_offset;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
 	int ret;
 
-	src_offset = old_node->tmp_vma.offset;
-	if (new_node->tmp_vma.node)
-		dst_offset = new_node->tmp_vma.offset;
-	else
-		dst_offset = nvbo->vma.offset;
-
 	while (length) {
 		u32 amount, stride, height;
@@ -694,6 +658,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	return 0;
 }
 
+static int
+nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
+		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+{
+	struct nouveau_mem *node = mem->mm_node;
+	int ret;
+
+	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
+			     node->page_shift, NV_MEM_ACCESS_RO, vma);
+	if (ret)
+		return ret;
+
+	if (mem->mem_type == TTM_PL_VRAM)
+		nouveau_vm_map(vma, node);
+	else
+		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
+				  node, node->pages);
+
+	return 0;
+}
+
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		     bool no_wait_reserve, bool no_wait_gpu,
@@ -711,31 +696,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 	}
 
-	/* create temporary vma for old memory, this will get cleaned
-	 * up after ttm destroys the ttm_mem_reg
+	/* create temporary vmas for the transfer and attach them to the
+	 * old nouveau_mem node, these will get cleaned up after ttm has
+	 * destroyed the ttm_mem_reg
 	 */
 	if (dev_priv->card_type >= NV_50) {
 		struct nouveau_mem *node = old_mem->mm_node;
-		if (!node->tmp_vma.node) {
-			u32 page_shift = nvbo->vma.node->type;
-			if (old_mem->mem_type == TTM_PL_TT)
-				page_shift = nvbo->vma.vm->spg_shift;
-
-			ret = nouveau_vm_get(chan->vm,
-					     old_mem->num_pages << PAGE_SHIFT,
-					     page_shift, NV_MEM_ACCESS_RO,
-					     &node->tmp_vma);
-			if (ret)
-				goto out;
-		}
 
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			nouveau_vm_map(&node->tmp_vma, node);
-		else {
-			nouveau_vm_map_sg(&node->tmp_vma, 0,
-					  old_mem->num_pages << PAGE_SHIFT,
-					  node, node->pages);
-		}
+		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
+		if (ret)
+			goto out;
+
+		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+		if (ret)
+			goto out;
 	}
 
 	if (dev_priv->card_type < NV_50)
@@ -762,7 +736,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_reserve, bool no_wait_gpu,
 		      struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
 	struct ttm_mem_reg tmp_mem;
@@ -782,23 +755,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	if (dev_priv->card_type >= NV_50) {
-		struct nouveau_bo *nvbo = nouveau_bo(bo);
-		struct nouveau_mem *node = tmp_mem.mm_node;
-		struct nouveau_vma *vma = &nvbo->vma;
-		if (vma->node->type != vma->vm->spg_shift)
-			vma = &node->tmp_vma;
-
-		nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
-				  node, node->pages);
-	}
-
 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
-
-	if (dev_priv->card_type >= NV_50) {
-		struct nouveau_bo *nvbo = nouveau_bo(bo);
-		nouveau_vm_unmap(&nvbo->vma);
-	}
-
 	if (ret)
 		goto out;
@@ -844,30 +801,22 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *node = new_mem->mm_node;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_vma *vma = &nvbo->vma;
-	struct nouveau_vm *vm = vma->vm;
-
-	if (dev_priv->card_type < NV_50)
-		return;
-
-	switch (new_mem->mem_type) {
-	case TTM_PL_VRAM:
-		nouveau_vm_map(vma, node);
-		break;
-	case TTM_PL_TT:
-		if (vma->node->type != vm->spg_shift) {
-			nouveau_vm_unmap(vma);
-			vma = &node->tmp_vma;
-		}
-		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
-				  node, node->pages);
-		break;
-	default:
-		nouveau_vm_unmap(&nvbo->vma);
-		break;
+	struct nouveau_vma *vma;
+
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (new_mem->mem_type == TTM_PL_VRAM) {
+			nouveau_vm_map(vma, new_mem->mm_node);
+		} else
+		if (new_mem->mem_type == TTM_PL_TT &&
+		    nvbo->page_shift == vma->vm->spg_shift) {
+			nouveau_vm_map_sg(vma, 0, new_mem->
+					  num_pages << PAGE_SHIFT,
+					  node, node->pages);
+		} else {
+			nouveau_vm_unmap(vma);
+		}
 	}
 }
@@ -1113,3 +1062,54 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.io_mem_free = &nouveau_ttm_io_mem_free,
 };
 
+struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
+{
+	struct nouveau_vma *vma;
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (vma->vm == vm)
+			return vma;
+	}
+
+	return NULL;
+}
+
+int
+nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
+		   struct nouveau_vma *vma)
+{
+	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
+	int ret;
+
+	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
+			     NV_MEM_ACCESS_RW, vma);
+	if (ret)
+		return ret;
+
+	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+	else
+	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+		nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+
+	list_add_tail(&vma->head, &nvbo->vma_list);
+	vma->refcount = 1;
+	return 0;
+}
+
+void
+nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+	if (vma->node) {
+		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
+			spin_lock(&nvbo->bo.bdev->fence_lock);
+			ttm_bo_wait(&nvbo->bo, false, false, false);
+			spin_unlock(&nvbo->bo.bdev->fence_lock);
+			nouveau_vm_unmap(vma);
+		}
+
+		nouveau_vm_put(vma);
+		list_del(&vma->head);
+	}
+}
@@ -27,40 +27,63 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_ramht.h"
 
 static int
-nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
+nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
 {
+	u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_bo *pb = chan->pushbuf_bo;
-	struct nouveau_gpuobj *pushbuf = NULL;
-	int ret = 0;
+	int ret;
+
+	/* allocate buffer object */
+	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_map(chan->pushbuf_bo);
+	if (ret)
+		goto out;
 
+	/* create DMA object covering the entire memtype where the push
+	 * buffer resides, userspace can submit its own push buffers from
+	 * anywhere within the same memtype.
+	 */
+	chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
 	if (dev_priv->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
+					 &chan->pushbuf_vma);
+		if (ret)
+			goto out;
+
 		if (dev_priv->card_type < NV_C0) {
 			ret = nouveau_gpuobj_dma_new(chan,
 						     NV_CLASS_DMA_IN_MEMORY, 0,
 						     (1ULL << 40),
 						     NV_MEM_ACCESS_RO,
 						     NV_MEM_TARGET_VM,
-						     &pushbuf);
+						     &chan->pushbuf);
 		}
-		chan->pushbuf_base = pb->bo.offset;
+		chan->pushbuf_base = chan->pushbuf_vma.offset;
 	} else
-	if (pb->bo.mem.mem_type == TTM_PL_TT) {
+	if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->gart_info.aper_size,
 					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_GART, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+					     NV_MEM_TARGET_GART,
+					     &chan->pushbuf);
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
 					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_VRAM, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+					     NV_MEM_TARGET_VRAM,
+					     &chan->pushbuf);
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
 		 * exact reason for existing :)  PCI access to cmdbuf in
@@ -70,47 +93,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 					     pci_resource_start(dev->pdev, 1),
 					     dev_priv->fb_available_size,
 					     NV_MEM_ACCESS_RO,
-					     NV_MEM_TARGET_PCI, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+					     NV_MEM_TARGET_PCI,
+					     &chan->pushbuf);
 	}
 
-	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
-	nouveau_gpuobj_ref(NULL, &pushbuf);
-	return ret;
-}
-
-static struct nouveau_bo *
-nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
-{
-	struct nouveau_bo *pushbuf = NULL;
-	int location, ret;
-
-	if (nouveau_vram_pushbuf)
-		location = TTM_PL_FLAG_VRAM;
-	else
-		location = TTM_PL_FLAG_TT;
-
-	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
-	if (ret) {
-		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
-		return NULL;
-	}
-
-	ret = nouveau_bo_pin(pushbuf, location);
-	if (ret) {
-		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
-		nouveau_bo_ref(NULL, &pushbuf);
-		return NULL;
-	}
-
-	ret = nouveau_bo_map(pushbuf);
+out:
 	if (ret) {
-		nouveau_bo_unpin(pushbuf);
-		nouveau_bo_ref(NULL, &pushbuf);
-		return NULL;
+		NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
+		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
+		nouveau_gpuobj_ref(NULL, &chan->pushbuf);
+		if (chan->pushbuf_bo) {
+			nouveau_bo_unmap(chan->pushbuf_bo);
+			nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+		}
 	}
 
-	return pushbuf;
+	return 0;
 }
 
 /* allocates and initializes a fifo for user space consumption */
@@ -121,6 +119,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
 	struct nouveau_channel *chan;
 	unsigned long flags;
 	int ret;
@@ -160,19 +159,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	INIT_LIST_HEAD(&chan->nvsw.flip);
 	INIT_LIST_HEAD(&chan->fence.pending);
 
-	/* Allocate DMA push buffer */
-	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
-	if (!chan->pushbuf_bo) {
-		ret = -ENOMEM;
-		NV_ERROR(dev, "pushbuf %d\n", ret);
+	/* setup channel's memory and vm */
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+	if (ret) {
+		NV_ERROR(dev, "gpuobj %d\n", ret);
 		nouveau_channel_put(&chan);
 		return ret;
 	}
 
-	nouveau_dma_pre_init(chan);
-	chan->user_put = 0x40;
-	chan->user_get = 0x44;
-
 	/* Allocate space for per-channel fixed notifier memory */
 	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
@@ -181,21 +175,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		return ret;
 	}
 
-	/* Setup channel's default objects */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+	/* Allocate DMA push buffer */
+	ret = nouveau_channel_pushbuf_init(chan);
 	if (ret) {
-		NV_ERROR(dev, "gpuobj %d\n", ret);
+		NV_ERROR(dev, "pushbuf %d\n", ret);
 		nouveau_channel_put(&chan);
 		return ret;
 	}
 
-	/* Create a dma object for the push buffer */
-	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
-	if (ret) {
-		NV_ERROR(dev, "pbctxdma %d\n", ret);
-		nouveau_channel_put(&chan);
-		return ret;
-	}
+	nouveau_dma_pre_init(chan);
+	chan->user_put = 0x40;
+	chan->user_get = 0x44;
 
 	/* disable the fifo caches */
 	pfifo->reassign(dev, false);
@@ -220,6 +210,11 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	nouveau_debugfs_channel_init(chan);
 
 	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
+	if (fpriv) {
+		spin_lock(&fpriv->lock);
+		list_add(&chan->list, &fpriv->channels);
+		spin_unlock(&fpriv->lock);
+	}
 	*chan_ret = chan;
 	return 0;
 }
@@ -236,29 +231,23 @@ nouveau_channel_get_unlocked(struct nouveau_channel *ref)
 }
 
 struct nouveau_channel *
-nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+nouveau_channel_get(struct drm_file *file_priv, int id)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
 	struct nouveau_channel *chan;
-	unsigned long flags;
-
-	if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
-		return ERR_PTR(-EINVAL);
-
-	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
-	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
-	if (unlikely(!chan))
-		return ERR_PTR(-EINVAL);
 
-	if (unlikely(file_priv && chan->file_priv != file_priv)) {
-		nouveau_channel_put_unlocked(&chan);
-		return ERR_PTR(-EINVAL);
+	spin_lock(&fpriv->lock);
+	list_for_each_entry(chan, &fpriv->channels, list) {
+		if (chan->id == id) {
+			chan = nouveau_channel_get_unlocked(chan);
+			spin_unlock(&fpriv->lock);
+			mutex_lock(&chan->mutex);
+			return chan;
+		}
 	}
+	spin_unlock(&fpriv->lock);
 
-	mutex_lock(&chan->mutex);
-	return chan;
+	return ERR_PTR(-EINVAL);
 }
 
 void
@@ -312,12 +301,14 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	/* destroy any resources the channel owned */
 	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
+		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
 		nouveau_bo_unmap(chan->pushbuf_bo);
 		nouveau_bo_unpin(chan->pushbuf_bo);
 		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
 	}
-	nouveau_gpuobj_channel_takedown(chan);
+	nouveau_ramht_ref(NULL, &chan->ramht, chan);
 	nouveau_notifier_takedown_channel(chan);
+	nouveau_gpuobj_channel_takedown(chan);
 	nouveau_channel_ref(NULL, pchan);
 }
@@ -383,10 +374,11 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
 	for (i = 0; i < engine->fifo.channels; i++) {
-		chan = nouveau_channel_get(dev, file_priv, i);
+		chan = nouveau_channel_get(file_priv, i);
 		if (IS_ERR(chan))
 			continue;
 
+		list_del(&chan->list);
 		atomic_dec(&chan->users);
 		nouveau_channel_put(&chan);
 	}
@@ -459,10 +451,11 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 	struct drm_nouveau_channel_free *req = data;
 	struct nouveau_channel *chan;
 
-	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	chan = nouveau_channel_get(file_priv, req->channel);
 	if (IS_ERR(chan))
 		return PTR_ERR(chan);
 
+	list_del(&chan->list);
 	atomic_dec(&chan->users);
 	nouveau_channel_put(&chan);
 	return 0;
@@ -167,8 +167,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	      int delta, int length)
 {
 	struct nouveau_bo *pb = chan->pushbuf_bo;
-	uint64_t offset = bo->bo.offset + delta;
+	struct nouveau_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+	u64 offset;
+
+	vma = nouveau_bo_vma_find(bo, chan->vm);
+	BUG_ON(!vma);
+	offset = vma->offset + delta;
 
 	BUG_ON(chan->dma.ib_free < 1);
 	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
@@ -73,7 +73,7 @@ int nouveau_ignorelid = 0;
 module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
 
 MODULE_PARM_DESC(noaccel, "Disable all acceleration");
-int nouveau_noaccel = 0;
+int nouveau_noaccel = -1;
 module_param_named(noaccel, nouveau_noaccel, int, 0400);
 
 MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
@@ -119,6 +119,10 @@ MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
 int nouveau_msi;
 module_param_named(msi, nouveau_msi, int, 0400);
 
+MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
+int nouveau_ctxfw;
+module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -354,7 +358,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
+		u32 offset = nv_crtc->cursor.nvbo->bo.offset;
 
 		nv_crtc->cursor.set_offset(nv_crtc, offset);
 		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
@@ -389,7 +393,9 @@ static struct drm_driver driver = {
 	.firstopen = nouveau_firstopen,
 	.lastclose = nouveau_lastclose,
 	.unload = nouveau_unload,
+	.open = nouveau_open,
 	.preclose = nouveau_preclose,
+	.postclose = nouveau_postclose,
 #if defined(CONFIG_DRM_NOUVEAU_DEBUG)
 	.debugfs_init = nouveau_debugfs_init,
 	.debugfs_cleanup = nouveau_debugfs_takedown,
@@ -420,6 +426,8 @@ static struct drm_driver driver = {
 	.gem_init_object = nouveau_gem_object_new,
 	.gem_free_object = nouveau_gem_object_del,
+	.gem_open_object = nouveau_gem_object_open,
+	.gem_close_object = nouveau_gem_object_close,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
@@ -46,9 +46,17 @@
 #include "ttm/ttm_module.h"
 
 struct nouveau_fpriv {
-	struct ttm_object_file *tfile;
+	spinlock_t lock;
+	struct list_head channels;
+	struct nouveau_vm *vm;
 };
 
+static inline struct nouveau_fpriv *
+nouveau_fpriv(struct drm_file *file_priv)
+{
+	return file_priv ? file_priv->driver_priv : NULL;
+}
+
 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
 #include "nouveau_drm.h"
@@ -69,7 +77,7 @@ struct nouveau_mem {
 	struct drm_device *dev;
 
 	struct nouveau_vma bar_vma;
-	struct nouveau_vma tmp_vma;
+	struct nouveau_vma vma[2];
 	u8 page_shift;
 
 	struct drm_mm_node *tag;
@@ -107,7 +115,8 @@ struct nouveau_bo {
 
 	struct nouveau_channel *channel;
 
-	struct nouveau_vma vma;
+	struct list_head vma_list;
+	unsigned page_shift;
 
 	uint32_t tile_mode;
 	uint32_t tile_flags;
@@ -176,9 +185,10 @@ struct nouveau_gpuobj {
 	uint32_t flags;
 
 	u32 size;
-	u32 pinst;
-	u32 cinst;
-	u64 vinst;
+	u32 pinst;	/* PRAMIN BAR offset */
+	u32 cinst;	/* Channel offset */
+	u64 vinst;	/* VRAM address */
+	u64 linst;	/* VM address */
 
 	uint32_t engine;
 	uint32_t class;
@@ -201,6 +211,7 @@ enum nouveau_channel_mutex_class {
 
 struct nouveau_channel {
 	struct drm_device *dev;
+	struct list_head list;
 	int id;
 
 	/* references to the channel data structure */
@@ -228,15 +239,18 @@ struct nouveau_channel {
 		uint32_t sequence;
 		uint32_t sequence_ack;
 		atomic_t last_sequence_irq;
+		struct nouveau_vma vma;
 	} fence;
 
 	/* DMA push buffer */
 	struct nouveau_gpuobj *pushbuf;
 	struct nouveau_bo     *pushbuf_bo;
+	struct nouveau_vma     pushbuf_vma;
 	uint32_t               pushbuf_base;
 
 	/* Notifier memory */
 	struct nouveau_bo *notifier_bo;
+	struct nouveau_vma notifier_vma;
 	struct drm_mm notifier_heap;
 
 	/* PFIFO context */
@@ -278,6 +292,7 @@ struct nouveau_channel {
 	uint32_t sw_subchannel[8];
 
+	struct nouveau_vma dispc_vma[2];
 	struct {
 		struct nouveau_gpuobj *vblsem;
 		uint32_t vblsem_head;
@@ -314,7 +329,8 @@ struct nouveau_instmem_engine {
 	int	(*suspend)(struct drm_device *dev);
 	void	(*resume)(struct drm_device *dev);
 
-	int	(*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+	int	(*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
+		       u32 size, u32 align);
 	void	(*put)(struct nouveau_gpuobj *);
 	int	(*map)(struct nouveau_gpuobj *);
 	void	(*unmap)(struct nouveau_gpuobj *);
@@ -445,9 +461,9 @@ struct nouveau_pm_level {
 struct nouveau_pm_temp_sensor_constants {
 	u16 offset_constant;
 	s16 offset_mult;
-	u16 offset_div;
-	u16 slope_mult;
-	u16 slope_div;
+	s16 offset_div;
+	s16 slope_mult;
+	s16 slope_div;
 };
 
 struct nouveau_pm_threshold_temp {
@@ -488,7 +504,10 @@ struct nouveau_pm_engine {
 };
 
 struct nouveau_vram_engine {
+	struct nouveau_mm *mm;
+
 	int  (*init)(struct drm_device *);
+	void (*takedown)(struct drm_device *dev);
 	int  (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
 		    u32 type, struct nouveau_mem **);
 	void (*put)(struct drm_device *, struct nouveau_mem **);
@@ -608,6 +627,7 @@ enum nouveau_card_type {
 
 struct drm_nouveau_private {
 	struct drm_device *dev;
+	bool noaccel;
 
 	/* the card type, takes NV_* as values */
 	enum nouveau_card_type card_type;
@@ -700,7 +720,6 @@ struct drm_nouveau_private {
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
 	uint64_t vram_sys_base;
-	u32 vram_rblock_size;
 
 	uint64_t fb_phys;
 	uint64_t fb_available_size;
@@ -784,12 +803,15 @@ extern int nouveau_override_conntype;
 extern char *nouveau_perflvl;
 extern int nouveau_perflvl_wr;
 extern int nouveau_msi;
+extern int nouveau_ctxfw;
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
 
 /* nouveau_state.c */
+extern int  nouveau_open(struct drm_device *, struct drm_file *);
 extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern void nouveau_postclose(struct drm_device *, struct drm_file *);
 extern int  nouveau_load(struct drm_device *, unsigned long flags);
 extern int  nouveau_firstopen(struct drm_device *);
 extern void nouveau_lastclose(struct drm_device *);
@@ -847,7 +869,7 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
 extern struct nouveau_channel *
 nouveau_channel_get_unlocked(struct nouveau_channel *);
 extern struct nouveau_channel *
-nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+nouveau_channel_get(struct drm_file *, int id);
 extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
 extern void nouveau_channel_put(struct nouveau_channel **);
 extern void nouveau_channel_ref(struct nouveau_channel *chan,
@@ -1169,7 +1191,8 @@ extern int  nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
 extern int  nv04_instmem_suspend(struct drm_device *);
 extern void nv04_instmem_resume(struct drm_device *);
-extern int  nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern int  nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
+			     u32 size, u32 align);
 extern void nv04_instmem_put(struct nouveau_gpuobj *);
 extern int  nv04_instmem_map(struct nouveau_gpuobj *);
 extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
@@ -1180,7 +1203,8 @@ extern int  nv50_instmem_init(struct drm_device *);
 extern void nv50_instmem_takedown(struct drm_device *);
 extern int  nv50_instmem_suspend(struct drm_device *);
 extern void nv50_instmem_resume(struct drm_device *);
-extern int  nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern int  nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
+			     u32 size, u32 align);
 extern void nv50_instmem_put(struct nouveau_gpuobj *);
 extern int  nv50_instmem_map(struct nouveau_gpuobj *);
 extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
@@ -1247,10 +1271,9 @@ extern int nv04_crtc_create(struct drm_device *, int index);
 
 /* nouveau_bo.c */
 extern struct ttm_bo_driver nouveau_bo_driver;
-extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
-			  int size, int align, uint32_t flags,
-			  uint32_t tile_mode, uint32_t tile_flags,
-			  struct nouveau_bo **);
+extern int nouveau_bo_new(struct drm_device *, int size, int align,
+			  uint32_t flags, uint32_t tile_mode,
+			  uint32_t tile_flags, struct nouveau_bo **);
 extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
 extern int nouveau_bo_unpin(struct nouveau_bo *);
 extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1265,6 +1288,12 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
 extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
 			       bool no_wait_reserve, bool no_wait_gpu);
 
+extern struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
+extern int  nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
+			       struct nouveau_vma *);
+extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
+
 /* nouveau_fence.c */
 struct nouveau_fence;
 extern int nouveau_fence_init(struct drm_device *);
@@ -1310,12 +1339,14 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
 }
 
 /* nouveau_gem.c */
-extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
-			   int size, int align, uint32_t domain,
-			   uint32_t tile_mode, uint32_t tile_flags,
-			   struct nouveau_bo **);
+extern int nouveau_gem_new(struct drm_device *, int size, int align,
+			   uint32_t domain, uint32_t tile_mode,
+			   uint32_t tile_flags, struct nouveau_bo **);
 extern int nouveau_gem_object_new(struct drm_gem_object *);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
+extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
+extern void nouveau_gem_object_close(struct drm_gem_object *,
+				     struct drm_file *);
 extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
 				 struct drm_file *);
 extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
@@ -30,6 +30,7 @@
 struct nouveau_framebuffer {
 	struct drm_framebuffer base;
 	struct nouveau_bo *nvbo;
+	struct nouveau_vma vma;
 	u32 r_dma;
 	u32 r_format;
 	u32 r_pitch;
@@ -279,6 +279,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct nouveau_framebuffer *nouveau_fb;
+	struct nouveau_channel *chan;
 	struct nouveau_bo *nvbo;
 	struct drm_mode_fb_cmd mode_cmd;
 	struct pci_dev *pdev = dev->pdev;
@@ -296,8 +297,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = roundup(size, PAGE_SIZE);
 
-	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
-			      NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
+	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
+			      0, 0x0000, &nvbo);
 	if (ret) {
 		NV_ERROR(dev, "failed to allocate framebuffer\n");
 		goto out;
@@ -318,6 +319,15 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 		goto out;
 	}
 
+	chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
+	if (chan && dev_priv->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
+		if (ret) {
+			NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
+			chan = NULL;
+		}
+	}
+
 	mutex_lock(&dev->struct_mutex);
 
 	info = framebuffer_alloc(0, device);
@@ -448,6 +458,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
+		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
 		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}
@@ -336,6 +336,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 {
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_fence *fence = NULL;
+	u64 offset = chan->fence.vma.offset + sema->mem->start;
 	int ret;
 
 	if (dev_priv->chipset < 0x84) {
@@ -345,13 +346,10 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 
 		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
 		OUT_RING  (chan, NvSema);
-		OUT_RING  (chan, sema->mem->start);
+		OUT_RING  (chan, offset);
 		OUT_RING  (chan, 1);
 	} else
 	if (dev_priv->chipset < 0xc0) {
-		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
-		u64 offset = vma->offset + sema->mem->start;
-
 		ret = RING_SPACE(chan, 7);
 		if (ret)
 			return ret;
@@ -364,9 +362,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 		OUT_RING  (chan, 1);
 		OUT_RING  (chan, 1); /* ACQUIRE_EQ */
 	} else {
-		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
-		u64 offset = vma->offset + sema->mem->start;
-
 		ret = RING_SPACE(chan, 5);
 		if (ret)
 			return ret;
@@ -394,6 +389,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 {
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_fence *fence = NULL;
+	u64 offset = chan->fence.vma.offset + sema->mem->start;
 	int ret;
 
 	if (dev_priv->chipset < 0x84) {
@@ -403,14 +399,11 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 
 		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
 		OUT_RING  (chan, NvSema);
-		OUT_RING  (chan, sema->mem->start);
+		OUT_RING  (chan, offset);
 		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
 		OUT_RING  (chan, 1);
 	} else
 	if (dev_priv->chipset < 0xc0) {
-		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
-		u64 offset = vma->offset + sema->mem->start;
-
 		ret = RING_SPACE(chan, 7);
 		if (ret)
 			return ret;
@@ -423,9 +416,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
 		OUT_RING  (chan, 1);
 		OUT_RING  (chan, 2); /* RELEASE */
 	} else {
-		struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
-		u64 offset = vma->offset + sema->mem->start;
-
 		ret = RING_SPACE(chan, 5);
 		if (ret)
 			return ret;
@@ -540,6 +530,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 		nouveau_gpuobj_ref(NULL, &obj);
 		if (ret)
 			return ret;
+	} else {
+		/* map fence bo into channel's vm */
+		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
+					 &chan->fence.vma);
+		if (ret)
+			return ret;
 	}
 
 	INIT_LIST_HEAD(&chan->fence.pending);
@@ -551,10 +547,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 void
 nouveau_fence_channel_fini(struct nouveau_channel *chan)
 {
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_fence *tmp, *fence;
 
 	spin_lock(&chan->fence.lock);
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		list_del(&fence->entry);
@@ -564,8 +560,9 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
 	spin_unlock(&chan->fence.lock);
+
+	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
 }
 
 int
@@ -577,7 +574,7 @@ nouveau_fence_init(struct drm_device *dev)
 	/* Create a shared VRAM heap for cross-channel sync. */
 	if (USE_SEMA(dev)) {
-		ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
+		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
 				     0, 0, &dev_priv->fence.bo);
 		if (ret)
 			return ret;
...@@ -60,9 +60,69 @@ nouveau_gem_object_del(struct drm_gem_object *gem) ...@@ -60,9 +60,69 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
} }
int int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
int size, int align, uint32_t domain, uint32_t tile_mode, {
uint32_t tile_flags, struct nouveau_bo **pnvbo) struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;
int ret;
if (!fpriv->vm)
return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
if (ret)
return ret;
vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
if (!vma) {
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma) {
ret = -ENOMEM;
goto out;
}
ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
if (ret) {
kfree(vma);
goto out;
}
} else {
vma->refcount++;
}
out:
ttm_bo_unreserve(&nvbo->bo);
return ret;
}
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;
int ret;
if (!fpriv->vm)
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
if (ret)
return;
vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
if (vma) {
if (--vma->refcount == 0)
nouveau_bo_vma_del(nvbo, vma);
}
ttm_bo_unreserve(&nvbo->bo);
}
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
struct nouveau_bo **pnvbo)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo; struct nouveau_bo *nvbo;
...@@ -76,7 +136,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, ...@@ -76,7 +136,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM; flags |= TTM_PL_FLAG_SYSTEM;
ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode, ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
tile_flags, pnvbo); tile_flags, pnvbo);
if (ret) if (ret)
return ret; return ret;
...@@ -103,17 +163,28 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan, ...@@ -103,17 +163,28 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
} }
static int static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
struct drm_nouveau_gem_info *rep)
{ {
struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;
if (nvbo->bo.mem.mem_type == TTM_PL_TT) if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART; rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset; rep->offset = nvbo->bo.offset;
if (fpriv->vm) {
vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
if (!vma)
return -EINVAL;
rep->offset = vma->offset;
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->map_handle = nvbo->bo.addr_space_offset; rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode; rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags; rep->tile_flags = nvbo->tile_flags;
@@ -127,7 +198,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
-	struct nouveau_channel *chan = NULL;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
@@ -138,28 +208,21 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		return -EINVAL;
	}

-	if (req->channel_hint) {
-		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
-		if (IS_ERR(chan))
-			return PTR_ERR(chan);
-	}
-
-	ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
+	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
-	if (chan)
-		nouveau_channel_put(&chan);
	if (ret)
		return ret;

-	ret = nouveau_gem_info(nvbo->gem, &req->info);
-	if (ret)
-		goto out;
-
	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	if (ret == 0) {
+		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+		if (ret)
+			drm_gem_handle_delete(file_priv, req->info.handle);
+	}
+
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
-out:
	return ret;
}
@@ -318,6 +381,7 @@ static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
@@ -356,24 +420,26 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
			return ret;
		}

-		if (nvbo->bo.offset == b->presumed.offset &&
-		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
-		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
-		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
-			continue;
+		if (dev_priv->card_type < NV_50) {
+			if (nvbo->bo.offset == b->presumed.offset &&
+			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
+				continue;

-		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
-		else
-			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
-		b->presumed.offset = nvbo->bo.offset;
-		b->presumed.valid = 0;
-		relocs++;
+			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+			else
+				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+			b->presumed.offset = nvbo->bo.offset;
+			b->presumed.valid = 0;
+			relocs++;

-		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
-				     &b->presumed, sizeof(b->presumed)))
-			return -EFAULT;
+			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+					     &b->presumed, sizeof(b->presumed)))
+				return -EFAULT;
+		}
	}

	return relocs;
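
Note the card_type gate introduced above: from nv50 onwards every client owns a private address space, so the GPU virtual address of a BO comes from the client's VMA and stays stable even when TTM moves the backing storage — rewriting presumed offsets only makes sense on pre-NV50 chips, where pushbuf addresses track physical placement. A hedged illustration of the userspace side of this contract (the helper name is hypothetical; the struct is the pushbuf ABI used above):

/* illustration only: if the kernel left presumed.valid set, the
 * addresses already baked into the pushbuf were still correct and
 * the client may skip patching relocations on the next submit.
 */
static inline int need_reloc(const struct drm_nouveau_gem_pushbuf_bo *b)
{
	return b->presumed.valid == 0;
}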
@@ -548,7 +614,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

-	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);
@@ -782,7 +848,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
	if (!gem)
		return -ENOENT;

-	ret = nouveau_gem_info(gem, req);
+	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
...
@@ -451,10 +451,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
		dev_priv->ramin_rsvd_vram = 512 * 1024;
	}

-	ret = dev_priv->engine.vram.init(dev);
-	if (ret)
-		return ret;
-
	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
@@ -479,7 +475,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
	}

	if (dev_priv->card_type < NV_50) {
-		ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
				     0, 0, &dev_priv->vga_ram);
		if (ret == 0)
			ret = nouveau_bo_pin(dev_priv->vga_ram,
@@ -729,37 +725,31 @@ nouveau_mem_timing_fini(struct drm_device *dev)
}
static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
-	struct nouveau_mm *mm;
-	u64 size, block, rsvd;
-	int ret;
-
-	rsvd  = (256 * 1024); /* vga memory */
-	size  = (p_size << PAGE_SHIFT) - rsvd;
-	block = dev_priv->vram_rblock_size;
-
-	ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
-	if (ret)
-		return ret;
-
-	man->priv = mm;
+	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
-	struct nouveau_mm *mm = man->priv;
-	int ret;
-
-	ret = nouveau_mm_fini(&mm);
-	if (ret)
-		return ret;
-
-	man->priv = NULL;
+	/* nothing to do */
	return 0;
}

+static inline void
+nouveau_mem_node_cleanup(struct nouveau_mem *node)
+{
+	if (node->vma[0].node) {
+		nouveau_vm_unmap(&node->vma[0]);
+		nouveau_vm_put(&node->vma[0]);
+	}
+
+	if (node->vma[1].node) {
+		nouveau_vm_unmap(&node->vma[1]);
+		nouveau_vm_put(&node->vma[1]);
+	}
+}
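
nouveau_mem_node_cleanup() above generalises the old single tmp_vma teardown: a nouveau_mem node can now carry up to two temporary mappings (presumably the source and destination sides of a buffer move), and both must be unmapped and released before the node is freed. Because each branch is guarded by its vma[i].node test, the helper is safe to call on a node with zero, one, or both slots populated.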
static void
@@ -768,14 +758,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	struct nouveau_mem *node = mem->mm_node;
	struct drm_device *dev = dev_priv->dev;

-	if (node->tmp_vma.node) {
-		nouveau_vm_unmap(&node->tmp_vma);
-		nouveau_vm_put(&node->tmp_vma);
-	}
-
+	nouveau_mem_node_cleanup(mem->mm_node);
	vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
@@ -794,7 +779,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-		size_nc = 1 << nvbo->vma.node->type;
+		size_nc = 1 << nvbo->page_shift;

	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
			mem->page_alignment << PAGE_SHIFT, size_nc,
@@ -804,9 +789,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
		return (ret == -ENOSPC) ? 0 : ret;
	}

-	node->page_shift = 12;
-	if (nvbo->vma.node)
-		node->page_shift = nvbo->vma.node->type;
+	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
@@ -862,15 +845,9 @@ static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
-	struct nouveau_mem *node = mem->mm_node;
-
-	if (node->tmp_vma.node) {
-		nouveau_vm_unmap(&node->tmp_vma);
-		nouveau_vm_put(&node->tmp_vma);
-	}
-
-	mem->mm_node = NULL;
-	kfree(node);
+	nouveau_mem_node_cleanup(mem->mm_node);
+	kfree(mem->mm_node);
+	mem->mm_node = NULL;
}
static int
@@ -880,11 +857,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_vma *vma = &nvbo->vma;
-	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mem *node;
-	int ret;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))
@@ -893,24 +866,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
+	node->page_shift = 12;

-	/* This node must be for evicting large-paged VRAM
-	 * to system memory.  Due to a nv50 limitation of
-	 * not being able to mix large/small pages within
-	 * the same PDE, we need to create a temporary
-	 * small-paged VMA for the eviction.
-	 */
-	if (vma->node->type != vm->spg_shift) {
-		ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
-				     vm->spg_shift, NV_MEM_ACCESS_RW,
-				     &node->tmp_vma);
-		if (ret) {
-			kfree(node);
-			return ret;
-		}
-	}
-
-	node->page_shift = nvbo->vma.node->type;
	mem->mm_node = node;
	mem->start = 0;
	return 0;
...
@@ -158,11 +158,18 @@ int
nouveau_mm_fini(struct nouveau_mm **prmm)
{
	struct nouveau_mm *rmm = *prmm;
-	struct nouveau_mm_node *heap =
+	struct nouveau_mm_node *node, *heap =
		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);

-	if (!list_is_singular(&rmm->nodes))
+	if (!list_is_singular(&rmm->nodes)) {
+		printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
+		list_for_each_entry(node, &rmm->nodes, nl_entry) {
+			printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
+			       node->type, node->offset, node->length);
+		}
+		WARN_ON(1);
		return -EBUSY;
+	}

	kfree(heap);
	kfree(rmm);
...
@@ -52,6 +52,7 @@ int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);

int  nv50_vram_init(struct drm_device *);
+void nv50_vram_fini(struct drm_device *);
int  nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
		   u32 memtype, struct nouveau_mem **);
void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
...
@@ -34,6 +34,7 @@ int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *ntfy = NULL;
	uint32_t flags, ttmpl;
	int ret;
@@ -46,7 +47,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
		ttmpl = TTM_PL_FLAG_TT;
	}

-	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
+	ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
	if (ret)
		return ret;
@@ -58,14 +59,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
	if (ret)
		goto out_err;

+	if (dev_priv->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
+		if (ret)
+			goto out_err;
+	}
+
	ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
	if (ret)
		goto out_err;

	chan->notifier_bo = ntfy;
out_err:
-	if (ret)
+	if (ret) {
+		nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
		drm_gem_object_unreference_unlocked(ntfy->gem);
+	}

	return ret;
}
@@ -78,6 +87,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
	if (!chan->notifier_bo)
		return;

+	nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
	nouveau_bo_unmap(chan->notifier_bo);
	mutex_lock(&dev->struct_mutex);
	nouveau_bo_unpin(chan->notifier_bo);
@@ -122,10 +132,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
			target = NV_MEM_TARGET_VRAM;
		else
			target = NV_MEM_TARGET_GART;
-		offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+		offset = chan->notifier_bo->bo.offset;
	} else {
		target = NV_MEM_TARGET_VM;
-		offset = chan->notifier_bo->vma.offset;
+		offset = chan->notifier_vma.offset;
	}
	offset += mem->start;
@@ -183,7 +193,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
	if (unlikely(dev_priv->card_type >= NV_C0))
		return -EINVAL;

-	chan = nouveau_channel_get(dev, file_priv, na->channel);
+	chan = nouveau_channel_get(file_priv, na->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);
...
@@ -125,7 +125,7 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
-	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+	if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -191,7 +191,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

-	if (chan) {
+	if (!(flags & NVOBJ_FLAG_VM) && chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
@@ -208,7 +208,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node = ramin;
	} else {
-		ret = instmem->get(gpuobj, size, align);
+		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
@@ -690,35 +690,64 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
	return 0;
}
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *pgd = NULL;
struct nouveau_vm_pgd *vpgd;
int ret, i;
ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
if (ret)
return ret;
/* create page directory for this vm if none currently exists,
* will be destroyed automagically when last reference to the
* vm is removed
*/
if (list_empty(&vm->pgd_list)) {
ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
if (ret)
return ret;
}
nouveau_vm_ref(vm, &chan->vm, pgd);
nouveau_gpuobj_ref(NULL, &pgd);
/* point channel at vm's page directory */
vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
nv_wo32(chan->ramin, 0x0208, 0xffffffff);
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
/* map display semaphore buffers into channel's vm */
for (i = 0; i < 2; i++) {
struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
&chan->dispc_vma[i]);
if (ret)
return ret;
}
return 0;
}
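
For reference, the four instance-memory words written above decode as follows (inferred from the values used, not from documentation): 0x0200/0x0204 hold the low/high 32 bits of the page directory's backing address, while 0x0208/0x020c hold the low/high 32 bits of the address-space limit — 0x000000ffffffffff = (1ULL << 40) - 1, matching the 1TiB per-client VM created in nouveau_open() later in this series.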
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
+	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

-	if (dev_priv->card_type == NV_C0) {
-		struct nouveau_vm *vm = dev_priv->chan_vm;
-		struct nouveau_vm_pgd *vpgd;
-
-		ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
-					 &chan->ramin);
-		if (ret)
-			return ret;
-
-		nouveau_vm_ref(vm, &chan->vm, NULL);
-
-		vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
-		nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
-		nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
-		nv_wo32(chan->ramin, 0x0208, 0xffffffff);
-		nv_wo32(chan->ramin, 0x020c, 0x000000ff);
-		return 0;
-	}
+	if (dev_priv->card_type == NV_C0)
+		return nvc0_gpuobj_channel_init(chan, vm);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
@@ -731,7 +760,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
	 * - Allocate per-channel page-directory
	 * - Link with shared channel VM
	 */
-	if (dev_priv->chan_vm) {
+	if (vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;
@@ -744,7 +773,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
		if (ret)
			return ret;

-		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
+		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	}
	/* RAMHT */
@@ -768,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			struct nouveau_gpuobj *sem = NULL;
			struct nv50_display_crtc *dispc =
				&nv50_display(dev)->crtc[i];
-			u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+			u64 offset = dispc->sem.bo->bo.offset;

			ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
						     NV_MEM_ACCESS_RW,
@@ -841,13 +870,22 @@ void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

-	nouveau_ramht_ref(NULL, &chan->ramht, chan);
+	if (dev_priv->card_type >= NV_50) {
+		struct nv50_display *disp = nv50_display(dev);

-	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+		for (i = 0; i < 2; i++) {
+			struct nv50_display_crtc *dispc = &disp->crtc[i];
+			nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
+		}
+
+		nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
+		nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+	}

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
@@ -909,7 +947,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
	if (init->handle == ~0)
		return -EINVAL;

-	chan = nouveau_channel_get(dev, file_priv, init->channel);
+	chan = nouveau_channel_get(file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);
@@ -936,7 +974,7 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
	struct nouveau_channel *chan;
	int ret;

-	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+	chan = nouveau_channel_get(file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);
...
@@ -91,6 +91,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->pm.clock_pre = nv04_pm_clock_pre;
		engine->pm.clock_set = nv04_pm_clock_set;
		engine->vram.init = nouveau_mem_detect;
+		engine->vram.takedown = nouveau_stub_takedown;
		engine->vram.flags_valid = nouveau_mem_flags_valid;
		break;
	case 0x10:
@@ -139,6 +140,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->pm.clock_pre = nv04_pm_clock_pre;
		engine->pm.clock_set = nv04_pm_clock_set;
		engine->vram.init = nouveau_mem_detect;
+		engine->vram.takedown = nouveau_stub_takedown;
		engine->vram.flags_valid = nouveau_mem_flags_valid;
		break;
	case 0x20:
@@ -187,6 +189,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->pm.clock_pre = nv04_pm_clock_pre;
		engine->pm.clock_set = nv04_pm_clock_set;
		engine->vram.init = nouveau_mem_detect;
+		engine->vram.takedown = nouveau_stub_takedown;
		engine->vram.flags_valid = nouveau_mem_flags_valid;
		break;
	case 0x30:
@@ -237,6 +240,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->pm.voltage_get = nouveau_voltage_gpio_get;
		engine->pm.voltage_set = nouveau_voltage_gpio_set;
		engine->vram.init = nouveau_mem_detect;
+		engine->vram.takedown = nouveau_stub_takedown;
		engine->vram.flags_valid = nouveau_mem_flags_valid;
		break;
	case 0x40:
@@ -289,6 +293,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->pm.voltage_set = nouveau_voltage_gpio_set;
		engine->pm.temp_get = nv40_temp_get;
		engine->vram.init = nouveau_mem_detect;
+		engine->vram.takedown = nouveau_stub_takedown;
		engine->vram.flags_valid = nouveau_mem_flags_valid;
		break;
	case 0x50:
@@ -366,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		else
			engine->pm.temp_get = nv40_temp_get;
		engine->vram.init = nv50_vram_init;
+		engine->vram.takedown = nv50_vram_fini;
		engine->vram.get = nv50_vram_new;
		engine->vram.put = nv50_vram_del;
		engine->vram.flags_valid = nv50_vram_flags_valid;
@@ -412,9 +418,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
		engine->gpio.irq_enable = nv50_gpio_irq_enable;
		engine->vram.init = nvc0_vram_init;
+		engine->vram.takedown = nv50_vram_fini;
		engine->vram.get = nvc0_vram_new;
		engine->vram.put = nv50_vram_del;
		engine->vram.flags_valid = nvc0_vram_flags_valid;
+		engine->pm.temp_get = nv84_temp_get;
		break;
	default:
		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -448,8 +456,8 @@ nouveau_card_init_channel(struct drm_device *dev)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

-	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
-				    (struct drm_file *)-2, NvDmaFB, NvDmaTT);
+	ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
+				    NvDmaFB, NvDmaTT);
	if (ret)
		return ret;
@@ -528,7 +536,7 @@ nouveau_card_init(struct drm_device *dev)
	nouveau_pm_init(dev);

-	ret = nouveau_mem_vram_init(dev);
+	ret = engine->vram.init(dev);
	if (ret)
		goto out_bios;
@@ -540,10 +548,14 @@ nouveau_card_init(struct drm_device *dev)
	if (ret)
		goto out_gpuobj;

-	ret = nouveau_mem_gart_init(dev);
+	ret = nouveau_mem_vram_init(dev);
	if (ret)
		goto out_instmem;

+	ret = nouveau_mem_gart_init(dev);
+	if (ret)
+		goto out_ttmvram;
+
	/* PMC */
	ret = engine->mc.init(dev);
	if (ret)
@@ -564,7 +576,7 @@ nouveau_card_init(struct drm_device *dev)
	if (ret)
		goto out_timer;

-	if (!nouveau_noaccel) {
+	if (!dev_priv->noaccel) {
		switch (dev_priv->card_type) {
		case NV_04:
			nv04_graph_create(dev);
@@ -676,10 +688,10 @@ nouveau_card_init(struct drm_device *dev)
	drm_vblank_cleanup(dev);
	engine->display.destroy(dev);
out_fifo:
-	if (!nouveau_noaccel)
+	if (!dev_priv->noaccel)
		engine->fifo.takedown(dev);
out_engine:
-	if (!nouveau_noaccel) {
+	if (!dev_priv->noaccel) {
		for (e = e - 1; e >= 0; e--) {
			if (!dev_priv->eng[e])
				continue;
@@ -697,12 +709,14 @@ nouveau_card_init(struct drm_device *dev)
	engine->mc.takedown(dev);
out_gart:
	nouveau_mem_gart_fini(dev);
+out_ttmvram:
+	nouveau_mem_vram_fini(dev);
out_instmem:
	engine->instmem.takedown(dev);
out_gpuobj:
	nouveau_gpuobj_takedown(dev);
out_vram:
-	nouveau_mem_vram_fini(dev);
+	engine->vram.takedown(dev);
out_bios:
	nouveau_pm_fini(dev);
	nouveau_bios_takedown(dev);
@@ -719,12 +733,17 @@ static void nouveau_card_takedown(struct drm_device *dev)
	struct nouveau_engine *engine = &dev_priv->engine;
	int e;

+	drm_kms_helper_poll_fini(dev);
+	nouveau_fbcon_fini(dev);
+
	if (dev_priv->channel) {
+		nouveau_fence_fini(dev);
		nouveau_channel_put_unlocked(&dev_priv->channel);
-		nouveau_fence_fini(dev);
	}

-	if (!nouveau_noaccel) {
+	engine->display.destroy(dev);
+
+	if (!dev_priv->noaccel) {
		engine->fifo.takedown(dev);
		for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
			if (dev_priv->eng[e]) {
@@ -749,10 +768,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
	ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&dev->struct_mutex);
	nouveau_mem_gart_fini(dev);
+	nouveau_mem_vram_fini(dev);

	engine->instmem.takedown(dev);
	nouveau_gpuobj_takedown(dev);
-	nouveau_mem_vram_fini(dev);
+	engine->vram.takedown(dev);

	nouveau_irq_fini(dev);
	drm_vblank_cleanup(dev);
@@ -763,6 +783,41 @@ static void nouveau_card_takedown(struct drm_device *dev)
	vga_client_register(dev->pdev, NULL, NULL, NULL);
}
int
nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fpriv *fpriv;
int ret;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv))
return -ENOMEM;
spin_lock_init(&fpriv->lock);
INIT_LIST_HEAD(&fpriv->channels);
if (dev_priv->card_type == NV_50) {
ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
&fpriv->vm);
if (ret) {
kfree(fpriv);
return ret;
}
} else
if (dev_priv->card_type >= NV_C0) {
ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
&fpriv->vm);
if (ret) {
kfree(fpriv);
return ret;
}
}
file_priv->driver_priv = fpriv;
return 0;
}
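
The geometry here gives each client a 1TiB (1ULL << 40) virtual address space. The third argument to nouveau_vm_new() is read here as the space reserved at the bottom of that range for kernel-managed mappings — 512MiB (0x0020000000) on nv50, 128MiB (0x0008000000) on nvc0 — an interpretation inferred from the call sites rather than from a documented API:

/* sketch of the resulting layout (sizes inferred from the calls above):
 *   [0, mm_offset)           reserved for kernel-side mappings
 *   [mm_offset, 1ULL << 40)  available to client allocations
 */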
/* here a client dies, release the stuff that was allocated for its
 * file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -770,6 +825,14 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
	nouveau_channel_cleanup(dev, file_priv);
}
void
nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
nouveau_vm_ref(NULL, &fpriv->vm, NULL);
kfree(fpriv);
}
/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
@@ -935,6 +998,25 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
		dev_priv->card_type, reg0);
	/* Determine whether we'll attempt acceleration or not; some
	 * cards are disabled by default here because they're known to
	 * be non-functional, or have never been tested due to lack of hw.
	 */
dev_priv->noaccel = !!nouveau_noaccel;
if (nouveau_noaccel == -1) {
switch (dev_priv->chipset) {
case 0xc1: /* known broken */
case 0xc8: /* never tested */
NV_INFO(dev, "acceleration disabled by default, pass "
"noaccel=0 to force enable\n");
dev_priv->noaccel = true;
break;
default:
dev_priv->noaccel = false;
break;
}
}
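
This effectively turns nouveau_noaccel into a tri-state: 0 forces acceleration on, 1 forces it off, and -1 defers to the per-chipset table above. A hedged sketch of the matching module-parameter declaration — the real one lives in nouveau_drv.c and may differ in wording and permission bits:

/* assumption: the option defaults to -1 so the table above can decide */
MODULE_PARM_DESC(noaccel, "disable acceleration (0 = on, 1 = off, -1 = auto)");
int nouveau_noaccel = -1;
module_param_named(noaccel, nouveau_noaccel, int, 0400);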
	ret = nouveau_remove_conflicting_drivers(dev);
	if (ret)
		goto err_mmio;
@@ -999,11 +1081,7 @@ void nouveau_lastclose(struct drm_device *dev)
int nouveau_unload(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;

-	drm_kms_helper_poll_fini(dev);
-	nouveau_fbcon_fini(dev);
-	engine->display.destroy(dev);
	nouveau_card_takedown(dev);

	iounmap(dev_priv->mmio);
...
@@ -43,7 +43,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
	/* Set the default sensor's constants */
	sensor->offset_constant = 0;
-	sensor->offset_mult = 1;
+	sensor->offset_mult = 0;
	sensor->offset_div = 1;
	sensor->slope_mult = 1;
	sensor->slope_div = 1;
@@ -99,6 +99,13 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
		sensor->slope_mult = 431;
		sensor->slope_div = 10000;
		break;
+	case 0x67:
+		sensor->offset_mult = -26149;
+		sensor->offset_div = 100;
+		sensor->slope_mult = 484;
+		sensor->slope_div = 10000;
+		break;
	}
}
@@ -109,7 +116,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
	/* Read the entries from the table */
	for (i = 0; i < entries; i++) {
-		u16 value = ROM16(temp[1]);
+		s16 value = ROM16(temp[1]);

		switch (temp[0]) {
		case 0x01:
@@ -160,8 +167,8 @@ nv40_sensor_setup(struct drm_device *dev)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
-	u32 offset = sensor->offset_mult / sensor->offset_div;
-	u32 sensor_calibration;
+	s32 offset = sensor->offset_mult / sensor->offset_div;
+	s32 sensor_calibration;

	/* set up the sensors */
	sensor_calibration = 120 - offset - sensor->offset_constant;
...
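
The switch to signed arithmetic matters because several calibration tables, including the nv67 entry added above, carry negative offsets. Assuming the driver computes the reported temperature roughly as raw * slope_mult / slope_div + offset_mult / offset_div + offset_constant, the nv67 constants give, for a raw sensor reading of 6642:

	6642 * 484 / 10000  ≈  321
	-26149 / 100        =  -261
	321 - 261 + 0       =   60°C

With the old unsigned types the -26149 term would have wrapped to a huge positive value, and the u16 read of the VBIOS entry would likewise have mangled negative table values.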
@@ -369,23 +369,26 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
}

static void
-nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;
+	struct nouveau_gpuobj *pgd = NULL;

-	if (!pgd)
+	if (!mpgd)
		return;

	mutex_lock(&vm->mm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
-		if (vpgd->obj != pgd)
-			continue;
-
-		list_del(&vpgd->head);
-		nouveau_gpuobj_ref(NULL, &vpgd->obj);
-		kfree(vpgd);
+		if (vpgd->obj == mpgd) {
+			pgd = vpgd->obj;
+			list_del(&vpgd->head);
+			kfree(vpgd);
+			break;
+		}
	}
	mutex_unlock(&vm->mm->mutex);
+
+	nouveau_gpuobj_ref(NULL, &pgd);
}
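
The rework above also changes when the page directory reference is dropped: the object is remembered under vm->mm->mutex but only released after the mutex is unlocked. Dropping the last reference can tear the pgd down through instmem, so deferring the nouveau_gpuobj_ref(NULL, &pgd) call keeps that teardown (and whatever locks it takes) outside the mm mutex, avoiding a false lock-inversion report.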
static void
@@ -396,8 +399,8 @@ nouveau_vm_del(struct nouveau_vm *vm)
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}
-	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

+	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}
...
@@ -41,6 +41,8 @@ struct nouveau_vm_pgd {
};

struct nouveau_vma {
+	struct list_head head;
+	int refcount;
	struct nouveau_vm *vm;
	struct nouveau_mm_node *node;
	u64 offset;
...
@@ -1035,7 +1035,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
	drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);

-	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nv_crtc->cursor.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
...
@@ -112,7 +112,8 @@ nv04_instmem_resume(struct drm_device *dev)
}

int
-nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
+		 u32 size, u32 align)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_mm_node *ramin = NULL;
...
@@ -104,7 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
				NV50_EVO_CRTC_CLUT_MODE_OFF :
				NV50_EVO_CRTC_CLUT_MODE_ON);
-		OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+		OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
		if (dev_priv->chipset != 0x50) {
			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
			OUT_RING(evo, NvEvoVRAM);
@@ -372,7 +372,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
	nouveau_bo_unmap(cursor);

-	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
	nv_crtc->cursor.show(nv_crtc, true);

out:
@@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
		}
	}

-	nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
+	nv_crtc->fb.offset = fb->nvbo->bo.offset;
	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -747,7 +747,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
	}

	nv_crtc->lut.depth = 0;
-	ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nv_crtc->lut.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
@@ -773,7 +773,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);

-	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nv_crtc->cursor.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
...
@@ -415,8 +415,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,

	/* synchronise with the rendering channel, if necessary */
	if (likely(chan)) {
-		u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
-
		ret = RING_SPACE(chan, 10);
		if (ret) {
			WIND_RING(evo);
@@ -438,6 +436,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			else
				OUT_RING  (chan, chan->vram_handle);
		} else {
+			u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+			offset += dispc->sem.offset;
			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
			OUT_RING  (chan, upper_32_bits(offset));
			OUT_RING  (chan, lower_32_bits(offset));
@@ -484,7 +484,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
	OUT_RING  (evo, 0x00000000);
	OUT_RING  (evo, 0x00000000);
	BEGIN_RING(evo, 0, 0x0800, 5);
-	OUT_RING  (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+	OUT_RING  (evo, nv_fb->nvbo->bo.offset >> 8);
	OUT_RING  (evo, 0);
	OUT_RING  (evo, (fb->height << 16) | fb->width);
	OUT_RING  (evo, nv_fb->r_pitch);
...
@@ -38,6 +38,7 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
		return;
	*pevo = NULL;

+	nouveau_ramht_ref(NULL, &evo->ramht, evo);
	nouveau_gpuobj_channel_takedown(evo);
	nouveau_bo_unmap(evo->pushbuf_bo);
	nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -116,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
	evo->user_get = 4;
	evo->user_put = 0;

-	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
			     &evo->pushbuf_bo);
	if (ret == 0)
		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -153,7 +154,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->dev;
	int id = evo->id, ret, i;
-	u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+	u64 pushbuf = evo->pushbuf_bo->bo.offset;
	u32 tmp;

	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -331,16 +332,15 @@ nv50_evo_create(struct drm_device *dev)
		if (ret)
			goto err;

-		ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, &dispc->sem.bo);
		if (!ret) {
-			offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
-
			ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(dispc->sem.bo);
			if (ret)
				nouveau_bo_ref(NULL, &dispc->sem.bo);
+			offset = dispc->sem.bo->bo.offset;
		}

		if (ret)
...
@@ -159,7 +159,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
-	struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+	struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
	int ret, format;

	switch (info->var.bits_per_pixel) {
@@ -247,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
	OUT_RING(chan, info->fix.line_length);
	OUT_RING(chan, info->var.xres_virtual);
	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
-	OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
+	OUT_RING(chan, upper_32_bits(fb->vma.offset));
+	OUT_RING(chan, lower_32_bits(fb->vma.offset));
	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
	OUT_RING(chan, format);
	OUT_RING(chan, 1);
@@ -256,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
	OUT_RING(chan, info->fix.line_length);
	OUT_RING(chan, info->var.xres_virtual);
	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
-	OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
+	OUT_RING(chan, upper_32_bits(fb->vma.offset));
+	OUT_RING(chan, lower_32_bits(fb->vma.offset));

	return 0;
}
...
@@ -305,9 +305,9 @@ struct nv50_gpuobj_node {
	u32 align;
};

int
-nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
+		 u32 size, u32 align)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -336,7 +336,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
		if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
			flags |= NV_MEM_ACCESS_SYS;

-		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
+		ret = nouveau_vm_get(chan->vm, size, 12, flags,
				     &node->chan_vma);
		if (ret) {
			vram->put(dev, &node->vram);
@@ -345,7 +345,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
		}

		nouveau_vm_map(&node->chan_vma, node->vram);
-		gpuobj->vinst = node->chan_vma.offset;
+		gpuobj->linst = node->chan_vma.offset;
	}

	gpuobj->size = size;
...
@@ -156,7 +156,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
	pinstmem->flush(vm->dev);

	/* BAR */
-	if (vm != dev_priv->chan_vm) {
+	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
		nv50_vm_flush_engine(vm->dev, 6);
		return;
	}
...
@@ -51,9 +51,7 @@ void
nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
-	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
	struct nouveau_mm_node *this;
	struct nouveau_mem *mem;
@@ -84,9 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
	      u32 memtype, struct nouveau_mem **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
-	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
	struct nouveau_mm_node *r;
	struct nouveau_mem *mem;
	int comp = (memtype & 0x300) >> 8;
@@ -190,22 +186,35 @@ int
nv50_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	u32 rblock, length;

	dev_priv->vram_size = nv_rd32(dev, 0x10020c);
	dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
	dev_priv->vram_size &= 0xffffffff00ULL;

-	switch (dev_priv->chipset) {
-	case 0xaa:
-	case 0xac:
-	case 0xaf:
+	/* IGPs, no funky reordering happens here, they don't have VRAM */
+	if (dev_priv->chipset == 0xaa ||
+	    dev_priv->chipset == 0xac ||
+	    dev_priv->chipset == 0xaf) {
		dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
-		dev_priv->vram_rblock_size = 4096;
-		break;
-	default:
-		dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
-		break;
+		rblock = 4096 >> 12;
+	} else {
+		rblock = nv50_vram_rblock(dev) >> 12;
	}

-	return 0;
+	length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+
+	return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
+}
+
+void
+nv50_vram_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+
+	nouveau_mm_fini(&vram->mm);
}
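
The bookkeeping here is in 4KiB pages (everything is shifted right by 12): nouveau_mm now manages the whole VRAM aperture minus 64 pages (256KiB) of VGA memory at the bottom and 256 pages (1MiB) at the top for the VBIOS. As a worked example, a 512MiB board yields 131072 pages, of which 131072 - 64 - 256 = 130752 are handed to nouveau_mm_init(), to be carved up in rblock-sized chunks.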
@@ -48,14 +48,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

-	ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
+	ret = nouveau_gpuobj_new(dev, chan, 256, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
				 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
	if (ret)
		return ret;

-	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst));
-	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst));
+	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
+	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
...
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -23,16 +23,80 @@
 */

#include "drmP.h"
+#include "drm.h"

#include "nouveau_drv.h"
+#include "nouveau_drm.h"
struct nvc0_fb_priv {
struct page *r100c10_page;
dma_addr_t r100c10;
};
static void
nvc0_fb_destroy(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nvc0_fb_priv *priv = pfb->priv;
if (priv->r100c10_page) {
pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
__free_page(priv->r100c10_page);
}
kfree(priv);
pfb->priv = NULL;
}
static int
nvc0_fb_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nvc0_fb_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
pfb->priv = priv;
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c10_page) {
nvc0_fb_destroy(dev);
return -ENOMEM;
}
priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
nvc0_fb_destroy(dev);
return -EFAULT;
}
return 0;
}
int int
nvc0_fb_init(struct drm_device *dev) nvc0_fb_init(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_fb_priv *priv;
int ret;
if (!dev_priv->engine.fb.priv) {
ret = nvc0_fb_create(dev);
if (ret)
return ret;
}
priv = dev_priv->engine.fb.priv;
nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
return 0; return 0;
} }
void void
nvc0_fb_takedown(struct drm_device *dev) nvc0_fb_takedown(struct drm_device *dev)
{ {
nvc0_fb_destroy(dev);
} }
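The new nvc0_fb code keeps a single zeroed scratch page DMA-mapped for the life of the driver and points register 0x100c10 at its bus address shifted right by 8. A minimal sketch of the same allocate/map/check/unmap pattern, using the legacy PCI DMA API the patch itself uses (pdev stands in for dev->pdev):

    struct page *pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
    dma_addr_t addr;

    if (!pg)
            return -ENOMEM;
    addr = pci_map_page(pdev, pg, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(pdev, addr)) {
            __free_page(pg);        /* mapping failed, don't leak the page */
            return -EFAULT;
    }
    /* ... hardware dereferences addr until teardown ... */
    pci_unmap_page(pdev, addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    __free_page(pg);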
...@@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info) ...@@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev; struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel; struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
int ret, format; int ret, format;
ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d); ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
...@@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) ...@@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
OUT_RING (chan, 0x0000902d); OUT_RING (chan, 0x0000902d);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset)); OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset)); OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0); OUT_RING (chan, 0);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
...@@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) ...@@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length); OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual); OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual); OUT_RING (chan, info->var.yres_virtual);
OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); OUT_RING (chan, upper_32_bits(fb->vma.offset));
OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); OUT_RING (chan, lower_32_bits(fb->vma.offset));
BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
OUT_RING (chan, format); OUT_RING (chan, format);
OUT_RING (chan, 1); OUT_RING (chan, 1);
...@@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info) ...@@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length); OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual); OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual); OUT_RING (chan, info->var.yres_virtual);
OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); OUT_RING (chan, upper_32_bits(fb->vma.offset));
OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); OUT_RING (chan, lower_32_bits(fb->vma.offset));
FIRE_RING (chan); FIRE_RING (chan);
return 0; return 0;
......
...@@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev) ...@@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev)
int i; int i;
for (i = 0; i < 128; i++) { for (i = 0; i < 128; i++) {
if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1)) if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
continue; continue;
nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000); nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, i); nv_wr32(dev, 0x002634, i);
if (!nv_wait(dev, 0x002634, 0xffffffff, i)) { if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n", NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
......
...@@ -28,7 +28,34 @@ ...@@ -28,7 +28,34 @@
#include "nouveau_drv.h" #include "nouveau_drv.h"
#include "nouveau_mm.h" #include "nouveau_mm.h"
#include "nvc0_graph.h" #include "nvc0_graph.h"
#include "nvc0_grhub.fuc.h"
#include "nvc0_grgpc.fuc.h"
static void
nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
{
NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
nv_rd32(dev, base + 0x400));
NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
}
static void
nvc0_graph_ctxctl_debug(struct drm_device *dev)
{
u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
u32 gpc;
nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
for (gpc = 0; gpc < gpcnr; gpc++)
nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
}
static int static int
nvc0_graph_load_context(struct nouveau_channel *chan) nvc0_graph_load_context(struct nouveau_channel *chan)
...@@ -72,13 +99,24 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) ...@@ -72,13 +99,24 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
if (!ctx) if (!ctx)
return -ENOMEM; return -ENOMEM;
nvc0_graph_load_context(chan); if (!nouveau_ctxfw) {
nv_wr32(dev, 0x409840, 0x80000000);
nv_wo32(grch->grctx, 0x1c, 1); nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
nv_wo32(grch->grctx, 0x20, 0); nv_wr32(dev, 0x409504, 0x00000001);
nv_wo32(grch->grctx, 0x28, 0); if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
nv_wo32(grch->grctx, 0x2c, 0); NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
dev_priv->engine.instmem.flush(dev); nvc0_graph_ctxctl_debug(dev);
return -EBUSY;
}
} else {
nvc0_graph_load_context(chan);
nv_wo32(grch->grctx, 0x1c, 1);
nv_wo32(grch->grctx, 0x20, 0);
nv_wo32(grch->grctx, 0x28, 0);
nv_wo32(grch->grctx, 0x2c, 0);
dev_priv->engine.instmem.flush(dev);
}
ret = nvc0_grctx_generate(chan); ret = nvc0_grctx_generate(chan);
if (ret) { if (ret) {
...@@ -86,10 +124,21 @@ nvc0_graph_construct_context(struct nouveau_channel *chan) ...@@ -86,10 +124,21 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
return ret; return ret;
} }
ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); if (!nouveau_ctxfw) {
if (ret) { nv_wr32(dev, 0x409840, 0x80000000);
kfree(ctx); nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
return ret; nv_wr32(dev, 0x409504, 0x00000002);
if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
nvc0_graph_ctxctl_debug(dev);
return -EBUSY;
}
} else {
ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
if (ret) {
kfree(ctx);
return ret;
}
} }
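Both new branches above speak the same mailbox protocol to the HUB ucode: reset the done flag, hand over the channel's instance address, write an opcode, then poll bit 31 of 0x409800. A sketch of that handshake as a hypothetical helper (ctxctl_op() is not part of the patch; opcodes per the code above, 1 = set channel, 2 = save context):

    static int ctxctl_op(struct drm_device *dev, u64 inst, u32 op)
    {
            nv_wr32(dev, 0x409840, 0x80000000);              /* clear done flag */
            nv_wr32(dev, 0x409500, 0x80000000 | inst >> 12); /* channel instance */
            nv_wr32(dev, 0x409504, op);                      /* kick the ucode */
            if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000))
                    return -EBUSY;                           /* ucode never acked */
            return 0;
    }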
for (i = 0; i < priv->grctx_size; i += 4) for (i = 0; i < priv->grctx_size; i += 4)
...@@ -108,50 +157,50 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) ...@@ -108,50 +157,50 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
int i = 0, gpc, tp, ret; int i = 0, gpc, tp, ret;
u32 magic; u32 magic;
ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM, ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
&grch->unk408004); &grch->unk408004);
if (ret) if (ret)
return ret; return ret;
ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM, ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
&grch->unk40800c); &grch->unk40800c);
if (ret) if (ret)
return ret; return ret;
ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER, NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
&grch->unk418810); &grch->unk418810);
if (ret) if (ret)
return ret; return ret;
ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM, ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
&grch->mmio); &grch->mmio);
if (ret) if (ret)
return ret; return ret;
nv_wo32(grch->mmio, i++ * 4, 0x00408004); nv_wo32(grch->mmio, i++ * 4, 0x00408004);
nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00408008); nv_wo32(grch->mmio, i++ * 4, 0x00408008);
nv_wo32(grch->mmio, i++ * 4, 0x80000018); nv_wo32(grch->mmio, i++ * 4, 0x80000018);
nv_wo32(grch->mmio, i++ * 4, 0x0040800c); nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00408010); nv_wo32(grch->mmio, i++ * 4, 0x00408010);
nv_wo32(grch->mmio, i++ * 4, 0x80000000); nv_wo32(grch->mmio, i++ * 4, 0x80000000);
nv_wo32(grch->mmio, i++ * 4, 0x00418810); nv_wo32(grch->mmio, i++ * 4, 0x00418810);
nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12); nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
nv_wo32(grch->mmio, i++ * 4, 0x00419848); nv_wo32(grch->mmio, i++ * 4, 0x00419848);
nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12); nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
nv_wo32(grch->mmio, i++ * 4, 0x00419004); nv_wo32(grch->mmio, i++ * 4, 0x00419004);
nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00419008); nv_wo32(grch->mmio, i++ * 4, 0x00419008);
nv_wo32(grch->mmio, i++ * 4, 0x00000000); nv_wo32(grch->mmio, i++ * 4, 0x00000000);
nv_wo32(grch->mmio, i++ * 4, 0x00418808); nv_wo32(grch->mmio, i++ * 4, 0x00418808);
nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x0041880c); nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
nv_wo32(grch->mmio, i++ * 4, 0x80000018); nv_wo32(grch->mmio, i++ * 4, 0x80000018);
...@@ -159,7 +208,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) ...@@ -159,7 +208,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
nv_wo32(grch->mmio, i++ * 4, 0x00405830); nv_wo32(grch->mmio, i++ * 4, 0x00405830);
nv_wo32(grch->mmio, i++ * 4, magic); nv_wo32(grch->mmio, i++ * 4, magic);
for (gpc = 0; gpc < priv->gpc_nr; gpc++) { for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) { for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
nv_wo32(grch->mmio, i++ * 4, reg); nv_wo32(grch->mmio, i++ * 4, reg);
nv_wo32(grch->mmio, i++ * 4, magic); nv_wo32(grch->mmio, i++ * 4, magic);
...@@ -186,7 +235,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) ...@@ -186,7 +235,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
return -ENOMEM; return -ENOMEM;
chan->engctx[NVOBJ_ENGINE_GR] = grch; chan->engctx[NVOBJ_ENGINE_GR] = grch;
ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
&grch->grctx); &grch->grctx);
if (ret) if (ret)
...@@ -197,8 +246,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) ...@@ -197,8 +246,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
if (ret) if (ret)
goto error; goto error;
nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4); nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst)); nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
pinstmem->flush(dev); pinstmem->flush(dev);
if (!priv->grctx_vals) { if (!priv->grctx_vals) {
...@@ -210,15 +259,20 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine) ...@@ -210,15 +259,20 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
for (i = 0; i < priv->grctx_size; i += 4) for (i = 0; i < priv->grctx_size; i += 4)
nv_wo32(grctx, i, priv->grctx_vals[i / 4]); nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
nv_wo32(grctx, 0xf4, 0); if (!nouveau_ctxfw) {
nv_wo32(grctx, 0xf8, 0); nv_wo32(grctx, 0x00, grch->mmio_nr);
nv_wo32(grctx, 0x10, grch->mmio_nr); nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); } else {
nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); nv_wo32(grctx, 0xf4, 0);
nv_wo32(grctx, 0x1c, 1); nv_wo32(grctx, 0xf8, 0);
nv_wo32(grctx, 0x20, 0); nv_wo32(grctx, 0x10, grch->mmio_nr);
nv_wo32(grctx, 0x28, 0); nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
nv_wo32(grctx, 0x2c, 0); nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
nv_wo32(grctx, 0x1c, 1);
nv_wo32(grctx, 0x20, 0);
nv_wo32(grctx, 0x28, 0);
nv_wo32(grctx, 0x2c, 0);
}
pinstmem->flush(dev); pinstmem->flush(dev);
return 0; return 0;
...@@ -296,6 +350,7 @@ static void ...@@ -296,6 +350,7 @@ static void
nvc0_graph_init_gpc_0(struct drm_device *dev) nvc0_graph_init_gpc_0(struct drm_device *dev)
{ {
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
u32 data[TP_MAX / 8]; u32 data[TP_MAX / 8];
u8 tpnr[GPC_MAX]; u8 tpnr[GPC_MAX];
int i, gpc, tpc; int i, gpc, tpc;
...@@ -307,13 +362,6 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) ...@@ -307,13 +362,6 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
* 465: 3/4/4/0 4 7 * 465: 3/4/4/0 4 7
* 470: 3/3/4/4 5 5 * 470: 3/3/4/4 5 5
* 480: 3/4/4/4 6 6 * 480: 3/4/4/4 6 6
*
* magicgpc918
* 450: 00200000 00000000001000000000000000000000
* 460: 00124925 00000000000100100100100100100101
* 465: 000ba2e9 00000000000010111010001011101001
* 470: 00092493 00000000000010010010010010010011
* 480: 00088889 00000000000010001000100010001001
*/ */
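The removed table isn't lost: each constant was DIV_ROUND_UP(0x00800000, tp_total) all along, which is why the new code computes it instead. Checking against the TP counts listed in the comment above:

    /* DIV_ROUND_UP(0x00800000, tp_total) reproduces every removed value:
     *   450: tp_total  4 -> 0x00200000
     *   460: tp_total  7 -> 0x00124925
     *   465: tp_total 11 -> 0x000ba2e9
     *   470: tp_total 14 -> 0x00092493
     *   480: tp_total 15 -> 0x00088889
     */
    const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);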
memset(data, 0x00, sizeof(data)); memset(data, 0x00, sizeof(data));
...@@ -336,10 +384,10 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) ...@@ -336,10 +384,10 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
priv->tp_nr[gpc]); priv->tp_nr[gpc]);
nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total); nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918); nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
} }
nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918); nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
} }
...@@ -419,8 +467,51 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base, ...@@ -419,8 +467,51 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
static int static int
nvc0_graph_init_ctxctl(struct drm_device *dev) nvc0_graph_init_ctxctl(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR); struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
u32 r000260; u32 r000260;
int i;
if (!nouveau_ctxfw) {
/* load HUB microcode */
r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
nv_wr32(dev, 0x4091c0, 0x01000000);
for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
nv_wr32(dev, 0x409180, 0x01000000);
for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
if ((i & 0x3f) == 0)
nv_wr32(dev, 0x409188, i >> 6);
nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
}
/* load GPC microcode */
nv_wr32(dev, 0x41a1c0, 0x01000000);
for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
nv_wr32(dev, 0x41a180, 0x01000000);
for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
if ((i & 0x3f) == 0)
nv_wr32(dev, 0x41a188, i >> 6);
nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
}
nv_wr32(dev, 0x000260, r000260);
/* start HUB ucode running, it'll init the GPCs */
nv_wr32(dev, 0x409800, dev_priv->chipset);
nv_wr32(dev, 0x40910c, 0x00000000);
nv_wr32(dev, 0x409100, 0x00000002);
if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
nvc0_graph_ctxctl_debug(dev);
return -EBUSY;
}
priv->grctx_size = nv_rd32(dev, 0x409804);
return 0;
}
/* load fuc microcode */ /* load fuc microcode */
r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
...@@ -527,6 +618,22 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) ...@@ -527,6 +618,22 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
return i; return i;
} }
static void
nvc0_graph_ctxctl_isr(struct drm_device *dev)
{
u32 ustat = nv_rd32(dev, 0x409c18);
if (ustat & 0x00000001)
NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
if (ustat & 0x00080000)
NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
if (ustat & ~0x00080001)
NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
nvc0_graph_ctxctl_debug(dev);
nv_wr32(dev, 0x409c20, ustat);
}
static void static void
nvc0_graph_isr(struct drm_device *dev) nvc0_graph_isr(struct drm_device *dev)
{ {
...@@ -578,11 +685,7 @@ nvc0_graph_isr(struct drm_device *dev) ...@@ -578,11 +685,7 @@ nvc0_graph_isr(struct drm_device *dev)
} }
if (stat & 0x00080000) { if (stat & 0x00080000) {
u32 ustat = nv_rd32(dev, 0x409c18); nvc0_graph_ctxctl_isr(dev);
NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
nv_wr32(dev, 0x409c20, ustat);
nv_wr32(dev, 0x400100, 0x00080000); nv_wr32(dev, 0x400100, 0x00080000);
stat &= ~0x00080000; stat &= ~0x00080000;
} }
...@@ -606,7 +709,7 @@ nvc0_runk140_isr(struct drm_device *dev) ...@@ -606,7 +709,7 @@ nvc0_runk140_isr(struct drm_device *dev)
u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0); u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0); u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1); NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
units &= ~(1 << unit); units &= ~(1 << unit);
} }
} }
...@@ -651,10 +754,12 @@ nvc0_graph_destroy(struct drm_device *dev, int engine) ...@@ -651,10 +754,12 @@ nvc0_graph_destroy(struct drm_device *dev, int engine)
{ {
struct nvc0_graph_priv *priv = nv_engine(dev, engine); struct nvc0_graph_priv *priv = nv_engine(dev, engine);
nvc0_graph_destroy_fw(&priv->fuc409c); if (nouveau_ctxfw) {
nvc0_graph_destroy_fw(&priv->fuc409d); nvc0_graph_destroy_fw(&priv->fuc409c);
nvc0_graph_destroy_fw(&priv->fuc41ac); nvc0_graph_destroy_fw(&priv->fuc409d);
nvc0_graph_destroy_fw(&priv->fuc41ad); nvc0_graph_destroy_fw(&priv->fuc41ac);
nvc0_graph_destroy_fw(&priv->fuc41ad);
}
nouveau_irq_unregister(dev, 12); nouveau_irq_unregister(dev, 12);
nouveau_irq_unregister(dev, 25); nouveau_irq_unregister(dev, 25);
...@@ -675,13 +780,10 @@ nvc0_graph_create(struct drm_device *dev) ...@@ -675,13 +780,10 @@ nvc0_graph_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv; struct nvc0_graph_priv *priv;
int ret, gpc, i; int ret, gpc, i;
u32 fermi;
switch (dev_priv->chipset) { fermi = nvc0_graph_class(dev);
case 0xc0: if (!fermi) {
case 0xc3:
case 0xc4:
break;
default:
NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
return 0; return 0;
} }
...@@ -701,15 +803,17 @@ nvc0_graph_create(struct drm_device *dev) ...@@ -701,15 +803,17 @@ nvc0_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 12, nvc0_graph_isr); nouveau_irq_register(dev, 12, nvc0_graph_isr);
nouveau_irq_register(dev, 25, nvc0_runk140_isr); nouveau_irq_register(dev, 25, nvc0_runk140_isr);
if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) || if (nouveau_ctxfw) {
nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) || NV_INFO(dev, "PGRAPH: using external firmware\n");
nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) || if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) { nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
ret = 0; nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
goto error; nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
ret = 0;
goto error;
}
} }
ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
if (ret) if (ret)
goto error; goto error;
...@@ -735,25 +839,28 @@ nvc0_graph_create(struct drm_device *dev) ...@@ -735,25 +839,28 @@ nvc0_graph_create(struct drm_device *dev)
case 0xc0: case 0xc0:
if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
priv->magic_not_rop_nr = 0x07; priv->magic_not_rop_nr = 0x07;
/* values filled up to tp_total, the rest 0 */
priv->magicgpc918 = 0x000ba2e9;
} else } else
if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
priv->magic_not_rop_nr = 0x05; priv->magic_not_rop_nr = 0x05;
priv->magicgpc918 = 0x00092493;
} else } else
if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
priv->magic_not_rop_nr = 0x06; priv->magic_not_rop_nr = 0x06;
priv->magicgpc918 = 0x00088889;
} }
break; break;
case 0xc3: /* 450, 4/0/0/0, 2 */ case 0xc3: /* 450, 4/0/0/0, 2 */
priv->magic_not_rop_nr = 0x03; priv->magic_not_rop_nr = 0x03;
priv->magicgpc918 = 0x00200000;
break; break;
case 0xc4: /* 460, 3/4/0/0, 4 */ case 0xc4: /* 460, 3/4/0/0, 4 */
priv->magic_not_rop_nr = 0x01; priv->magic_not_rop_nr = 0x01;
priv->magicgpc918 = 0x00124925; break;
case 0xc1: /* 2/0/0/0, 1 */
priv->magic_not_rop_nr = 0x01;
break;
case 0xc8: /* 4/4/3/4, 5 */
priv->magic_not_rop_nr = 0x06;
break;
case 0xce: /* 4/4/0/0, 4 */
priv->magic_not_rop_nr = 0x03;
break; break;
} }
...@@ -763,13 +870,16 @@ nvc0_graph_create(struct drm_device *dev) ...@@ -763,13 +870,16 @@ nvc0_graph_create(struct drm_device *dev)
priv->tp_nr[3], priv->rop_nr); priv->tp_nr[3], priv->rop_nr);
/* use 0xc3's values... */ /* use 0xc3's values... */
priv->magic_not_rop_nr = 0x03; priv->magic_not_rop_nr = 0x03;
priv->magicgpc918 = 0x00200000;
} }
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip); NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
if (fermi >= 0x9197)
NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
if (fermi >= 0x9297)
NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
return 0; return 0;
......
/* fuc microcode util functions for nvc0 PGRAPH
*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
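mmctx_data() packs one transfer descriptor per word: the register count minus one in bits 31:26, the register address below. A C restatement of the m4 macro, for reference (the helper name matches the macro; everything else is illustrative):

    static inline u32 mmctx_data(u32 addr, u32 count)
    {
            /* (count - 1) << 26 | addr, exactly as the m4 above evaluates */
            return ((count - 1) << 26) | addr;
    }
    /* mmctx_size below recovers the byte total as ((entry >> 26) + 1) * 4 */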
ifdef(`include_code', `
// Error codes
define(`E_BAD_COMMAND', 0x01)
define(`E_CMD_OVERFLOW', 0x02)
// Util macros to help with debugging ucode hangs etc
define(`T_WAIT', 0)
define(`T_MMCTX', 1)
define(`T_STRWAIT', 2)
define(`T_STRINIT', 3)
define(`T_AUTO', 4)
define(`T_CHAN', 5)
define(`T_LOAD', 6)
define(`T_SAVE', 7)
define(`T_LCHAN', 8)
define(`T_LCTXH', 9)
define(`trace_set', `
mov $r8 0x83c
shl b32 $r8 6
clear b32 $r9
bset $r9 $1
iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
')
define(`trace_clr', `
mov $r8 0x85c
shl b32 $r8 6
clear b32 $r9
bset $r9 $1
iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
')
// queue_put - add request to queue
//
// In : $r13 queue pointer
// $r14 command
// $r15 data
//
queue_put:
// make sure we have space...
ld b32 $r8 D[$r13 + 0x0] // GET
ld b32 $r9 D[$r13 + 0x4] // PUT
xor $r8 8
cmpu b32 $r8 $r9
bra ne queue_put_next
mov $r15 E_CMD_OVERFLOW
call error
ret
// store cmd/data on queue
queue_put_next:
and $r8 $r9 7
shl b32 $r8 3
add b32 $r8 $r13
add b32 $r8 8
st b32 D[$r8 + 0x0] $r14
st b32 D[$r8 + 0x4] $r15
// update PUT
add b32 $r9 1
and $r9 0xf
st b32 D[$r13 + 0x4] $r9
ret
// queue_get - fetch request from queue
//
// In : $r13 queue pointer
//
// Out: $p1 clear on success (data available)
// $r14 command
// $r15 data
//
queue_get:
bset $flags $p1
ld b32 $r8 D[$r13 + 0x0] // GET
ld b32 $r9 D[$r13 + 0x4] // PUT
cmpu b32 $r8 $r9
bra e queue_get_done
// fetch first cmd/data pair
and $r9 $r8 7
shl b32 $r9 3
add b32 $r9 $r13
add b32 $r9 8
ld b32 $r14 D[$r9 + 0x0]
ld b32 $r15 D[$r9 + 0x4]
// update GET
add b32 $r8 1
and $r8 0xf
st b32 D[$r13 + 0x0] $r8
bclr $flags $p1
queue_get_done:
ret
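GET and PUT above are free-running mod-16 counters over an 8-slot ring, so "empty" is GET == PUT and "full" is GET == (PUT ^ 8); no slot is wasted and no separate count is kept. A C model of queue_put under those assumptions (names illustrative):

    struct fuc_queue {
            u32 get, put;                    /* free-running, masked to 0xf */
            struct { u32 cmd, data; } ent[8];
    };

    static int fuc_queue_put(struct fuc_queue *q, u32 cmd, u32 data)
    {
            if (q->get == (q->put ^ 8))
                    return -ENOSPC;          /* E_CMD_OVERFLOW in the ucode */
            q->ent[q->put & 7].cmd  = cmd;
            q->ent[q->put & 7].data = data;
            q->put = (q->put + 1) & 0xf;     /* update PUT */
            return 0;
    }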
// nv_rd32 - read 32-bit value from nv register
//
// In : $r14 register
// Out: $r15 value
//
nv_rd32:
mov $r11 0x728
shl b32 $r11 6
mov b32 $r12 $r14
bset $r12 31 // MMIO_CTRL_PENDING
iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
nv_rd32_wait:
iord $r12 I[$r11 + 0x000]
xbit $r12 $r12 31
bra ne nv_rd32_wait
mov $r10 6 // DONE_MMIO_RD
call wait_doneo
iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
ret
// nv_wr32 - write 32-bit value to nv register
//
// In : $r14 register
// $r15 value
//
nv_wr32:
mov $r11 0x728
shl b32 $r11 6
iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
mov b32 $r12 $r14
bset $r12 31 // MMIO_CTRL_PENDING
bset $r12 30 // MMIO_CTRL_WRITE
iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
nv_wr32_wait:
iord $r12 I[$r11 + 0x000]
xbit $r12 $r12 31
bra ne nv_wr32_wait
ret
// (re)set watchdog timer
//
// In : $r15 timeout
//
watchdog_reset:
mov $r8 0x430
shl b32 $r8 6
bset $r15 31
iowr I[$r8 + 0x000] $r15
ret
// clear watchdog timer
watchdog_clear:
mov $r8 0x430
shl b32 $r8 6
iowr I[$r8 + 0x000] $r0
ret
// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
//
// In : $r10 bit to wait on
//
define(`wait_done', `
$1:
trace_set(T_WAIT);
mov $r8 0x818
shl b32 $r8 6
iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
wait_done_$1:
mov $r8 0x400
shl b32 $r8 6
iord $r8 I[$r8 + 0x000] // DONE
xbit $r8 $r8 $r10
bra $2 wait_done_$1
trace_clr(T_WAIT)
ret
')
wait_done(wait_donez, ne)
wait_done(wait_doneo, e)
// mmctx_size - determine size of a mmio list transfer
//
// In : $r14 mmio list head
// $r15 mmio list tail
// Out: $r15 transfer size (in bytes)
//
mmctx_size:
clear b32 $r9
nv_mmctx_size_loop:
ld b32 $r8 D[$r14]
shr b32 $r8 26
add b32 $r8 1
shl b32 $r8 2
add b32 $r9 $r8
add b32 $r14 4
cmpu b32 $r14 $r15
bra ne nv_mmctx_size_loop
mov b32 $r15 $r9
ret
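So the size of a list is just the sum of the per-entry register counts times four. A quick check with two raw entries, matching the loop above:

    /* two packed entries: 1 reg at 0x380, 6 regs at 0x400 */
    u32 list[] = { (0 << 26) | 0x000380, (5 << 26) | 0x000400 };
    u32 i, size = 0;
    for (i = 0; i < 2; i++)
            size += ((list[i] >> 26) + 1) * 4;   /* size == (1 + 6) * 4 == 28 */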
// mmctx_xfer - execute a list of mmio transfers
//
// In : $r10 flags
// bit 0: direction (0 = save, 1 = load)
// bit 1: set if first transfer
// bit 2: set if last transfer
// $r11 base
// $r12 mmio list head
// $r13 mmio list tail
// $r14 multi_stride
// $r15 multi_mask
//
mmctx_xfer:
trace_set(T_MMCTX)
mov $r8 0x710
shl b32 $r8 6
clear b32 $r9
or $r11 $r11
bra e mmctx_base_disabled
iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
bset $r9 0 // BASE_EN
mmctx_base_disabled:
or $r14 $r14
bra e mmctx_multi_disabled
iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
bset $r9 1 // MULTI_EN
mmctx_multi_disabled:
add b32 $r8 0x100
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
bset $r11 12 // QLIMIT = 0x10
xbit $r14 $r10 1
shl b32 $r14 17
or $r11 $r14 // START_TRIGGER
iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
// loop over the mmio list, and send requests to the hw
mmctx_exec_loop:
// wait for space in mmctx queue
mmctx_wait_free:
iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
and $r14 0x1f
bra e mmctx_wait_free
// queue up an entry
ld b32 $r14 D[$r12]
or $r14 $r9
iowr I[$r8 + 0x300] $r14
add b32 $r12 4
cmpu b32 $r12 $r13
bra ne mmctx_exec_loop
xbit $r11 $r10 2
bra ne mmctx_stop
// wait for queue to empty
mmctx_fini_wait:
iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
and $r11 0x1f
cmpu b32 $r11 0x10
bra ne mmctx_fini_wait
mov $r10 2 // DONE_MMCTX
call wait_donez
bra mmctx_done
mmctx_stop:
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
bset $r11 12 // QLIMIT = 0x10
bset $r11 18 // STOP_TRIGGER
iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
mmctx_stop_wait:
// wait for STOP_TRIGGER to clear
iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
xbit $r11 $r11 18
bra ne mmctx_stop_wait
mmctx_done:
trace_clr(T_MMCTX)
ret
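For reference, the $r10 flag word decoded by mmctx_xfer, restated as C constants straight from the comment block above (names are illustrative):

    #define MMCTX_DIR_LOAD (1 << 0)  /* direction: 0 = save, 1 = load */
    #define MMCTX_FIRST    (1 << 1)  /* set if first transfer */
    #define MMCTX_LAST     (1 << 2)  /* set if last transfer */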
// Wait for DONE_STRAND
//
strand_wait:
push $r10
mov $r10 2
call wait_donez
pop $r10
ret
// unknown - call before issuing strand commands
//
strand_pre:
mov $r8 0x4afc
sethi $r8 0x20000
mov $r9 0xc
iowr I[$r8] $r9
call strand_wait
ret
// unknown - call after issuing strand commands
//
strand_post:
mov $r8 0x4afc
sethi $r8 0x20000
mov $r9 0xd
iowr I[$r8] $r9
call strand_wait
ret
// Selects strand set?!
//
// In: $r14 id
//
strand_set:
mov $r10 0x4ffc
sethi $r10 0x20000
sub b32 $r11 $r10 0x500
mov $r12 0xf
iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
mov $r12 0xb
iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
call strand_wait
iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
mov $r12 0xa
iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
call strand_wait
ret
// Initialise strand context data
//
// In : $r15 context base
// Out: $r15 context size (in bytes)
//
// Strandset(?) 3 hardcoded currently
//
strand_ctx_init:
trace_set(T_STRINIT)
call strand_pre
mov $r14 3
call strand_set
mov $r10 0x46fc
sethi $r10 0x20000
add b32 $r11 $r10 0x400
iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
mov $r12 1
iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
call strand_wait
sub b32 $r12 $r0 1
iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
mov $r12 2
iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
call strand_wait
call strand_post
// read the size of each strand, poke the context offset of
// each into STRAND_{SAVE,LOAD}_SWBASE now, so we don't need to
// worry about it later
mov $r8 0x880
shl b32 $r8 6
iord $r9 I[$r8 + 0x000] // STRANDS
add b32 $r8 0x2200
shr b32 $r14 $r15 8
ctx_init_strand_loop:
iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
iord $r10 I[$r8 + 0x200] // STRAND_SIZE
shr b32 $r10 6
add b32 $r10 1
add b32 $r14 $r10
add b32 $r8 4
sub b32 $r9 1
bra ne ctx_init_strand_loop
shl b32 $r14 8
sub b32 $r15 $r14 $r15
trace_clr(T_STRINIT)
ret
')
...@@ -57,8 +57,7 @@ struct nvc0_graph_priv { ...@@ -57,8 +57,7 @@ struct nvc0_graph_priv {
struct nouveau_gpuobj *unk4188b4; struct nouveau_gpuobj *unk4188b4;
struct nouveau_gpuobj *unk4188b8; struct nouveau_gpuobj *unk4188b8;
u8 magic_not_rop_nr; u8 magic_not_rop_nr;
u32 magicgpc918;
}; };
struct nvc0_graph_chan { struct nvc0_graph_chan {
...@@ -72,4 +71,25 @@ struct nvc0_graph_chan { ...@@ -72,4 +71,25 @@ struct nvc0_graph_chan {
int nvc0_grctx_generate(struct nouveau_channel *); int nvc0_grctx_generate(struct nouveau_channel *);
/* nvc0_graph.c also uses this to determine supported chipsets */
static inline u32
nvc0_graph_class(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
switch (dev_priv->chipset) {
case 0xc0:
case 0xc3:
case 0xc4:
case 0xce: /* guess, mmio trace shows only 0x9097 state */
return 0x9097;
case 0xc1:
return 0x9197;
case 0xc8:
return 0x9297;
default:
return 0;
}
}
#endif #endif
...@@ -45,6 +45,9 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data) ...@@ -45,6 +45,9 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
static void static void
nvc0_grctx_generate_9097(struct drm_device *dev) nvc0_grctx_generate_9097(struct drm_device *dev)
{ {
u32 fermi = nvc0_graph_class(dev);
u32 mthd;
nv_mthd(dev, 0x9097, 0x0800, 0x00000000); nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
nv_mthd(dev, 0x9097, 0x0840, 0x00000000); nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
nv_mthd(dev, 0x9097, 0x0880, 0x00000000); nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
...@@ -824,134 +827,10 @@ nvc0_grctx_generate_9097(struct drm_device *dev) ...@@ -824,134 +827,10 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
nv_mthd(dev, 0x9097, 0x3400, 0x00000000); if (fermi == 0x9097) {
nv_mthd(dev, 0x9097, 0x3404, 0x00000000); for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
nv_mthd(dev, 0x9097, 0x3408, 0x00000000); nv_mthd(dev, 0x9097, mthd, 0x00000000);
nv_mthd(dev, 0x9097, 0x340c, 0x00000000); }
nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
nv_mthd(dev, 0x9097, 0x030c, 0x00000001); nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
nv_mthd(dev, 0x9097, 0x1944, 0x00000000); nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
nv_mthd(dev, 0x9097, 0x1514, 0x00000000); nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
...@@ -1320,6 +1199,37 @@ nvc0_grctx_generate_9097(struct drm_device *dev) ...@@ -1320,6 +1199,37 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
nv_mthd(dev, 0x9097, 0x3410, 0x80002006); nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
} }
static void
nvc0_grctx_generate_9197(struct drm_device *dev)
{
u32 fermi = nvc0_graph_class(dev);
u32 mthd;
if (fermi == 0x9197) {
for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
nv_mthd(dev, 0x9197, mthd, 0x00000000);
}
nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
}
static void
nvc0_grctx_generate_9297(struct drm_device *dev)
{
u32 fermi = nvc0_graph_class(dev);
u32 mthd;
if (fermi == 0x9297) {
for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
nv_mthd(dev, 0x9297, mthd, 0x00000000);
}
nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
}
static void static void
nvc0_grctx_generate_902d(struct drm_device *dev) nvc0_grctx_generate_902d(struct drm_device *dev)
{ {
...@@ -1559,8 +1469,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev) ...@@ -1559,8 +1469,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev)
static void static void
nvc0_grctx_generate_shaders(struct drm_device *dev) nvc0_grctx_generate_shaders(struct drm_device *dev)
{ {
nv_wr32(dev, 0x405800, 0x078000bf); struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, 0x405830, 0x02180000);
if (dev_priv->chipset != 0xc1) {
nv_wr32(dev, 0x405800, 0x078000bf);
nv_wr32(dev, 0x405830, 0x02180000);
} else {
nv_wr32(dev, 0x405800, 0x0f8000bf);
nv_wr32(dev, 0x405830, 0x02180218);
}
nv_wr32(dev, 0x405834, 0x00000000); nv_wr32(dev, 0x405834, 0x00000000);
nv_wr32(dev, 0x405838, 0x00000000); nv_wr32(dev, 0x405838, 0x00000000);
nv_wr32(dev, 0x405854, 0x00000000); nv_wr32(dev, 0x405854, 0x00000000);
...@@ -1586,10 +1503,16 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev) ...@@ -1586,10 +1503,16 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev)
static void static void
nvc0_grctx_generate_unk64xx(struct drm_device *dev) nvc0_grctx_generate_unk64xx(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, 0x4064a8, 0x00000000); nv_wr32(dev, 0x4064a8, 0x00000000);
nv_wr32(dev, 0x4064ac, 0x00003fff); nv_wr32(dev, 0x4064ac, 0x00003fff);
nv_wr32(dev, 0x4064b4, 0x00000000); nv_wr32(dev, 0x4064b4, 0x00000000);
nv_wr32(dev, 0x4064b8, 0x00000000); nv_wr32(dev, 0x4064b8, 0x00000000);
if (dev_priv->chipset == 0xc1) {
nv_wr32(dev, 0x4064c0, 0x80140078);
nv_wr32(dev, 0x4064c4, 0x0086ffff);
}
} }
static void static void
...@@ -1622,21 +1545,14 @@ static void ...@@ -1622,21 +1545,14 @@ static void
nvc0_grctx_generate_rop(struct drm_device *dev) nvc0_grctx_generate_rop(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int chipset = dev_priv->chipset;
/* ROPC_BROADCAST */ /* ROPC_BROADCAST */
nv_wr32(dev, 0x408800, 0x02802a3c); nv_wr32(dev, 0x408800, 0x02802a3c);
nv_wr32(dev, 0x408804, 0x00000040); nv_wr32(dev, 0x408804, 0x00000040);
nv_wr32(dev, 0x408808, 0x0003e00d); nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
switch (dev_priv->chipset) { nv_wr32(dev, 0x408900, 0x3080b801);
case 0xc0: nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
nv_wr32(dev, 0x408900, 0x0080b801);
break;
case 0xc3:
case 0xc4:
nv_wr32(dev, 0x408900, 0x3080b801);
break;
}
nv_wr32(dev, 0x408904, 0x02000001);
nv_wr32(dev, 0x408908, 0x00c80929); nv_wr32(dev, 0x408908, 0x00c80929);
nv_wr32(dev, 0x40890c, 0x00000000); nv_wr32(dev, 0x40890c, 0x00000000);
nv_wr32(dev, 0x408980, 0x0000011d); nv_wr32(dev, 0x408980, 0x0000011d);
...@@ -1645,6 +1561,8 @@ nvc0_grctx_generate_rop(struct drm_device *dev) ...@@ -1645,6 +1561,8 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
static void static void
nvc0_grctx_generate_gpc(struct drm_device *dev) nvc0_grctx_generate_gpc(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private;
int chipset = dev_priv->chipset;
int i; int i;
/* GPC_BROADCAST */ /* GPC_BROADCAST */
...@@ -1676,7 +1594,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) ...@@ -1676,7 +1594,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x41880c, 0x00000000); nv_wr32(dev, 0x41880c, 0x00000000);
nv_wr32(dev, 0x418810, 0x00000000); nv_wr32(dev, 0x418810, 0x00000000);
nv_wr32(dev, 0x418828, 0x00008442); nv_wr32(dev, 0x418828, 0x00008442);
nv_wr32(dev, 0x418830, 0x00000001); nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
nv_wr32(dev, 0x4188d8, 0x00000008); nv_wr32(dev, 0x4188d8, 0x00000008);
nv_wr32(dev, 0x4188e0, 0x01000000); nv_wr32(dev, 0x4188e0, 0x01000000);
nv_wr32(dev, 0x4188e8, 0x00000000); nv_wr32(dev, 0x4188e8, 0x00000000);
...@@ -1684,7 +1602,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) ...@@ -1684,7 +1602,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x4188f0, 0x00000000); nv_wr32(dev, 0x4188f0, 0x00000000);
nv_wr32(dev, 0x4188f4, 0x00000000); nv_wr32(dev, 0x4188f4, 0x00000000);
nv_wr32(dev, 0x4188f8, 0x00000000); nv_wr32(dev, 0x4188f8, 0x00000000);
nv_wr32(dev, 0x4188fc, 0x00100000); nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
nv_wr32(dev, 0x41891c, 0x00ff00ff); nv_wr32(dev, 0x41891c, 0x00ff00ff);
nv_wr32(dev, 0x418924, 0x00000000); nv_wr32(dev, 0x418924, 0x00000000);
nv_wr32(dev, 0x418928, 0x00ffff00); nv_wr32(dev, 0x418928, 0x00ffff00);
...@@ -1715,6 +1633,8 @@ nvc0_grctx_generate_gpc(struct drm_device *dev) ...@@ -1715,6 +1633,8 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418c24, 0x00000000); nv_wr32(dev, 0x418c24, 0x00000000);
nv_wr32(dev, 0x418c28, 0x00000000); nv_wr32(dev, 0x418c28, 0x00000000);
nv_wr32(dev, 0x418c2c, 0x00000000); nv_wr32(dev, 0x418c2c, 0x00000000);
if (chipset == 0xc1)
nv_wr32(dev, 0x418c6c, 0x00000001);
nv_wr32(dev, 0x418c80, 0x20200004); nv_wr32(dev, 0x418c80, 0x20200004);
nv_wr32(dev, 0x418c8c, 0x00000001); nv_wr32(dev, 0x418c8c, 0x00000001);
nv_wr32(dev, 0x419000, 0x00000780); nv_wr32(dev, 0x419000, 0x00000780);
...@@ -1727,10 +1647,13 @@ static void ...@@ -1727,10 +1647,13 @@ static void
nvc0_grctx_generate_tp(struct drm_device *dev) nvc0_grctx_generate_tp(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int chipset = dev_priv->chipset;
/* GPC_BROADCAST.TP_BROADCAST */ /* GPC_BROADCAST.TP_BROADCAST */
nv_wr32(dev, 0x419818, 0x00000000);
nv_wr32(dev, 0x41983c, 0x00038bc7);
nv_wr32(dev, 0x419848, 0x00000000); nv_wr32(dev, 0x419848, 0x00000000);
nv_wr32(dev, 0x419864, 0x0000012a); nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
nv_wr32(dev, 0x419888, 0x00000000); nv_wr32(dev, 0x419888, 0x00000000);
nv_wr32(dev, 0x419a00, 0x000001f0); nv_wr32(dev, 0x419a00, 0x000001f0);
nv_wr32(dev, 0x419a04, 0x00000001); nv_wr32(dev, 0x419a04, 0x00000001);
...@@ -1740,8 +1663,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev) ...@@ -1740,8 +1663,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419a14, 0x00000200); nv_wr32(dev, 0x419a14, 0x00000200);
nv_wr32(dev, 0x419a1c, 0x00000000); nv_wr32(dev, 0x419a1c, 0x00000000);
nv_wr32(dev, 0x419a20, 0x00000800); nv_wr32(dev, 0x419a20, 0x00000800);
if (dev_priv->chipset != 0xc0) if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */ nv_wr32(dev, 0x00419ac4, 0x0007f440);
nv_wr32(dev, 0x419b00, 0x0a418820); nv_wr32(dev, 0x419b00, 0x0a418820);
nv_wr32(dev, 0x419b04, 0x062080e6); nv_wr32(dev, 0x419b04, 0x062080e6);
nv_wr32(dev, 0x419b08, 0x020398a4); nv_wr32(dev, 0x419b08, 0x020398a4);
...@@ -1749,17 +1672,19 @@ nvc0_grctx_generate_tp(struct drm_device *dev) ...@@ -1749,17 +1672,19 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419b10, 0x0a418820); nv_wr32(dev, 0x419b10, 0x0a418820);
nv_wr32(dev, 0x419b14, 0x000000e6); nv_wr32(dev, 0x419b14, 0x000000e6);
nv_wr32(dev, 0x419bd0, 0x00900103); nv_wr32(dev, 0x419bd0, 0x00900103);
nv_wr32(dev, 0x419be0, 0x00000001); nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
nv_wr32(dev, 0x419be4, 0x00000000); nv_wr32(dev, 0x419be4, 0x00000000);
nv_wr32(dev, 0x419c00, 0x00000002); nv_wr32(dev, 0x419c00, 0x00000002);
nv_wr32(dev, 0x419c04, 0x00000006); nv_wr32(dev, 0x419c04, 0x00000006);
nv_wr32(dev, 0x419c08, 0x00000002); nv_wr32(dev, 0x419c08, 0x00000002);
nv_wr32(dev, 0x419c20, 0x00000000); nv_wr32(dev, 0x419c20, 0x00000000);
nv_wr32(dev, 0x419cbc, 0x28137606); nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048
nv_wr32(dev, 0x419ce8, 0x00000000); nv_wr32(dev, 0x419ce8, 0x00000000);
nv_wr32(dev, 0x419cf4, 0x00000183); nv_wr32(dev, 0x419cf4, 0x00000183);
nv_wr32(dev, 0x419d20, 0x02180000); nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
nv_wr32(dev, 0x419d24, 0x00001fff); nv_wr32(dev, 0x419d24, 0x00001fff);
if (chipset == 0xc1)
nv_wr32(dev, 0x419d44, 0x02180218);
nv_wr32(dev, 0x419e04, 0x00000000); nv_wr32(dev, 0x419e04, 0x00000000);
nv_wr32(dev, 0x419e08, 0x00000000); nv_wr32(dev, 0x419e08, 0x00000000);
nv_wr32(dev, 0x419e0c, 0x00000000); nv_wr32(dev, 0x419e0c, 0x00000000);
...@@ -1785,11 +1710,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev) ...@@ -1785,11 +1710,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419e8c, 0x00000000); nv_wr32(dev, 0x419e8c, 0x00000000);
nv_wr32(dev, 0x419e90, 0x00000000); nv_wr32(dev, 0x419e90, 0x00000000);
nv_wr32(dev, 0x419e98, 0x00000000); nv_wr32(dev, 0x419e98, 0x00000000);
if (dev_priv->chipset != 0xc0) if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x419ee0, 0x00011110); nv_wr32(dev, 0x419ee0, 0x00011110);
nv_wr32(dev, 0x419f50, 0x00000000); nv_wr32(dev, 0x419f50, 0x00000000);
nv_wr32(dev, 0x419f54, 0x00000000); nv_wr32(dev, 0x419f54, 0x00000000);
if (dev_priv->chipset != 0xc0) if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x419f58, 0x00000000); nv_wr32(dev, 0x419f58, 0x00000000);
} }
...@@ -1801,6 +1726,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) ...@@ -1801,6 +1726,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
struct drm_device *dev = chan->dev; struct drm_device *dev = chan->dev;
int i, gpc, tp, id; int i, gpc, tp, id;
u32 fermi = nvc0_graph_class(dev);
u32 r000260, tmp; u32 r000260, tmp;
r000260 = nv_rd32(dev, 0x000260); r000260 = nv_rd32(dev, 0x000260);
...@@ -1857,10 +1783,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan) ...@@ -1857,10 +1783,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x40587c, 0x00000000); nv_wr32(dev, 0x40587c, 0x00000000);
if (1) { if (1) {
const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 }; const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0,
16, 0, 0, 0, 0, 0, 8, 0 };
u8 max = chipset_tp_max[dev_priv->chipset & 0x0f]; u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
u8 tpnr[GPC_MAX]; u8 tpnr[GPC_MAX];
u8 data[32]; u8 data[TP_MAX];
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
memset(data, 0x1f, sizeof(data)); memset(data, 0x1f, sizeof(data));
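The widened chipset_tp_max[] is indexed by the low nibble of the chipset id, so the new entries decode as below (a reading of the table, not new behaviour):

    /* chipset & 0x0f: 0xc0 -> 16, 0xc1 -> 4, 0xc3 -> 4, 0xc4 -> 8,
     *                 0xc8 -> 16, 0xce -> 8 TPs maximum */
    u8 max = chipset_tp_max[0xce & 0x0f];    /* == 8 */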
...@@ -2633,6 +2560,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan) ...@@ -2633,6 +2560,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000053f, 0xffff0000); nv_icmd(dev, 0x0000053f, 0xffff0000);
nv_icmd(dev, 0x00000585, 0x0000003f); nv_icmd(dev, 0x00000585, 0x0000003f);
nv_icmd(dev, 0x00000576, 0x00000003); nv_icmd(dev, 0x00000576, 0x00000003);
if (dev_priv->chipset == 0xc1)
nv_icmd(dev, 0x0000057b, 0x00000059);
nv_icmd(dev, 0x00000586, 0x00000040); nv_icmd(dev, 0x00000586, 0x00000040);
nv_icmd(dev, 0x00000582, 0x00000080); nv_icmd(dev, 0x00000582, 0x00000080);
nv_icmd(dev, 0x00000583, 0x00000080); nv_icmd(dev, 0x00000583, 0x00000080);
...@@ -2865,6 +2794,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan) ...@@ -2865,6 +2794,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x404154, 0x00000400); nv_wr32(dev, 0x404154, 0x00000400);
nvc0_grctx_generate_9097(dev); nvc0_grctx_generate_9097(dev);
if (fermi >= 0x9197)
nvc0_grctx_generate_9197(dev);
if (fermi >= 0x9297)
nvc0_grctx_generate_9297(dev);
nvc0_grctx_generate_902d(dev); nvc0_grctx_generate_902d(dev);
nvc0_grctx_generate_9039(dev); nvc0_grctx_generate_9039(dev);
nvc0_grctx_generate_90c0(dev); nvc0_grctx_generate_90c0(dev);
......
/* fuc microcode for nvc0 PGRAPH/GPC
*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
/* To build:
* m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
*/
/* TODO
* - bracket certain functions with scratch writes, useful for debugging
* - watchdog timer around ctx operations
*/
.section nvc0_grgpc_data
include(`nvc0_graph.fuc')
gpc_id: .b32 0
gpc_mmio_list_head: .b32 0
gpc_mmio_list_tail: .b32 0
tpc_count: .b32 0
tpc_mask: .b32 0
tpc_mmio_list_head: .b32 0
tpc_mmio_list_tail: .b32 0
cmd_queue: queue_init
// chipset descriptions
chipsets:
.b8 0xc0 0 0 0
.b16 nvc0_gpc_mmio_head
.b16 nvc0_gpc_mmio_tail
.b16 nvc0_tpc_mmio_head
.b16 nvc0_tpc_mmio_tail
.b8 0xc1 0 0 0
.b16 nvc0_gpc_mmio_head
.b16 nvc1_gpc_mmio_tail
.b16 nvc0_tpc_mmio_head
.b16 nvc1_tpc_mmio_tail
.b8 0xc3 0 0 0
.b16 nvc0_gpc_mmio_head
.b16 nvc0_gpc_mmio_tail
.b16 nvc0_tpc_mmio_head
.b16 nvc3_tpc_mmio_tail
.b8 0xc4 0 0 0
.b16 nvc0_gpc_mmio_head
.b16 nvc0_gpc_mmio_tail
.b16 nvc0_tpc_mmio_head
.b16 nvc3_tpc_mmio_tail
.b8 0xc8 0 0 0
.b16 nvc0_gpc_mmio_head
.b16 nvc0_gpc_mmio_tail
.b16 nvc0_tpc_mmio_head
.b16 nvc0_tpc_mmio_tail
.b8 0xce 0 0 0
.b16 nvc0_gpc_mmio_head
.b16 nvc0_gpc_mmio_tail
.b16 nvc0_tpc_mmio_head
.b16 nvc3_tpc_mmio_tail
.b8 0 0 0 0
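Each chipsets record above is 12 bytes, which is why init_find_chipset steps $r1 by 12 and init_context loads the list pointers at offsets 4/6/8/10. A C view of one record, assuming that layout (the struct name is illustrative):

    struct gpc_chipset_rec {
            u8  chipset;                         /* 0 terminates the list */
            u8  pad[3];
            u16 gpc_mmio_head, gpc_mmio_tail;    /* GPC mmio list bounds */
            u16 tpc_mmio_head, tpc_mmio_tail;    /* per-TPC mmio list bounds */
    };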
// GPC mmio lists
nvc0_gpc_mmio_head:
mmctx_data(0x000380, 1)
mmctx_data(0x000400, 6)
mmctx_data(0x000450, 9)
mmctx_data(0x000600, 1)
mmctx_data(0x000684, 1)
mmctx_data(0x000700, 5)
mmctx_data(0x000800, 1)
mmctx_data(0x000808, 3)
mmctx_data(0x000828, 1)
mmctx_data(0x000830, 1)
mmctx_data(0x0008d8, 1)
mmctx_data(0x0008e0, 1)
mmctx_data(0x0008e8, 6)
mmctx_data(0x00091c, 1)
mmctx_data(0x000924, 3)
mmctx_data(0x000b00, 1)
mmctx_data(0x000b08, 6)
mmctx_data(0x000bb8, 1)
mmctx_data(0x000c08, 1)
mmctx_data(0x000c10, 8)
mmctx_data(0x000c80, 1)
mmctx_data(0x000c8c, 1)
mmctx_data(0x001000, 3)
mmctx_data(0x001014, 1)
nvc0_gpc_mmio_tail:
mmctx_data(0x000c6c, 1);
nvc1_gpc_mmio_tail:
// TPC mmio lists
nvc0_tpc_mmio_head:
mmctx_data(0x000018, 1)
mmctx_data(0x00003c, 1)
mmctx_data(0x000048, 1)
mmctx_data(0x000064, 1)
mmctx_data(0x000088, 1)
mmctx_data(0x000200, 6)
mmctx_data(0x00021c, 2)
mmctx_data(0x000300, 6)
mmctx_data(0x0003d0, 1)
mmctx_data(0x0003e0, 2)
mmctx_data(0x000400, 3)
mmctx_data(0x000420, 1)
mmctx_data(0x0004b0, 1)
mmctx_data(0x0004e8, 1)
mmctx_data(0x0004f4, 1)
mmctx_data(0x000520, 2)
mmctx_data(0x000604, 4)
mmctx_data(0x000644, 20)
mmctx_data(0x000698, 1)
mmctx_data(0x000750, 2)
nvc0_tpc_mmio_tail:
mmctx_data(0x000758, 1)
mmctx_data(0x0002c4, 1)
mmctx_data(0x0004bc, 1)
mmctx_data(0x0006e0, 1)
nvc3_tpc_mmio_tail:
mmctx_data(0x000544, 1)
nvc1_tpc_mmio_tail:
.section nvc0_grgpc_code
bra init
define(`include_code')
include(`nvc0_graph.fuc')
// reports an exception to the host
//
// In: $r15 error code (see nvc0_graph.fuc)
//
error:
push $r14
mov $r14 -0x67ec // 0x9814
sethi $r14 0x400000
call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
add b32 $r14 0x41c
mov $r15 1
call nv_wr32 // HUB_CTXCTL_INTR_UP_SET
pop $r14
ret
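The odd negative immediate above is just address construction: presumably -0x67ec sign-extends to 0xffff9814, sethi then supplies the upper bits to give 0x409814 (CC_SCRATCH[5] per the comment), and the later add of 0x41c lands on 0x409c30 for INTR_UP_SET:

    /* a sketch of the arithmetic, under the sign-extension assumption */
    u32 reg = (u32)(-0x67ec & 0xffff) | 0x400000;   /* 0x409814 */
    u32 up  = reg + 0x41c;                          /* 0x409c30 */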
// GPC fuc initialisation, executed by triggering ucode start; it will
// fall through to the main loop after completion.
//
// Input:
// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
// CC_SCRATCH[1]: context base
//
// Output:
// CC_SCRATCH[0]:
// 31:31: set to signal completion
// CC_SCRATCH[1]:
// 31:0: GPC context size
//
init:
clear b32 $r0
mov $sp $r0
// enable fifo access
mov $r1 0x1200
mov $r2 2
iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
// setup i0 handler, and route all interrupts to it
mov $r1 ih
mov $iv0 $r1
mov $r1 0x400
iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
// enable fifo interrupt
mov $r2 4
iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
// enable interrupts
bset $flags ie0
// figure out which GPC we are, and how many TPCs we have
mov $r1 0x608
shl b32 $r1 6
iord $r2 I[$r1 + 0x000] // UNITS
mov $r3 1
and $r2 0x1f
shl b32 $r3 $r2
sub b32 $r3 1
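// $r2 = TPC count (UNITS & 0x1f), $r3 = (1 << count) - 1, one bit per TPC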
st b32 D[$r0 + tpc_count] $r2
st b32 D[$r0 + tpc_mask] $r3
add b32 $r1 0x400
iord $r2 I[$r1 + 0x000] // MYINDEX
st b32 D[$r0 + gpc_id] $r2
// find context data for this chipset
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
mov $r1 chipsets - 12
init_find_chipset:
add b32 $r1 12
ld b32 $r3 D[$r1 + 0x00]
cmpu b32 $r3 $r2
bra e init_context
cmpu b32 $r3 0
bra ne init_find_chipset
// unknown chipset
ret
// initialise context base, and size tracking
init_context:
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
clear b32 $r3 // track GPC context size here
// set mmctx base addresses now so we don't have to do it later,
// they don't currently ever change
mov $r4 0x700
shl b32 $r4 6
shr b32 $r5 $r2 8
iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
// calculate GPC mmio context size, store the chipset-specific
// mmio list pointers somewhere we can get at them later without
// re-parsing the chipset list
clear b32 $r14
clear b32 $r15
ld b16 $r14 D[$r1 + 4]
ld b16 $r15 D[$r1 + 6]
st b16 D[$r0 + gpc_mmio_list_head] $r14
st b16 D[$r0 + gpc_mmio_list_tail] $r15
call mmctx_size
add b32 $r2 $r15
add b32 $r3 $r15
// calculate per-TPC mmio context size, store the list pointers
ld b16 $r14 D[$r1 + 8]
ld b16 $r15 D[$r1 + 10]
st b16 D[$r0 + tpc_mmio_list_head] $r14
st b16 D[$r0 + tpc_mmio_list_tail] $r15
call mmctx_size
ld b32 $r14 D[$r0 + tpc_count]
mulu $r14 $r15
add b32 $r2 $r14
add b32 $r3 $r14
// round up base/size to 256 byte boundary (for strand SWBASE)
add b32 $r4 0x1300
shr b32 $r3 2
iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
shr b32 $r2 8
shr b32 $r3 6
add b32 $r2 1
add b32 $r3 1
shl b32 $r2 8
shl b32 $r3 8
// calculate size of strand context data
mov b32 $r15 $r2
call strand_ctx_init
add b32 $r3 $r15
// save context size, and tell HUB we're done
mov $r1 0x800
shl b32 $r1 6
iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
add b32 $r1 0x800
clear b32 $r2
bset $r2 31
iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
// Main program loop, very simple, sleeps until woken up by the interrupt
// handler, pulls a command from the queue and executes its handler
//
main:
bset $flags $p0
sleep $p0
mov $r13 cmd_queue
call queue_get
bra $p1 main
// 0x0000-0x0003 are all context transfers
cmpu b32 $r14 0x04
bra nc main_not_ctx_xfer
// fetch $flags and mask off $p1/$p2
mov $r1 $flags
mov $r2 0x0006
not b32 $r2
and $r1 $r2
// set $p1/$p2 according to transfer type
shl b32 $r14 1
or $r1 $r14
mov $flags $r1
// transfer context data
call ctx_xfer
bra main
main_not_ctx_xfer:
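// report E_BAD_COMMAND, tagged with the offending command in bits 31:16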
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
call error
bra main
// interrupt handler
ih:
push $r8
mov $r8 $flags
push $r8
push $r9
push $r10
push $r11
push $r13
push $r14
push $r15
// incoming fifo command?
iord $r10 I[$r0 + 0x200] // INTR
and $r11 $r10 0x00000004
bra e ih_no_fifo
// queue incoming fifo command for later processing
mov $r11 0x1900
mov $r13 cmd_queue
iord $r14 I[$r11 + 0x100] // FIFO_CMD
iord $r15 I[$r11 + 0x000] // FIFO_DATA
call queue_put
add b32 $r11 0x400
mov $r14 1
iowr I[$r11 + 0x000] $r14 // FIFO_ACK
// ack, and wake up main()
ih_no_fifo:
iowr I[$r0 + 0x100] $r10 // INTR_ACK
pop $r15
pop $r14
pop $r13
pop $r11
pop $r10
pop $r9
pop $r8
mov $flags $r8
pop $r8
bclr $flags $p0
iret
// Set this GPC's bit in HUB_BAR, used to signal completion of various
// activities to the HUB fuc
//
hub_barrier_done:
mov $r15 1
ld b32 $r14 D[$r0 + gpc_id]
shl b32 $r15 $r14
mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
sethi $r14 0x400000
call nv_wr32
ret
// Disables various things, waits a bit, and re-enables them..
//
// Not sure how exactly this helps, perhaps "ENABLE" is not such a
// good description for the bits we turn off? Anyways, without this,
// funny things happen.
//
ctx_redswitch:
mov $r14 0x614
shl b32 $r14 6
mov $r15 0x020
iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
bra ne ctx_redswitch_delay
mov $r15 0xa20
iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
ret
// Transfer GPC context data between GPU and storage area
//
// In: $r15 context base address
// $p1 clear on save, set on load
// $p2 set if opposite direction done/will be done, so:
// on save it means: "a load will follow this save"
// on load it means: "a save preceded this load"
//
ctx_xfer:
// set context base address
mov $r1 0xa04
shl b32 $r1 6
iowr I[$r1 + 0x000] $r15 // MEM_BASE
bra not $p1 ctx_xfer_not_load
call ctx_redswitch
ctx_xfer_not_load:
// strands
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xc
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
call strand_wait
mov $r2 0x47fc
sethi $r2 0x20000
iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
xbit $r2 $flags $p1
add b32 $r2 3
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
// mmio context
xbit $r10 $flags $p1 // direction
or $r10 2 // first
mov $r11 0x0000
sethi $r11 0x500000
ld b32 $r12 D[$r0 + gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
ld b32 $r12 D[$r0 + gpc_mmio_list_head]
ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
mov $r14 0 // not multi
call mmctx_xfer
// per-TPC mmio context
xbit $r10 $flags $p1 // direction
or $r10 4 // last
mov $r11 0x4000
sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
ld b32 $r12 D[$r0 + gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
ld b32 $r12 D[$r0 + tpc_mmio_list_head]
ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
ld b32 $r15 D[$r0 + tpc_mask]
mov $r14 0x800 // stride = 0x800
call mmctx_xfer
// wait for strands to finish
call strand_wait
// if load, or a save without a load following, do some
// unknown stuff that's done after finishing a block of
// strand commands
bra $p1 ctx_xfer_post
bra not $p2 ctx_xfer_done
ctx_xfer_post:
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xd
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
call strand_wait
// mark completion in HUB's barrier
ctx_xfer_done:
call hub_barrier_done
ret
.align 256
uint32_t nvc0_grgpc_data[] = {
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x000000c0,
0x011000b0,
0x01640114,
0x000000c1,
0x011400b0,
0x01780114,
0x000000c3,
0x011000b0,
0x01740114,
0x000000c4,
0x011000b0,
0x01740114,
0x000000c8,
0x011000b0,
0x01640114,
0x000000ce,
0x011000b0,
0x01740114,
0x00000000,
0x00000380,
0x14000400,
0x20000450,
0x00000600,
0x00000684,
0x10000700,
0x00000800,
0x08000808,
0x00000828,
0x00000830,
0x000008d8,
0x000008e0,
0x140008e8,
0x0000091c,
0x08000924,
0x00000b00,
0x14000b08,
0x00000bb8,
0x00000c08,
0x1c000c10,
0x00000c80,
0x00000c8c,
0x08001000,
0x00001014,
0x00000c6c,
0x00000018,
0x0000003c,
0x00000048,
0x00000064,
0x00000088,
0x14000200,
0x0400021c,
0x14000300,
0x000003d0,
0x040003e0,
0x08000400,
0x00000420,
0x000004b0,
0x000004e8,
0x000004f4,
0x04000520,
0x0c000604,
0x4c000644,
0x00000698,
0x04000750,
0x00000758,
0x000002c4,
0x000004bc,
0x000006e0,
0x00000544,
};
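As a sanity check of the ((count - 1) << 26) | addr packing described above (an inference from matching the .fuc source against this generated array, not a documented format), here is a minimal userspace C decoder; decode() is a hypothetical helper, not part of the driver:

#include <stdio.h>
#include <stdint.h>

/* decode one packed mmio-list word back into its mmctx_data() form,
 * assuming the layout inferred above: count - 1 in bits 31:26,
 * register offset in bits 25:0 */
static void decode(uint32_t word)
{
	printf("mmctx_data(0x%06x, %u)\n",
	       word & 0x03ffffff, (word >> 26) + 1);
}

int main(void)
{
	decode(0x14000400); /* expect mmctx_data(0x000400, 6) */
	decode(0x4c000644); /* expect mmctx_data(0x000644, 20) */
	return 0;
}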
uint32_t nvc0_grgpc_code[] = {
0x03060ef5,
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
0x00f802ec,
0xb60798c4,
0x8dbb0384,
0x0880b600,
0x80008e80,
0x90b6018f,
0x0f94f001,
0xf801d980,
0x0131f400,
0x9800d898,
0x89b801d9,
0x210bf404,
0xb60789c4,
0x9dbb0394,
0x0890b600,
0x98009e98,
0x80b6019f,
0x0f84f001,
0xf400d880,
0x00f80132,
0x0728b7f1,
0xb906b4b6,
0xc9f002ec,
0x00bcd01f,
0xc800bccf,
0x1bf41fcc,
0x06a7f0fa,
0x010321f5,
0xf840bfcf,
0x28b7f100,
0x06b4b607,
0xb980bfd0,
0xc9f002ec,
0x1ec9f01f,
0xcf00bcd0,
0xccc800bc,
0xfa1bf41f,
0x87f100f8,
0x84b60430,
0x1ff9f006,
0xf8008fd0,
0x3087f100,
0x0684b604,
0xf80080d0,
0x3c87f100,
0x0684b608,
0x99f094bd,
0x0089d000,
0x081887f1,
0xd00684b6,
0x87f1008a,
0x84b60400,
0x0088cf06,
0xf4888aff,
0x87f1f31b,
0x84b6085c,
0xf094bd06,
0x89d00099,
0xf100f800,
0xb6083c87,
0x94bd0684,
0xd00099f0,
0x87f10089,
0x84b60818,
0x008ad006,
0x040087f1,
0xcf0684b6,
0x8aff0088,
0xf30bf488,
0x085c87f1,
0xbd0684b6,
0x0099f094,
0xf80089d0,
0x9894bd00,
0x85b600e8,
0x0180b61a,
0xbb0284b6,
0xe0b60098,
0x04efb804,
0xb9eb1bf4,
0x00f8029f,
0x083c87f1,
0xbd0684b6,
0x0199f094,
0xf10089d0,
0xb6071087,
0x94bd0684,
0xf405bbfd,
0x8bd0090b,
0x0099f000,
0xf405eefd,
0x8ed00c0b,
0xc08fd080,
0xb70199f0,
0xc8010080,
0xb4b600ab,
0x0cb9f010,
0xb601aec8,
0xbefd11e4,
0x008bd005,
0xf0008ecf,
0x0bf41fe4,
0x00ce98fa,
0xd005e9fd,
0xc0b6c08e,
0x04cdb804,
0xc8e81bf4,
0x1bf402ab,
0x008bcf18,
0xb01fb4f0,
0x1bf410b4,
0x02a7f0f7,
0xf4c921f4,
0xabc81b0e,
0x10b4b600,
0xf00cb9f0,
0x8bd012b9,
0x008bcf00,
0xf412bbc8,
0x87f1fa1b,
0x84b6085c,
0xf094bd06,
0x89d00199,
0xf900f800,
0x02a7f0a0,
0xfcc921f4,
0xf100f8a0,
0xf04afc87,
0x97f00283,
0x0089d00c,
0x020721f5,
0x87f100f8,
0x83f04afc,
0x0d97f002,
0xf50089d0,
0xf8020721,
0xfca7f100,
0x02a3f04f,
0x0500aba2,
0xd00fc7f0,
0xc7f000ac,
0x00bcd00b,
0x020721f5,
0xf000aed0,
0xbcd00ac7,
0x0721f500,
0xf100f802,
0xb6083c87,
0x94bd0684,
0xd00399f0,
0x21f50089,
0xe7f00213,
0x3921f503,
0xfca7f102,
0x02a3f046,
0x0400aba0,
0xf040a0d0,
0xbcd001c7,
0x0721f500,
0x010c9202,
0xf000acd0,
0xbcd002c7,
0x0721f500,
0x2621f502,
0x8087f102,
0x0684b608,
0xb70089cf,
0x95220080,
0x8ed008fe,
0x408ed000,
0xb6808acf,
0xa0b606a5,
0x00eabb01,
0xb60480b6,
0x1bf40192,
0x08e4b6e8,
0xf1f2efbc,
0xb6085c87,
0x94bd0684,
0xd00399f0,
0x00f80089,
0xe7f1e0f9,
0xe3f09814,
0x8d21f440,
0x041ce0b7,
0xf401f7f0,
0xe0fc8d21,
0x04bd00f8,
0xf10004fe,
0xf0120017,
0x12d00227,
0x3e17f100,
0x0010fe04,
0x040017f1,
0xf0c010d0,
0x12d00427,
0x1031f400,
0x060817f1,
0xcf0614b6,
0x37f00012,
0x1f24f001,
0xb60432bb,
0x02800132,
0x04038003,
0x040010b7,
0x800012cf,
0x27f10002,
0x24b60800,
0x0022cf06,
0xb65817f0,
0x13980c10,
0x0432b800,
0xb00b0bf4,
0x1bf40034,
0xf100f8f1,
0xb6080027,
0x22cf0624,
0xf134bd40,
0xb6070047,
0x25950644,
0x0045d008,
0xbd4045d0,
0x58f4bde4,
0x1f58021e,
0x020e4003,
0xf5040f40,
0xbb013d21,
0x3fbb002f,
0x041e5800,
0x40051f58,
0x0f400a0e,
0x3d21f50c,
0x030e9801,
0xbb00effd,
0x3ebb002e,
0x0040b700,
0x0235b613,
0xb60043d0,
0x35b60825,
0x0120b606,
0xb60130b6,
0x34b60824,
0x022fb908,
0x026321f5,
0xf1003fbb,
0xb6080017,
0x13d00614,
0x0010b740,
0xf024bd08,
0x12d01f29,
0x0031f400,
0xf00028f4,
0x21f41cd7,
0xf401f439,
0xf404e4b0,
0x81fe1e18,
0x0627f001,
0x12fd20bd,
0x01e4b604,
0xfe051efd,
0x21f50018,
0x0ef404c3,
0x10ef94d3,
0xf501f5f0,
0xf402ec21,
0x80f9c60e,
0xf90188fe,
0xf990f980,
0xf9b0f9a0,
0xf9e0f9d0,
0x800acff0,
0xf404abc4,
0xb7f11d0b,
0xd7f01900,
0x40becf1c,
0xf400bfcf,
0xb0b70421,
0xe7f00400,
0x00bed001,
0xfc400ad0,
0xfce0fcf0,
0xfcb0fcd0,
0xfc90fca0,
0x0088fe80,
0x32f480fc,
0xf001f800,
0x0e9801f7,
0x04febb00,
0x9418e7f1,
0xf440e3f0,
0x00f88d21,
0x0614e7f1,
0xf006e4b6,
0xefd020f7,
0x08f7f000,
0xf401f2b6,
0xf7f1fd1b,
0xefd00a20,
0xf100f800,
0xb60a0417,
0x1fd00614,
0x0711f400,
0x04a421f5,
0x4afc17f1,
0xf00213f0,
0x12d00c27,
0x0721f500,
0xfc27f102,
0x0223f047,
0xf00020d0,
0x20b6012c,
0x0012d003,
0xf001acf0,
0xb7f002a5,
0x50b3f000,
0xb6000c98,
0xbcbb0fc4,
0x010c9800,
0xf0020d98,
0x21f500e7,
0xacf0015c,
0x04a5f001,
0x4000b7f1,
0x9850b3f0,
0xc4b6000c,
0x00bcbb0f,
0x98050c98,
0x0f98060d,
0x00e7f104,
0x5c21f508,
0x0721f501,
0x0601f402,
0xf11412f4,
0xf04afc17,
0x27f00213,
0x0012d00d,
0x020721f5,
0x048f21f5,
0x000000f8,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
};
/* fuc microcode for nvc0 PGRAPH/HUB
*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
/* To build:
* m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
*/
.section nvc0_grhub_data
include(`nvc0_graph.fuc')
gpc_count: .b32 0
rop_count: .b32 0
cmd_queue: queue_init
hub_mmio_list_head: .b32 0
hub_mmio_list_tail: .b32 0
ctx_current: .b32 0
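// chipset descriptions, 8 bytes each: a 32-bit chipset id, then 16-bit
// head/tail pointers for the hub mmio list; a zero id terminates the table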
chipsets:
.b8 0xc0 0 0 0
.b16 nvc0_hub_mmio_head
.b16 nvc0_hub_mmio_tail
.b8 0xc1 0 0 0
.b16 nvc0_hub_mmio_head
.b16 nvc1_hub_mmio_tail
.b8 0xc3 0 0 0
.b16 nvc0_hub_mmio_head
.b16 nvc0_hub_mmio_tail
.b8 0xc4 0 0 0
.b16 nvc0_hub_mmio_head
.b16 nvc0_hub_mmio_tail
.b8 0xc8 0 0 0
.b16 nvc0_hub_mmio_head
.b16 nvc0_hub_mmio_tail
.b8 0xce 0 0 0
.b16 nvc0_hub_mmio_head
.b16 nvc0_hub_mmio_tail
.b8 0 0 0 0
nvc0_hub_mmio_head:
mmctx_data(0x17e91c, 2)
mmctx_data(0x400204, 2)
mmctx_data(0x404004, 11)
mmctx_data(0x404044, 1)
mmctx_data(0x404094, 14)
mmctx_data(0x4040d0, 7)
mmctx_data(0x4040f8, 1)
mmctx_data(0x404130, 3)
mmctx_data(0x404150, 3)
mmctx_data(0x404164, 2)
mmctx_data(0x404174, 3)
mmctx_data(0x404200, 8)
mmctx_data(0x404404, 14)
mmctx_data(0x404460, 4)
mmctx_data(0x404480, 1)
mmctx_data(0x404498, 1)
mmctx_data(0x404604, 4)
mmctx_data(0x404618, 32)
mmctx_data(0x404698, 21)
mmctx_data(0x4046f0, 2)
mmctx_data(0x404700, 22)
mmctx_data(0x405800, 1)
mmctx_data(0x405830, 3)
mmctx_data(0x405854, 1)
mmctx_data(0x405870, 4)
mmctx_data(0x405a00, 2)
mmctx_data(0x405a18, 1)
mmctx_data(0x406020, 1)
mmctx_data(0x406028, 4)
mmctx_data(0x4064a8, 2)
mmctx_data(0x4064b4, 2)
mmctx_data(0x407804, 1)
mmctx_data(0x40780c, 6)
mmctx_data(0x4078bc, 1)
mmctx_data(0x408000, 7)
mmctx_data(0x408064, 1)
mmctx_data(0x408800, 3)
mmctx_data(0x408900, 4)
mmctx_data(0x408980, 1)
nvc0_hub_mmio_tail:
mmctx_data(0x4064c0, 2)
nvc1_hub_mmio_tail:
.align 256
chan_data:
chan_mmio_count: .b32 0
chan_mmio_address: .b32 0
.align 256
xfer_data: .b32 0
.section nvc0_grhub_code
bra init
define(`include_code')
include(`nvc0_graph.fuc')
// reports an exception to the host
//
// In: $r15 error code (see nvc0_graph.fuc)
//
error:
push $r14
mov $r14 0x814
shl b32 $r14 6
iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
mov $r14 0xc1c
shl b32 $r14 6
mov $r15 1
iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
pop $r14
ret
// HUB fuc initialisation, executed by triggering ucode start, will
// fall through to main loop after completion.
//
// Input:
// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
//
// Output:
// CC_SCRATCH[0]:
// 31:31: set to signal completion
// CC_SCRATCH[1]:
// 31:0: total PGRAPH context size
//
init:
clear b32 $r0
mov $sp $r0
mov $xdbase $r0
// enable fifo access
mov $r1 0x1200
mov $r2 2
iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
// setup i0 handler, and route all interrupts to it
mov $r1 ih
mov $iv0 $r1
mov $r1 0x400
iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
// route HUB_CHANNEL_SWITCH to fuc interrupt 8
mov $r3 0x404
shl b32 $r3 6
mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
iowr I[$r3 + 0x000] $r2
// not sure what these are, route them because NVIDIA does, and
// the IRQ handler will signal the host if we ever get one.. we
// may find out if/why we need to handle these if so..
//
mov $r2 0x2004
iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
mov $r2 0x200b
iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
mov $r2 0x200c
iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
// enable all INTR_UP interrupts
mov $r2 0xc24
shl b32 $r2 6
not b32 $r3 $r0
iowr I[$r2] $r3
// enable fifo, ctxsw, 9, 10, 15 interrupts
mov $r2 -0x78fc // 0x8704
sethi $r2 0
iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
// fifo level triggered, rest edge
sub b32 $r1 0x100
mov $r2 4
iowr I[$r1] $r2
// enable interrupts
bset $flags ie0
// fetch enabled GPC/ROP counts
mov $r14 -0x69fc // 0x409604
sethi $r14 0x400000
call nv_rd32
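// $r15 = 0x409604: bits 20:16 = ROP count, bits 4:0 = GPC count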
extr $r1 $r15 16:20
st b32 D[$r0 + rop_count] $r1
and $r15 0x1f
st b32 D[$r0 + gpc_count] $r15
// set BAR_REQMASK to GPC mask
mov $r1 1
shl b32 $r1 $r15
sub b32 $r1 1
mov $r2 0x40c
shl b32 $r2 6
iowr I[$r2 + 0x000] $r1
iowr I[$r2 + 0x100] $r1
// find context data for this chipset
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
mov $r15 chipsets - 8
init_find_chipset:
add b32 $r15 8
ld b32 $r3 D[$r15 + 0x00]
cmpu b32 $r3 $r2
bra e init_context
cmpu b32 $r3 0
bra ne init_find_chipset
// unknown chipset
ret
// context size calculation, reserve first 256 bytes for use by fuc
init_context:
mov $r1 256
// calculate size of mmio context data
ld b16 $r14 D[$r15 + 4]
ld b16 $r15 D[$r15 + 6]
sethi $r14 0
st b32 D[$r0 + hub_mmio_list_head] $r14
st b32 D[$r0 + hub_mmio_list_tail] $r15
call mmctx_size
// set mmctx base addresses now so we don't have to do it later,
// they don't (currently) ever change
mov $r3 0x700
shl b32 $r3 6
shr b32 $r4 $r1 8
iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
add b32 $r3 0x1300
add b32 $r1 $r15
shr b32 $r15 2
iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
// strands, base offset needs to be aligned to 256 bytes
shr b32 $r1 8
add b32 $r1 1
shl b32 $r1 8
mov b32 $r15 $r1
call strand_ctx_init
add b32 $r1 $r15
// initialise each GPC in sequence by passing in the offset of its
// context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
// has previously been uploaded by the host) running.
//
// the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
// when it has completed, and return the size of its context data
// in GPCn_CC_SCRATCH[1]
//
ld b32 $r3 D[$r0 + gpc_count]
mov $r4 0x2000
sethi $r4 0x500000
init_gpc:
// setup, and start GPC ucode running
add b32 $r14 $r4 0x804
mov b32 $r15 $r1
call nv_wr32 // CC_SCRATCH[1] = ctx offset
add b32 $r14 $r4 0x800
mov b32 $r15 $r2
call nv_wr32 // CC_SCRATCH[0] = chipset
add b32 $r14 $r4 0x10c
clear b32 $r15
call nv_wr32
add b32 $r14 $r4 0x104
call nv_wr32 // ENTRY
add b32 $r14 $r4 0x100
mov $r15 2 // CTRL_START_TRIGGER
call nv_wr32 // CTRL
// wait for it to complete, and adjust context size
add b32 $r14 $r4 0x800
init_gpc_wait:
call nv_rd32
xbit $r15 $r15 31
bra e init_gpc_wait
add b32 $r14 $r4 0x804
call nv_rd32
add b32 $r1 $r15
// next!
add b32 $r4 0x8000
sub b32 $r3 1
bra ne init_gpc
// save context size, and tell host we're ready
mov $r2 0x800
shl b32 $r2 6
iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
add b32 $r2 0x800
clear b32 $r1
bset $r1 31
iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
// Main program loop, very simple, sleeps until woken up by the interrupt
// handler, pulls a command from the queue and executes its handler
//
main:
// sleep until we have something to do
bset $flags $p0
sleep $p0
mov $r13 cmd_queue
call queue_get
bra $p1 main
// context switch, requested by GPU?
cmpu b32 $r14 0x4001
bra ne main_not_ctx_switch
trace_set(T_AUTO)
mov $r1 0xb00
shl b32 $r1 6
iord $r2 I[$r1 + 0x100] // CHAN_NEXT
iord $r1 I[$r1 + 0x000] // CHAN_CUR
xbit $r3 $r1 31
bra e chsw_no_prev
xbit $r3 $r2 31
bra e chsw_prev_no_next
push $r2
mov b32 $r2 $r1
trace_set(T_SAVE)
bclr $flags $p1
bset $flags $p2
call ctx_xfer
trace_clr(T_SAVE)
pop $r2
trace_set(T_LOAD)
bset $flags $p1
call ctx_xfer
trace_clr(T_LOAD)
bra chsw_done
chsw_prev_no_next:
push $r2
mov b32 $r2 $r1
bclr $flags $p1
bclr $flags $p2
call ctx_xfer
pop $r2
mov $r1 0xb00
shl b32 $r1 6
iowr I[$r1] $r2
bra chsw_done
chsw_no_prev:
xbit $r3 $r2 31
bra e chsw_done
bset $flags $p1
bclr $flags $p2
call ctx_xfer
// ack the context switch request
chsw_done:
mov $r1 0xb0c
shl b32 $r1 6
mov $r2 1
iowr I[$r1 + 0x000] $r2 // 0x409b0c
trace_clr(T_AUTO)
bra main
// request to set current channel? (*not* a context switch)
main_not_ctx_switch:
cmpu b32 $r14 0x0001
bra ne main_not_ctx_chan
mov b32 $r2 $r15
call ctx_chan
bra main_done
// request to store current channel context?
main_not_ctx_chan:
cmpu b32 $r14 0x0002
bra ne main_not_ctx_save
trace_set(T_SAVE)
bclr $flags $p1
bclr $flags $p2
call ctx_xfer
trace_clr(T_SAVE)
bra main_done
main_not_ctx_save:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
call error
bra main
main_done:
mov $r1 0x820
shl b32 $r1 6
clear b32 $r2
bset $r2 31
iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
bra main
// interrupt handler
ih:
push $r8
mov $r8 $flags
push $r8
push $r9
push $r10
push $r11
push $r13
push $r14
push $r15
// incoming fifo command?
iord $r10 I[$r0 + 0x200] // INTR
and $r11 $r10 0x00000004
bra e ih_no_fifo
// queue incoming fifo command for later processing
mov $r11 0x1900
mov $r13 cmd_queue
iord $r14 I[$r11 + 0x100] // FIFO_CMD
iord $r15 I[$r11 + 0x000] // FIFO_DATA
call queue_put
add b32 $r11 0x400
mov $r14 1
iowr I[$r11 + 0x000] $r14 // FIFO_ACK
// context switch request?
ih_no_fifo:
and $r11 $r10 0x00000100
bra e ih_no_ctxsw
// enqueue a context switch for later processing
mov $r13 cmd_queue
mov $r14 0x4001
call queue_put
// anything we didn't handle, bring it to the host's attention
ih_no_ctxsw:
mov $r11 0x104
not b32 $r11
and $r11 $r10 $r11
bra e ih_no_other
mov $r10 0xc1c
shl b32 $r10 6
iowr I[$r10] $r11 // INTR_UP_SET
// ack, and wake up main()
ih_no_other:
iowr I[$r0 + 0x100] $r10 // INTR_ACK
pop $r15
pop $r14
pop $r13
pop $r11
pop $r10
pop $r9
pop $r8
mov $flags $r8
pop $r8
bclr $flags $p0
iret
// Not real sure, but MEM_CMD 7 will hang forever if this isn't done
ctx_4160s:
mov $r14 0x4160
sethi $r14 0x400000
mov $r15 1
call nv_wr32
ctx_4160s_wait:
call nv_rd32
xbit $r15 $r15 4
bra e ctx_4160s_wait
ret
// Without clearing again at end of xfer, some things cause PGRAPH
// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
// still function with it set however...
ctx_4160c:
mov $r14 0x4160
sethi $r14 0x400000
clear b32 $r15
call nv_wr32
ret
// Again, not real sure
//
// In: $r15 value to set 0x404170 to
//
ctx_4170s:
mov $r14 0x4170
sethi $r14 0x400000
or $r15 0x10
call nv_wr32
ret
// Waits for a ctx_4170s() call to complete
//
ctx_4170w:
mov $r14 0x4170
sethi $r14 0x400000
call nv_rd32
and $r15 0x10
bra ne ctx_4170w
ret
// Disables various things, waits a bit, and re-enables them..
//
// Not sure how exactly this helps, perhaps "ENABLE" is not such a
// good description for the bits we turn off? Anyways, without this,
// funny things happen.
//
ctx_redswitch:
mov $r14 0x614
shl b32 $r14 6
mov $r15 0x270
iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
bra ne ctx_redswitch_delay
mov $r15 0x770
iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
ret
// Not a clue what this is for, except that unless the value is 0x10, the
// strand context is saved (and presumably restored) incorrectly..
//
// In: $r15 value to set to (0x00/0x10 are used)
//
ctx_86c:
mov $r14 0x86c
shl b32 $r14 6
iowr I[$r14] $r15 // HUB(0x86c) = val
mov $r14 -0x75ec
sethi $r14 0x400000
call nv_wr32 // ROP(0xa14) = val
mov $r14 -0x5794
sethi $r14 0x410000
call nv_wr32 // GPC(0x86c) = val
ret
// ctx_load - loads a channel's ctxctl data, and selects its vm
//
// In: $r2 channel address
//
ctx_load:
trace_set(T_CHAN)
// switch to channel, somewhat magic in parts..
mov $r10 12 // DONE_UNK12
call wait_donez
mov $r1 0xa24
shl b32 $r1 6
iowr I[$r1 + 0x000] $r0 // 0x409a24
mov $r3 0xb00
shl b32 $r3 6
iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
mov $r1 0xa0c
shl b32 $r1 6
mov $r4 7
iowr I[$r1 + 0x000] $r2 // MEM_CHAN
iowr I[$r1 + 0x100] $r4 // MEM_CMD
ctx_chan_wait_0:
iord $r4 I[$r1 + 0x100]
and $r4 0x1f
bra ne ctx_chan_wait_0
iowr I[$r3 + 0x000] $r2 // CHAN_CUR
// load channel header, fetch PGRAPH context pointer
mov $xtargets $r0
bclr $r2 31
shl b32 $r2 4
add b32 $r2 2
trace_set(T_LCHAN)
mov $r1 0xa04
shl b32 $r1 6
iowr I[$r1 + 0x000] $r2 // MEM_BASE
mov $r1 0xa20
shl b32 $r1 6
mov $r2 0x0002
sethi $r2 0x80000000
iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
mov $r1 0x10 // chan + 0x0210
mov $r2 xfer_data
sethi $r2 0x00020000 // 16 bytes
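// the xdld transfer size appears to be (4 << n) bytes, with n taken from
// the upper half of the word: n=2 gives the 16 bytes here, n=6 the
// 256-byte transfers used for chan_data/xfer_data below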
xdld $r1 $r2
xdwait
trace_clr(T_LCHAN)
// update current context
ld b32 $r1 D[$r0 + xfer_data + 4]
shl b32 $r1 24
ld b32 $r2 D[$r0 + xfer_data + 0]
shr b32 $r2 8
or $r1 $r2
st b32 D[$r0 + ctx_current] $r1
// set transfer base to start of context, and fetch context header
trace_set(T_LCTXH)
mov $r2 0xa04
shl b32 $r2 6
iowr I[$r2 + 0x000] $r1 // MEM_BASE
mov $r2 1
mov $r1 0xa20
shl b32 $r1 6
iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
mov $r1 chan_data
sethi $r1 0x00060000 // 256 bytes
xdld $r0 $r1
xdwait
trace_clr(T_LCTXH)
trace_clr(T_CHAN)
ret
// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
// the active channel for ctxctl, but not actually transfer
// any context data. intended for use only during initial
// context construction.
//
// In: $r2 channel address
//
ctx_chan:
call ctx_4160s
call ctx_load
mov $r10 12 // DONE_UNK12
call wait_donez
mov $r1 0xa10
shl b32 $r1 6
mov $r2 5
iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
ctx_chan_wait:
iord $r2 I[$r1 + 0x000]
or $r2 $r2
bra ne ctx_chan_wait
call ctx_4160c
ret
// Execute per-context state overrides list
//
// Only executed on the first load of a channel. Might want to look into
// removing this and having the host directly modify the channel's context
// to change this state... The nouveau DRM already builds this list as
// it's definitely needed for NVIDIA's ucode, so we may as well use it for now
//
// Input: $r1 mmio list length
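// each list entry is 8 bytes: a register address, then the value to write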
//
ctx_mmio_exec:
// set transfer base to be the mmio list
ld b32 $r3 D[$r0 + chan_mmio_address]
mov $r2 0xa04
shl b32 $r2 6
iowr I[$r2 + 0x000] $r3 // MEM_BASE
clear b32 $r3
ctx_mmio_loop:
// fetch next 256 bytes of mmio list if necessary
and $r4 $r3 0xff
bra ne ctx_mmio_pull
mov $r5 xfer_data
sethi $r5 0x00060000 // 256 bytes
xdld $r3 $r5
xdwait
// execute a single list entry
ctx_mmio_pull:
ld b32 $r14 D[$r4 + xfer_data + 0x00]
ld b32 $r15 D[$r4 + xfer_data + 0x04]
call nv_wr32
// next!
add b32 $r3 8
sub b32 $r1 1
bra ne ctx_mmio_loop
// set transfer base back to the current context
ctx_mmio_done:
ld b32 $r3 D[$r0 + ctx_current]
iowr I[$r2 + 0x000] $r3 // MEM_BASE
// disable the mmio list now, we don't need/want to execute it again
st b32 D[$r0 + chan_mmio_count] $r0
mov $r1 chan_data
sethi $r1 0x00060000 // 256 bytes
xdst $r0 $r1
xdwait
ret
// Transfer HUB context data between GPU and storage area
//
// In: $r2 channel address
// $p1 clear on save, set on load
// $p2 set if opposite direction done/will be done, so:
// on save it means: "a load will follow this save"
// on load it means: "a save preceded this load"
//
ctx_xfer:
bra not $p1 ctx_xfer_pre
bra $p2 ctx_xfer_pre_load
ctx_xfer_pre:
mov $r15 0x10
call ctx_86c
call ctx_4160s
bra not $p1 ctx_xfer_exec
ctx_xfer_pre_load:
mov $r15 2
call ctx_4170s
call ctx_4170w
call ctx_redswitch
clear b32 $r15
call ctx_4170s
call ctx_load
// fetch context pointer, and initiate xfer on all GPCs
ctx_xfer_exec:
ld b32 $r1 D[$r0 + ctx_current]
mov $r2 0x414
shl b32 $r2 6
iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
mov $r14 -0x5b00
sethi $r14 0x410000
mov b32 $r15 $r1
call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
add b32 $r14 4
xbit $r15 $flags $p1
xbit $r2 $flags $p2
shl b32 $r2 1
or $r15 $r2
call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
// strands
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xc
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
call strand_wait
mov $r2 0x47fc
sethi $r2 0x20000
iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
xbit $r2 $flags $p1
add b32 $r2 3
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
// mmio context
xbit $r10 $flags $p1 // direction
or $r10 6 // first, last
mov $r11 0 // base = 0
ld b32 $r12 D[$r0 + hub_mmio_list_head]
ld b32 $r13 D[$r0 + hub_mmio_list_tail]
mov $r14 0 // not multi
call mmctx_xfer
// wait for GPCs to all complete
mov $r10 8 // DONE_BAR
call wait_doneo
// wait for strand xfer to complete
call strand_wait
// post-op
bra $p1 ctx_xfer_post
mov $r10 12 // DONE_UNK12
call wait_donez
mov $r1 0xa10
shl b32 $r1 6
mov $r2 5
iowr I[$r1] $r2 // MEM_CMD
ctx_xfer_post_save_wait:
iord $r2 I[$r1]
or $r2 $r2
bra ne ctx_xfer_post_save_wait
bra $p2 ctx_xfer_done
ctx_xfer_post:
mov $r15 2
call ctx_4170s
clear b32 $r15
call ctx_86c
call strand_post
call ctx_4170w
clear b32 $r15
call ctx_4170s
bra not $p1 ctx_xfer_no_post_mmio
ld b32 $r1 D[$r0 + chan_mmio_count]
or $r1 $r1
bra e ctx_xfer_no_post_mmio
call ctx_mmio_exec
ctx_xfer_no_post_mmio:
call ctx_4160c
ctx_xfer_done:
ret
.align 256
uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x000000c0,
0x012c0090,
0x000000c1,
0x01300090,
0x000000c3,
0x012c0090,
0x000000c4,
0x012c0090,
0x000000c8,
0x012c0090,
0x000000ce,
0x012c0090,
0x00000000,
0x0417e91c,
0x04400204,
0x28404004,
0x00404044,
0x34404094,
0x184040d0,
0x004040f8,
0x08404130,
0x08404150,
0x04404164,
0x08404174,
0x1c404200,
0x34404404,
0x0c404460,
0x00404480,
0x00404498,
0x0c404604,
0x7c404618,
0x50404698,
0x044046f0,
0x54404700,
0x00405800,
0x08405830,
0x00405854,
0x0c405870,
0x04405a00,
0x00405a18,
0x00406020,
0x0c406028,
0x044064a8,
0x044064b4,
0x00407804,
0x1440780c,
0x004078bc,
0x18408000,
0x00408064,
0x08408800,
0x0c408900,
0x00408980,
0x044064c0,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
};
uint32_t nvc0_grhub_code[] = {
0x03090ef5,
0x9800d898,
0x86f001d9,
0x0489b808,
0xf00c1bf4,
0x21f502f7,
0x00f802ec,
0xb60798c4,
0x8dbb0384,
0x0880b600,
0x80008e80,
0x90b6018f,
0x0f94f001,
0xf801d980,
0x0131f400,
0x9800d898,
0x89b801d9,
0x210bf404,
0xb60789c4,
0x9dbb0394,
0x0890b600,
0x98009e98,
0x80b6019f,
0x0f84f001,
0xf400d880,
0x00f80132,
0x0728b7f1,
0xb906b4b6,
0xc9f002ec,
0x00bcd01f,
0xc800bccf,
0x1bf41fcc,
0x06a7f0fa,
0x010321f5,
0xf840bfcf,
0x28b7f100,
0x06b4b607,
0xb980bfd0,
0xc9f002ec,
0x1ec9f01f,
0xcf00bcd0,
0xccc800bc,
0xfa1bf41f,
0x87f100f8,
0x84b60430,
0x1ff9f006,
0xf8008fd0,
0x3087f100,
0x0684b604,
0xf80080d0,
0x3c87f100,
0x0684b608,
0x99f094bd,
0x0089d000,
0x081887f1,
0xd00684b6,
0x87f1008a,
0x84b60400,
0x0088cf06,
0xf4888aff,
0x87f1f31b,
0x84b6085c,
0xf094bd06,
0x89d00099,
0xf100f800,
0xb6083c87,
0x94bd0684,
0xd00099f0,
0x87f10089,
0x84b60818,
0x008ad006,
0x040087f1,
0xcf0684b6,
0x8aff0088,
0xf30bf488,
0x085c87f1,
0xbd0684b6,
0x0099f094,
0xf80089d0,
0x9894bd00,
0x85b600e8,
0x0180b61a,
0xbb0284b6,
0xe0b60098,
0x04efb804,
0xb9eb1bf4,
0x00f8029f,
0x083c87f1,
0xbd0684b6,
0x0199f094,
0xf10089d0,
0xb6071087,
0x94bd0684,
0xf405bbfd,
0x8bd0090b,
0x0099f000,
0xf405eefd,
0x8ed00c0b,
0xc08fd080,
0xb70199f0,
0xc8010080,
0xb4b600ab,
0x0cb9f010,
0xb601aec8,
0xbefd11e4,
0x008bd005,
0xf0008ecf,
0x0bf41fe4,
0x00ce98fa,
0xd005e9fd,
0xc0b6c08e,
0x04cdb804,
0xc8e81bf4,
0x1bf402ab,
0x008bcf18,
0xb01fb4f0,
0x1bf410b4,
0x02a7f0f7,
0xf4c921f4,
0xabc81b0e,
0x10b4b600,
0xf00cb9f0,
0x8bd012b9,
0x008bcf00,
0xf412bbc8,
0x87f1fa1b,
0x84b6085c,
0xf094bd06,
0x89d00199,
0xf900f800,
0x02a7f0a0,
0xfcc921f4,
0xf100f8a0,
0xf04afc87,
0x97f00283,
0x0089d00c,
0x020721f5,
0x87f100f8,
0x83f04afc,
0x0d97f002,
0xf50089d0,
0xf8020721,
0xfca7f100,
0x02a3f04f,
0x0500aba2,
0xd00fc7f0,
0xc7f000ac,
0x00bcd00b,
0x020721f5,
0xf000aed0,
0xbcd00ac7,
0x0721f500,
0xf100f802,
0xb6083c87,
0x94bd0684,
0xd00399f0,
0x21f50089,
0xe7f00213,
0x3921f503,
0xfca7f102,
0x02a3f046,
0x0400aba0,
0xf040a0d0,
0xbcd001c7,
0x0721f500,
0x010c9202,
0xf000acd0,
0xbcd002c7,
0x0721f500,
0x2621f502,
0x8087f102,
0x0684b608,
0xb70089cf,
0x95220080,
0x8ed008fe,
0x408ed000,
0xb6808acf,
0xa0b606a5,
0x00eabb01,
0xb60480b6,
0x1bf40192,
0x08e4b6e8,
0xf1f2efbc,
0xb6085c87,
0x94bd0684,
0xd00399f0,
0x00f80089,
0xe7f1e0f9,
0xe4b60814,
0x00efd006,
0x0c1ce7f1,
0xf006e4b6,
0xefd001f7,
0xf8e0fc00,
0xfe04bd00,
0x07fe0004,
0x0017f100,
0x0227f012,
0xf10012d0,
0xfe05b917,
0x17f10010,
0x10d00400,
0x0437f1c0,
0x0634b604,
0x200327f1,
0xf10032d0,
0xd0200427,
0x27f10132,
0x32d0200b,
0x0c27f102,
0x0732d020,
0x0c2427f1,
0xb90624b6,
0x23d00003,
0x0427f100,
0x0023f087,
0xb70012d0,
0xf0010012,
0x12d00427,
0x1031f400,
0x9604e7f1,
0xf440e3f0,
0xf1c76821,
0x01018090,
0x801ff4f0,
0x17f0000f,
0x041fbb01,
0xf10112b6,
0xb6040c27,
0x21d00624,
0x4021d000,
0x080027f1,
0xcf0624b6,
0xf7f00022,
0x08f0b654,
0xb800f398,
0x0bf40432,
0x0034b00b,
0xf8f11bf4,
0x0017f100,
0x02fe5801,
0xf003ff58,
0x0e8000e3,
0x150f8014,
0x013d21f5,
0x070037f1,
0x950634b6,
0x34d00814,
0x4034d000,
0x130030b7,
0xb6001fbb,
0x3fd002f5,
0x0815b600,
0xb60110b6,
0x1fb90814,
0x6321f502,
0x001fbb02,
0xf1000398,
0xf0200047,
0x4ea05043,
0x1fb90804,
0x8d21f402,
0x08004ea0,
0xf4022fb9,
0x4ea08d21,
0xf4bd010c,
0xa08d21f4,
0xf401044e,
0x4ea08d21,
0xf7f00100,
0x8d21f402,
0x08004ea0,
0xc86821f4,
0x0bf41fff,
0x044ea0fa,
0x6821f408,
0xb7001fbb,
0xb6800040,
0x1bf40132,
0x0027f1b4,
0x0624b608,
0xb74021d0,
0xbd080020,
0x1f19f014,
0xf40021d0,
0x28f40031,
0x08d7f000,
0xf43921f4,
0xe4b1f401,
0x1bf54001,
0x87f100d1,
0x84b6083c,
0xf094bd06,
0x89d00499,
0x0017f100,
0x0614b60b,
0xcf4012cf,
0x13c80011,
0x7e0bf41f,
0xf41f23c8,
0x20f95a0b,
0xf10212b9,
0xb6083c87,
0x94bd0684,
0xd00799f0,
0x32f40089,
0x0231f401,
0x082921f5,
0x085c87f1,
0xbd0684b6,
0x0799f094,
0xfc0089d0,
0x3c87f120,
0x0684b608,
0x99f094bd,
0x0089d006,
0xf50131f4,
0xf1082921,
0xb6085c87,
0x94bd0684,
0xd00699f0,
0x0ef40089,
0xb920f931,
0x32f40212,
0x0232f401,
0x082921f5,
0x17f120fc,
0x14b60b00,
0x0012d006,
0xc8130ef4,
0x0bf41f23,
0x0131f40d,
0xf50232f4,
0xf1082921,
0xb60b0c17,
0x27f00614,
0x0012d001,
0x085c87f1,
0xbd0684b6,
0x0499f094,
0xf50089d0,
0xb0ff200e,
0x1bf401e4,
0x02f2b90d,
0x07b521f5,
0xb0420ef4,
0x1bf402e4,
0x3c87f12e,
0x0684b608,
0x99f094bd,
0x0089d007,
0xf40132f4,
0x21f50232,
0x87f10829,
0x84b6085c,
0xf094bd06,
0x89d00799,
0x110ef400,
0xf010ef94,
0x21f501f5,
0x0ef502ec,
0x17f1fed1,
0x14b60820,
0xf024bd06,
0x12d01f29,
0xbe0ef500,
0xfe80f9fe,
0x80f90188,
0xa0f990f9,
0xd0f9b0f9,
0xf0f9e0f9,
0xc4800acf,
0x0bf404ab,
0x00b7f11d,
0x08d7f019,
0xcf40becf,
0x21f400bf,
0x00b0b704,
0x01e7f004,
0xe400bed0,
0xf40100ab,
0xd7f00d0b,
0x01e7f108,
0x0421f440,
0x0104b7f1,
0xabffb0bd,
0x0d0bf4b4,
0x0c1ca7f1,
0xd006a4b6,
0x0ad000ab,
0xfcf0fc40,
0xfcd0fce0,
0xfca0fcb0,
0xfe80fc90,
0x80fc0088,
0xf80032f4,
0x60e7f101,
0x40e3f041,
0xf401f7f0,
0x21f48d21,
0x04ffc868,
0xf8fa0bf4,
0x60e7f100,
0x40e3f041,
0x21f4f4bd,
0xf100f88d,
0xf04170e7,
0xf5f040e3,
0x8d21f410,
0xe7f100f8,
0xe3f04170,
0x6821f440,
0xf410f4f0,
0x00f8f31b,
0x0614e7f1,
0xf106e4b6,
0xd00270f7,
0xf7f000ef,
0x01f2b608,
0xf1fd1bf4,
0xd00770f7,
0x00f800ef,
0x086ce7f1,
0xd006e4b6,
0xe7f100ef,
0xe3f08a14,
0x8d21f440,
0xa86ce7f1,
0xf441e3f0,
0x00f88d21,
0x083c87f1,
0xbd0684b6,
0x0599f094,
0xf00089d0,
0x21f40ca7,
0x2417f1c9,
0x0614b60a,
0xf10010d0,
0xb60b0037,
0x32d00634,
0x0c17f140,
0x0614b60a,
0xd00747f0,
0x14d00012,
0x4014cf40,
0xf41f44f0,
0x32d0fa1b,
0x000bfe00,
0xb61f2af0,
0x20b60424,
0x3c87f102,
0x0684b608,
0x99f094bd,
0x0089d008,
0x0a0417f1,
0xd00614b6,
0x17f10012,
0x14b60a20,
0x0227f006,
0x800023f1,
0xf00012d0,
0x27f11017,
0x23f00300,
0x0512fa02,
0x87f103f8,
0x84b6085c,
0xf094bd06,
0x89d00899,
0xc1019800,
0x981814b6,
0x25b6c002,
0x0512fd08,
0xf1160180,
0xb6083c87,
0x94bd0684,
0xd00999f0,
0x27f10089,
0x24b60a04,
0x0021d006,
0xf10127f0,
0xb60a2017,
0x12d00614,
0x0017f100,
0x0613f002,
0xf80501fa,
0x5c87f103,
0x0684b608,
0x99f094bd,
0x0089d009,
0x085c87f1,
0xbd0684b6,
0x0599f094,
0xf80089d0,
0x3121f500,
0xb821f506,
0x0ca7f006,
0xf1c921f4,
0xb60a1017,
0x27f00614,
0x0012d005,
0xfd0012cf,
0x1bf40522,
0x4921f5fa,
0x9800f806,
0x27f18103,
0x24b60a04,
0x0023d006,
0x34c434bd,
0x0f1bf4ff,
0x030057f1,
0xfa0653f0,
0x03f80535,
0x98c04e98,
0x21f4c14f,
0x0830b68d,
0xf40112b6,
0x0398df1b,
0x0023d016,
0xf1800080,
0xf0020017,
0x01fa0613,
0xf803f806,
0x0611f400,
0xf01102f4,
0x21f510f7,
0x21f50698,
0x11f40631,
0x02f7f01c,
0x065721f5,
0x066621f5,
0x067821f5,
0x21f5f4bd,
0x21f50657,
0x019806b8,
0x1427f116,
0x0624b604,
0xf10020d0,
0xf0a500e7,
0x1fb941e3,
0x8d21f402,
0xf004e0b6,
0x2cf001fc,
0x0124b602,
0xf405f2fd,
0x17f18d21,
0x13f04afc,
0x0c27f002,
0xf50012d0,
0xf1020721,
0xf047fc27,
0x20d00223,
0x012cf000,
0xd00320b6,
0xacf00012,
0x06a5f001,
0x9800b7f0,
0x0d98140c,
0x00e7f015,
0x015c21f5,
0xf508a7f0,
0xf5010321,
0xf4020721,
0xa7f02201,
0xc921f40c,
0x0a1017f1,
0xf00614b6,
0x12d00527,
0x0012cf00,
0xf40522fd,
0x02f4fa1b,
0x02f7f032,
0x065721f5,
0x21f5f4bd,
0x21f50698,
0x21f50226,
0xf4bd0666,
0x065721f5,
0x981011f4,
0x11fd8001,
0x070bf405,
0x07df21f5,
0x064921f5,
0x000000f8,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
};
...@@ -32,7 +32,6 @@ struct nvc0_instmem_priv { ...@@ -32,7 +32,6 @@ struct nvc0_instmem_priv {
struct nouveau_channel *bar1; struct nouveau_channel *bar1;
struct nouveau_gpuobj *bar3_pgd; struct nouveau_gpuobj *bar3_pgd;
struct nouveau_channel *bar3; struct nouveau_channel *bar3;
struct nouveau_gpuobj *chan_pgd;
}; };
int int
...@@ -181,17 +180,11 @@ nvc0_instmem_init(struct drm_device *dev) ...@@ -181,17 +180,11 @@ nvc0_instmem_init(struct drm_device *dev)
goto error; goto error;
/* channel vm */ /* channel vm */
ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm); ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
&dev_priv->chan_vm);
if (ret) if (ret)
goto error; goto error;
ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
if (ret)
goto error;
nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
nouveau_vm_ref(NULL, &vm, NULL);
nvc0_instmem_resume(dev); nvc0_instmem_resume(dev);
return 0; return 0;
error: error:
...@@ -211,8 +204,7 @@ nvc0_instmem_takedown(struct drm_device *dev) ...@@ -211,8 +204,7 @@ nvc0_instmem_takedown(struct drm_device *dev)
nv_wr32(dev, 0x1704, 0x00000000); nv_wr32(dev, 0x1704, 0x00000000);
nv_wr32(dev, 0x1714, 0x00000000); nv_wr32(dev, 0x1714, 0x00000000);
nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd); nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
nvc0_channel_del(&priv->bar1); nvc0_channel_del(&priv->bar1);
nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
......
...@@ -105,7 +105,11 @@ nvc0_vm_flush(struct nouveau_vm *vm) ...@@ -105,7 +105,11 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct drm_device *dev = vm->dev; struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd; struct nouveau_vm_pgd *vpgd;
unsigned long flags; unsigned long flags;
u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; u32 engine;
engine = 1;
if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
engine |= 4;
pinstmem->flush(vm->dev); pinstmem->flush(vm->dev);
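(The rewritten flush yields the same engine values as before: 1 for channel VMs, 5 (1 | 4) for the BAR1/BAR3 VMs. The old code reached 5 by comparing against chan_vm; the new code identifies the BAR VMs directly.)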
......
...@@ -61,9 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin, ...@@ -61,9 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
u32 type, struct nouveau_mem **pmem) u32 type, struct nouveau_mem **pmem)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; struct nouveau_mm *mm = dev_priv->engine.vram.mm;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r; struct nouveau_mm_node *r;
struct nouveau_mem *mem; struct nouveau_mem *mem;
int ret; int ret;
...@@ -105,9 +103,15 @@ int ...@@ -105,9 +103,15 @@ int
nvc0_vram_init(struct drm_device *dev) nvc0_vram_init(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
u32 length;
dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
dev_priv->vram_size *= nv_rd32(dev, 0x121c74); dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
dev_priv->vram_rblock_size = 4096;
return 0; length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
return nouveau_mm_init(&vram->mm, rsvd_head, length, 1);
} }
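For reference, a minimal sketch of the reservation arithmetic the new nvc0_vram_init() performs, written as standalone userspace C rather than kernel code; the 1 GiB VRAM size is assumed for the example (the real size comes from the 0x10f20c/0x121c74 reads above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vram_size = 1ULL << 30;          /* assumed 1 GiB for the example */
	uint32_t rsvd_head = (256 * 1024) >> 12;  /* 64 pages kept for vga memory */
	uint32_t rsvd_tail = (1024 * 1024) >> 12; /* 256 pages kept for vbios etc */
	uint32_t length = (vram_size >> 12) - rsvd_head - rsvd_tail;

	/* nouveau_mm_init(&vram->mm, rsvd_head, length, 1) then manages
	 * pages [rsvd_head, rsvd_head + length) at 1-page granularity */
	printf("managing %u of %llu pages, starting at page %u\n",
	       length, (unsigned long long)(vram_size >> 12), rsvd_head);
	return 0;
}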