Commit 573a2a37 authored by Ben Skeggs

drm/nv50: implement custom vram mm

This is required on nv50, where we need more precise control over physical
VRAM allocations to avoid buffer corruption when using buffers of mixed
memory types.

This removes the nasty overallocation/alignment we previously used to
"control" this problem.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 937c3471
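The diff below is easier to follow with the allocator's core invariant in mind: VRAM is handed out in typed extents, and an extent that already carries one memory type is never used to satisfy an allocation of another type. The following userspace sketch models just that invariant; it is a simplified illustration with hypothetical names, not driver code (the real allocator additionally handles alignment, block-granular typing, merging on free, and un-typing of wholly free blocks).

#include <stdio.h>
#include <stdlib.h>

struct extent {
	struct extent *next;
	unsigned offset, length;
	int type;                  /* 0 == untyped, else the owning memtype */
	int free;
};

static struct extent *head;

/* Carve 'size' units of 'type' out of the first usable extent. */
static struct extent *mm_get(int type, unsigned size)
{
	struct extent *e;

	for (e = head; e; e = e->next) {
		if (!e->free || e->length < size)
			continue;
		if (e->type && e->type != type)
			continue;                /* typed for someone else */
		if (e->length > size) {          /* split off the remainder */
			struct extent *r = malloc(sizeof(*r));
			if (!r)
				return NULL;
			r->next = e->next;
			r->offset = e->offset + size;
			r->length = e->length - size;
			r->type = e->type;
			r->free = 1;
			e->next = r;
			e->length = size;
		}
		e->free = 0;
		e->type = type;                  /* untyped extents get typed */
		return e;
	}
	return NULL;
}

int main(void)
{
	head = calloc(1, sizeof(*head));
	head->length = 1024;                     /* one untyped free extent */
	head->free = 1;

	struct extent *a = mm_get(1, 256);       /* one memtype bucket */
	struct extent *b = mm_get(2, 256);       /* a different bucket */

	/* b lands in its own extent; in the real allocator an extent must
	 * be un-typed before another memtype may reuse it, which is what
	 * prevents the mixed-memtype corruption described above. */
	printf("a: offset=%u type=%d\n", a->offset, a->type);
	printf("b: offset=%u type=%d\n", b->offset, b->type);
	return 0;
}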
drivers/gpu/drm/nouveau/Makefile
@@ -9,7 +9,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
nouveau_dp.o nouveau_ramht.o \
nouveau_dp.o nouveau_ramht.o nouveau_mm.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
@@ -26,7 +26,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
nv10_gpio.o nv50_gpio.o \
nv50_calc.o \
nv04_pm.o nv50_pm.o nva3_pm.o
nv04_pm.o nv50_pm.o nva3_pm.o \
nv50_vram.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -57,42 +57,7 @@ nouveau_bo_fixup_align(struct drm_device *dev,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
/*
* Some of the tile_flags have a periodic structure of N*4096 bytes,
align to that as well as the page size. Align the size to the
* appropriate boundaries. This does imply that sizes are rounded up
* 3-7 pages, so be aware of this and do not waste memory by allocating
* many small buffers.
*/
if (dev_priv->card_type == NV_50) {
uint32_t block_size = dev_priv->vram_size >> 15;
int i;
switch (tile_flags) {
case 0x1800:
case 0x2800:
case 0x4800:
case 0x7a00:
if (is_power_of_2(block_size)) {
for (i = 1; i < 10; i++) {
*align = 12 * i * block_size;
if (!(*align % 65536))
break;
}
} else {
for (i = 1; i < 10; i++) {
*align = 8 * i * block_size;
if (!(*align % 65536))
break;
}
}
*size = roundup(*size, *align);
break;
default:
break;
}
} else {
if (dev_priv->card_type < NV_50) {
if (tile_mode) {
if (dev_priv->chipset >= 0x40) {
*align = 65536;
@@ -115,7 +80,6 @@ nouveau_bo_fixup_align(struct drm_device *dev,
/* ALIGN works only on powers of two. */
*size = roundup(*size, PAGE_SIZE);
if (dev_priv->card_type == NV_50) {
*size = roundup(*size, 65536);
*align = max(65536, *align);
@@ -422,7 +386,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
man->func = &ttm_bo_manager_func;
if (dev_priv->card_type == NV_50)
man->func = &nouveau_vram_manager;
else
man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -66,6 +66,15 @@ struct nouveau_grctx;
#define NV50_VM_BLOCK (512*1024*1024ULL)
#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
struct nouveau_vram {
struct drm_device *dev;
struct list_head regions;
u32 memtype;
u64 offset;
u64 size;
};
struct nouveau_tile_reg {
bool used;
uint32_t addr;
@@ -821,6 +830,7 @@ extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
uint64_t phys);
extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
uint32_t size);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,6 +36,7 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
/*
* NV10-NV40 tiling helpers
@@ -333,61 +334,6 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
return 0;
}
static void
nv50_vram_preinit(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i, parts, colbits, rowbitsa, rowbitsb, banks;
u64 rowsize, predicted;
u32 r0, r4, rt, ru;
r0 = nv_rd32(dev, 0x100200);
r4 = nv_rd32(dev, 0x100204);
rt = nv_rd32(dev, 0x100250);
ru = nv_rd32(dev, 0x001540);
NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
for (i = 0, parts = 0; i < 8; i++) {
if (ru & (0x00010000 << i))
parts++;
}
colbits = (r4 & 0x0000f000) >> 12;
rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
banks = ((r4 & 0x01000000) ? 8 : 4);
rowsize = parts * banks * (1 << colbits) * 8;
predicted = rowsize << rowbitsa;
if (r0 & 0x00000004)
predicted += rowsize << rowbitsb;
if (predicted != dev_priv->vram_size) {
NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
(u32)(dev_priv->vram_size >> 20));
NV_WARN(dev, "we calculated %dMiB VRAM\n",
(u32)(predicted >> 20));
}
dev_priv->vram_rblock_size = rowsize >> 12;
if (rt & 1)
dev_priv->vram_rblock_size *= 3;
NV_DEBUG(dev, "rblock %lld bytes\n",
(u64)dev_priv->vram_rblock_size << 12);
}
static void
nvaa_vram_preinit(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
/* To our knowledge, there's no large scale reordering of pages
* that occurs on IGP chipsets.
*/
dev_priv->vram_rblock_size = 1;
}
static int
nouveau_mem_detect(struct drm_device *dev)
{
@@ -404,22 +350,8 @@ nouveau_mem_detect(struct drm_device *dev)
dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
} else
if (dev_priv->card_type < NV_C0) {
dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ll;
switch (dev_priv->chipset) {
case 0xaa:
case 0xac:
case 0xaf:
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
dev_priv->vram_sys_base <<= 12;
nvaa_vram_preinit(dev);
break;
default:
nv50_vram_preinit(dev);
break;
}
if (nv50_vram_init(dev))
return -ENOMEM;
} else {
dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
@@ -568,10 +500,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (ret)
return ret;
ret = nouveau_mem_detect(dev);
if (ret)
return ret;
dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
@@ -587,13 +515,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
dev_priv->fb_mappable_pages =
pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
/* reserve space at end of VRAM for PRAMIN */
if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -604,6 +525,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
else
dev_priv->ramin_rsvd_vram = (512 * 1024);
/* initialise gpu-specific vram backend */
ret = nouveau_mem_detect(dev);
if (ret)
return ret;
dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
@@ -820,3 +752,108 @@ nouveau_mem_timing_fini(struct drm_device *dev)
kfree(mem->timing);
}
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_mm *mm;
u32 b_size;
int ret;
p_size = (p_size << PAGE_SHIFT) >> 12;
b_size = dev_priv->vram_rblock_size >> 12;
ret = nouveau_mm_init(&mm, 0, p_size, b_size);
if (ret)
return ret;
man->priv = mm;
return 0;
}
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
struct nouveau_mm *mm = man->priv;
int ret;
ret = nouveau_mm_fini(&mm);
if (ret)
return ret;
man->priv = NULL;
return 0;
}
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct drm_device *dev = dev_priv->dev;
nv50_vram_del(dev, (struct nouveau_vram **)&mem->mm_node);
}
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vram *vram;
int ret;
ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, 65536, 0,
(nvbo->tile_flags >> 8) & 0x7f, &vram);
if (ret)
return ret;
mem->mm_node = vram;
mem->start = vram->offset >> PAGE_SHIFT;
return 0;
}
void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
struct ttm_bo_global *glob = man->bdev->glob;
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
int i;
mutex_lock(&mm->mutex);
list_for_each_entry(r, &mm->nodes, nl_entry) {
printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
prefix, r->free ? "free" : "used", r->type,
((u64)r->offset << 12),
(((u64)r->offset + r->length) << 12));
total += r->length;
ttotal[r->type] += r->length;
if (r->free)
tfree[r->type] += r->length;
else
tused[r->type] += r->length;
}
mutex_unlock(&mm->mutex);
printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
for (i = 0; i < 3; i++) {
printk(KERN_DEBUG "%s type %d: 0x%010llx, "
"used 0x%010llx, free 0x%010llx\n", prefix,
i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
}
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
nouveau_vram_manager_init,
nouveau_vram_manager_fini,
nouveau_vram_manager_new,
nouveau_vram_manager_del,
nouveau_vram_manager_debug
};
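The function-pointer table above relies on positional initialization, so the order must match the slots of struct ttm_mem_type_manager_func. Assuming the TTM field names of this era (init, takedown, get_node, put_node, debug), the equivalent designated-initializer form would be:

/* Sketch only: field names assume the contemporary TTM definition of
 * struct ttm_mem_type_manager_func. */
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init     = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
	.debug    = nouveau_vram_manager_debug,
};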
drivers/gpu/drm/nouveau/nouveau_mm.c (new file)
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
static inline void
region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
{
list_del(&a->nl_entry);
list_del(&a->fl_entry);
kfree(a);
}
static struct nouveau_mm_node *
region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
{
struct nouveau_mm_node *b;
if (a->length == size)
return a;
b = kmalloc(sizeof(*b), GFP_KERNEL);
if (unlikely(b == NULL))
return NULL;
b->offset = a->offset;
b->length = size;
b->free = a->free;
b->type = a->type;
a->offset += size;
a->length -= size;
list_add_tail(&b->nl_entry, &a->nl_entry);
if (b->free)
list_add_tail(&b->fl_entry, &a->fl_entry);
return b;
}
static struct nouveau_mm_node *
nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
struct nouveau_mm_node *prev, *next;
/* try to merge with free adjacent entries of same type */
prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
if (this->nl_entry.prev != &rmm->nodes) {
if (prev->free && prev->type == this->type) {
prev->length += this->length;
region_put(rmm, this);
this = prev;
}
}
next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
if (this->nl_entry.next != &rmm->nodes) {
if (next->free && next->type == this->type) {
next->offset = this->offset;
next->length += this->length;
region_put(rmm, this);
this = next;
}
}
return this;
}
void
nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
u32 block_s, block_l;
this->free = true;
list_add(&this->fl_entry, &rmm->free);
this = nouveau_mm_merge(rmm, this);
/* any entirely free blocks now? we'll want to remove typing
* on them now so they can be used for any memory allocation
*/
block_s = roundup(this->offset, rmm->block_size);
if (block_s + rmm->block_size > this->offset + this->length)
return;
/* split off any still-typed region at the start */
if (block_s != this->offset) {
if (!region_split(rmm, this, block_s - this->offset))
return;
}
/* split off the soon-to-be-untyped block(s) */
block_l = rounddown(this->length, rmm->block_size);
if (block_l != this->length) {
this = region_split(rmm, this, block_l);
if (!this)
return;
}
/* mark as having no type, and retry merge with any adjacent
* untyped blocks
*/
this->type = 0;
nouveau_mm_merge(rmm, this);
}
int
nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
u32 align, struct nouveau_mm_node **pnode)
{
struct nouveau_mm_node *this, *tmp, *next;
u32 splitoff, avail, alloc;
list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
if (this->nl_entry.next == &rmm->nodes)
next = NULL;
/* skip wrongly typed blocks */
if (this->type && this->type != type)
continue;
/* account for alignment */
splitoff = this->offset & (align - 1);
if (splitoff)
splitoff = align - splitoff;
if (this->length <= splitoff)
continue;
/* determine total memory available from this, and
* the next block (if appropriate)
*/
avail = this->length;
if (next && next->free && (!next->type || next->type == type))
avail += next->length;
avail -= splitoff;
/* determine allocation size */
if (size_nc) {
alloc = min(avail, size);
alloc = rounddown(alloc, size_nc);
if (alloc == 0)
continue;
} else {
alloc = size;
if (avail < alloc)
continue;
}
/* untyped block, split off a chunk that's a multiple
* of block_size and type it
*/
if (!this->type) {
u32 block = roundup(alloc + splitoff, rmm->block_size);
if (this->length < block)
continue;
this = region_split(rmm, this, block);
if (!this)
return -ENOMEM;
this->type = type;
}
/* stealing memory from adjacent block */
if (alloc > this->length) {
u32 amount = alloc - (this->length - splitoff);
if (!next->type) {
amount = roundup(amount, rmm->block_size);
next = region_split(rmm, next, amount);
if (!next)
return -ENOMEM;
next->type = type;
}
this->length += amount;
next->offset += amount;
next->length -= amount;
if (!next->length) {
list_del(&next->nl_entry);
list_del(&next->fl_entry);
kfree(next);
}
}
if (splitoff) {
if (!region_split(rmm, this, splitoff))
return -ENOMEM;
}
this = region_split(rmm, this, alloc);
if (this == NULL)
return -ENOMEM;
this->free = false;
list_del(&this->fl_entry);
*pnode = this;
return 0;
}
return -ENOMEM;
}
int
nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
{
struct nouveau_mm *rmm;
struct nouveau_mm_node *heap;
heap = kzalloc(sizeof(*heap), GFP_KERNEL);
if (!heap)
return -ENOMEM;
heap->free = true;
heap->offset = roundup(offset, block);
heap->length = rounddown(offset + length, block) - heap->offset;
rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
if (!rmm) {
kfree(heap);
return -ENOMEM;
}
rmm->block_size = block;
mutex_init(&rmm->mutex);
INIT_LIST_HEAD(&rmm->nodes);
INIT_LIST_HEAD(&rmm->free);
list_add(&heap->nl_entry, &rmm->nodes);
list_add(&heap->fl_entry, &rmm->free);
*prmm = rmm;
return 0;
}
int
nouveau_mm_fini(struct nouveau_mm **prmm)
{
struct nouveau_mm *rmm = *prmm;
struct nouveau_mm_node *heap =
list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
if (!list_is_singular(&rmm->nodes))
return -EBUSY;
kfree(heap);
kfree(rmm);
*prmm = NULL;
return 0;
}
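The four entry points above are the allocator's whole surface. A sketch of the intended call sequence, with hypothetical page counts (in the driver, offsets and lengths are in 4 KiB units, the block granularity is the rblock size, and callers serialize get/put under mm->mutex exactly as nv50_vram.c does below):

/* Hypothetical usage; vram_pages and rblock_pages are placeholders. */
struct nouveau_mm *mm;
struct nouveau_mm_node *node;
int ret;

ret = nouveau_mm_init(&mm, 0, vram_pages, rblock_pages);
if (ret)
	return ret;

mutex_lock(&mm->mutex);
/* type 1, 16 pages, contiguous only (size_nc == 0), 16-page aligned */
ret = nouveau_mm_get(mm, 1, 16, 0, 16, &node);
if (ret == 0)
	nouveau_mm_put(mm, node);   /* merges, and un-types whole blocks */
mutex_unlock(&mm->mutex);

ret = nouveau_mm_fini(&mm);         /* -EBUSY until the heap is whole */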
drivers/gpu/drm/nouveau/nouveau_mm.h (new file)
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#ifndef __NOUVEAU_REGION_H__
#define __NOUVEAU_REGION_H__
struct nouveau_mm_node {
struct list_head nl_entry;
struct list_head fl_entry;
struct list_head rl_entry;
bool free;
int type;
u32 offset;
u32 length;
};
struct nouveau_mm {
struct list_head nodes;
struct list_head free;
struct mutex mutex;
u32 block_size;
};
int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
int nouveau_mm_fini(struct nouveau_mm **);
int nouveau_mm_pre(struct nouveau_mm *);
int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
u32 align, struct nouveau_mm_node **);
void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_vram **);
void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
#endif
drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -325,6 +325,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
0, 0x0000, true, false, &node->vram);
if (ret) {
NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
WARN_ON(1);
return ret;
}
drivers/gpu/drm/nouveau/nv50_vram.c (new file)
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
static int types[0x80] = {
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
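/* The index into the table above is the memtype that
 * nouveau_vram_manager_new() extracts as (tile_flags >> 8) & 0x7f.  A zero
 * entry causes nv50_vram_new() to reject the memtype with -EINVAL; the
 * non-zero entries partition the supported memtypes into the two nouveau_mm
 * type buckets, so incompatible memtypes never share a reordering block.
 */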
void
nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *this;
struct nouveau_vram *vram;
vram = *pvram;
*pvram = NULL;
if (unlikely(vram == NULL))
return;
mutex_lock(&mm->mutex);
while (!list_empty(&vram->regions)) {
this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
list_del(&this->rl_entry);
nouveau_mm_put(mm, this);
}
mutex_unlock(&mm->mutex);
kfree(vram);
}
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
u32 type, struct nouveau_vram **pvram)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
struct nouveau_vram *vram;
int ret;
if (!types[type])
return -EINVAL;
size >>= 12;
align >>= 12;
size_nc >>= 12;
vram = kzalloc(sizeof(*vram), GFP_KERNEL);
if (!vram)
return -ENOMEM;
INIT_LIST_HEAD(&vram->regions);
vram->dev = dev_priv->dev;
vram->memtype = type;
vram->size = size;
mutex_lock(&mm->mutex);
do {
ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
nv50_vram_del(dev, &vram);
return ret;
}
list_add_tail(&r->rl_entry, &vram->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
vram->offset = (u64)r->offset << 12;
*pvram = vram;
return 0;
}
static u32
nv50_vram_rblock(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i, parts, colbits, rowbitsa, rowbitsb, banks;
u64 rowsize, predicted;
u32 r0, r4, rt, ru, rblock_size;
r0 = nv_rd32(dev, 0x100200);
r4 = nv_rd32(dev, 0x100204);
rt = nv_rd32(dev, 0x100250);
ru = nv_rd32(dev, 0x001540);
NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
for (i = 0, parts = 0; i < 8; i++) {
if (ru & (0x00010000 << i))
parts++;
}
colbits = (r4 & 0x0000f000) >> 12;
rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
banks = ((r4 & 0x01000000) ? 8 : 4);
rowsize = parts * banks * (1 << colbits) * 8;
predicted = rowsize << rowbitsa;
if (r0 & 0x00000004)
predicted += rowsize << rowbitsb;
if (predicted != dev_priv->vram_size) {
NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
(u32)(dev_priv->vram_size >> 20));
NV_WARN(dev, "we calculated %dMiB VRAM\n",
(u32)(predicted >> 20));
}
rblock_size = rowsize;
if (rt & 1)
rblock_size *= 3;
NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
return rblock_size;
}
int
nv50_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
dev_priv->vram_size = nv_rd32(dev, 0x10020c);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ULL;
switch (dev_priv->chipset) {
case 0xaa:
case 0xac:
case 0xaf:
dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
dev_priv->vram_rblock_size = 4096;
break;
default:
dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
break;
}
return 0;
}
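A worked example of the arithmetic in nv50_vram_rblock(), using hypothetical register values: with all 8 partitions enabled in 0x001540, 8 banks and colbits = 10, rowsize = 8 * 8 * 2^10 * 8 = 524288 bytes. With rowbitsa = 12, predicted VRAM is 524288 << 12 = 2 GiB (a second rowsize << rowbitsb term is added when bit 2 of 0x100200 is set), and if bit 0 of 0x100250 is set the reordering block triples, to 1572864 bytes.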