Commit 584ef2cd authored by Linus Torvalds

Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon/kms: balance asic_reset functions
  drm/radeon/kms: remove duplicate card_posted() functions
  drm/radeon/kms: add module option for pcie gen2
  drm/radeon/kms: fix typo in evergreen safe reg
  drm/nouveau: fix gpu page faults triggered by plymouthd
  drm/nouveau: greatly simplify mm, killing some bugs in the process
  drm/nvc0: enable protection of system-use-only structures in vm
  drm/nv40: initialise 0x17xx on all chipsets that have it
  drm/nv40: make detection of 0x4097-ful chipsets available everywhere
parents e1288cd7 25b2ec5b
@@ -160,6 +160,7 @@ enum nouveau_flags {
 #define NVOBJ_FLAG_ZERO_ALLOC           (1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE            (1 << 2)
 #define NVOBJ_FLAG_VM                   (1 << 3)
+#define NVOBJ_FLAG_VM_USER              (1 << 4)
 #define NVOBJ_CINST_GLOBAL              0xdeadbeef
@@ -1576,6 +1577,20 @@ nv_match_device(struct drm_device *dev, unsigned device,
                dev->pdev->subsystem_device == sub_device;
 }
+
+/* returns 1 if device is one of the nv4x using the 0x4497 object class,
+ * helpful to determine a number of other hardware features
+ */
+static inline int
+nv44_graph_class(struct drm_device *dev)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+        if ((dev_priv->chipset & 0xf0) == 0x60)
+                return 1;
+
+        return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
+
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO  1
 #define NV_MEM_ACCESS_WO  2
...
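The new nv44_graph_class() helper centralises a test that several hunks below previously open-coded (nv40_graph_register(), the removed nv40_graph_4097() in nv40_grctx.c, and the chipset switch in nv40_mc_init()). Note that, unlike the removed nv40_graph_4097(), it masks the chipset with 0x0f before shifting into the 0x0baf bitmap. A small standalone sketch (userspace C, not kernel code) of what that bitmap means for the NV4x family:

```c
/* Standalone illustration of the nv44_graph_class() test above: for each
 * NV4x chipset id, report whether the helper would pick the 0x4497 ("nv44")
 * or 0x4097 ("nv40") graph class.  Chipset ids are passed directly instead
 * of through a struct drm_device.
 */
#include <stdio.h>

static int nv44_graph_class(unsigned int chipset)
{
	if ((chipset & 0xf0) == 0x60)
		return 1;

	return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
	unsigned int chipset;

	for (chipset = 0x40; chipset <= 0x4f; chipset++)
		printf("chipset 0x%02x -> class 0x%04x\n", chipset,
		       nv44_graph_class(chipset) ? 0x4497 : 0x4097);
	return 0;
}
```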
@@ -352,8 +352,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
                       FBINFO_HWACCEL_IMAGEBLIT;
        info->flags |= FBINFO_CAN_FORCE_OUTPUT;
        info->fbops = &nouveau_fbcon_sw_ops;
-       info->fix.smem_start = dev->mode_config.fb_base +
-                              (nvbo->bo.mem.start << PAGE_SHIFT);
+       info->fix.smem_start = nvbo->bo.mem.bus.base +
+                              nvbo->bo.mem.bus.offset;
        info->fix.smem_len = size;
        info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
...
@@ -742,30 +742,24 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 {
        struct nouveau_mm *mm = man->priv;
        struct nouveau_mm_node *r;
-       u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
-       int i;
+       u32 total = 0, free = 0;
 
        mutex_lock(&mm->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
-               printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
-                      prefix, r->free ? "free" : "used", r->type,
-                      ((u64)r->offset << 12),
+               printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+                      prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));
                total += r->length;
-               ttotal[r->type] += r->length;
-               if (r->free)
-                       tfree[r->type] += r->length;
-               else
-                       tused[r->type] += r->length;
+               if (!r->type)
+                       free += r->length;
        }
        mutex_unlock(&mm->mutex);
 
-       printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
-       for (i = 0; i < 3; i++) {
-               printk(KERN_DEBUG "%s type %d: 0x%010llx, "
-                      "used 0x%010llx, free 0x%010llx\n", prefix,
-                      i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
-       }
+       printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
+              prefix, (u64)total << 12, (u64)free << 12);
+       printk(KERN_DEBUG "%s block: 0x%08x\n",
+              prefix, mm->block_size << 12);
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
...
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
        b->offset = a->offset;
        b->length = size;
-       b->free   = a->free;
        b->type   = a->type;
        a->offset += size;
        a->length -= size;
        list_add_tail(&b->nl_entry, &a->nl_entry);
-       if (b->free)
+       if (b->type == 0)
                list_add_tail(&b->fl_entry, &a->fl_entry);
        return b;
 }
 
-static struct nouveau_mm_node *
-nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+       list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+
+void
+nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
 {
-       struct nouveau_mm_node *prev, *next;
-
-       /* try to merge with free adjacent entries of same type */
-       prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
-       if (this->nl_entry.prev != &rmm->nodes) {
-               if (prev->free && prev->type == this->type) {
+       struct nouveau_mm_node *prev = node(this, prev);
+       struct nouveau_mm_node *next = node(this, next);
+
+       list_add(&this->fl_entry, &rmm->free);
+       this->type = 0;
+
+       if (prev && prev->type == 0) {
                prev->length += this->length;
                region_put(rmm, this);
                this = prev;
        }
-       }
 
-       next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-       if (this->nl_entry.next != &rmm->nodes) {
-               if (next->free && next->type == this->type) {
+       if (next && next->type == 0) {
                next->offset  = this->offset;
                next->length += this->length;
                region_put(rmm, this);
-               this = next;
        }
-       }
-
-       return this;
-}
-
-void
-nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
-{
-       u32 block_s, block_l;
-
-       this->free = true;
-       list_add(&this->fl_entry, &rmm->free);
-       this = nouveau_mm_merge(rmm, this);
-
-       /* any entirely free blocks now? we'll want to remove typing
-        * on them now so they can be use for any memory allocation
-        */
-       block_s = roundup(this->offset, rmm->block_size);
-       if (block_s + rmm->block_size > this->offset + this->length)
-               return;
-
-       /* split off any still-typed region at the start */
-       if (block_s != this->offset) {
-               if (!region_split(rmm, this, block_s - this->offset))
-                       return;
-       }
-
-       /* split off the soon-to-be-untyped block(s) */
-       block_l = rounddown(this->length, rmm->block_size);
-       if (block_l != this->length) {
-               this = region_split(rmm, this, block_l);
-               if (!this)
-                       return;
-       }
-
-       /* mark as having no type, and retry merge with any adjacent
-        * untyped blocks
-        */
-       this->type = 0;
-       nouveau_mm_merge(rmm, this);
 }
 
 int
 nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
               u32 align, struct nouveau_mm_node **pnode)
 {
-       struct nouveau_mm_node *this, *tmp, *next;
-       u32 splitoff, avail, alloc;
-
-       list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
-               next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-               if (this->nl_entry.next == &rmm->nodes)
-                       next = NULL;
-
-               /* skip wrongly typed blocks */
-               if (this->type && this->type != type)
+       struct nouveau_mm_node *prev, *this, *next;
+       u32 min = size_nc ? size_nc : size;
+       u32 align_mask = align - 1;
+       u32 splitoff;
+       u32 s, e;
+
+       list_for_each_entry(this, &rmm->free, fl_entry) {
+               e = this->offset + this->length;
+               s = this->offset;
+
+               prev = node(this, prev);
+               if (prev && prev->type != type)
+                       s = roundup(s, rmm->block_size);
+
+               next = node(this, next);
+               if (next && next->type != type)
+                       e = rounddown(e, rmm->block_size);
+
+               s  = (s + align_mask) & ~align_mask;
+               e &= ~align_mask;
+               if (s > e || e - s < min)
                        continue;
-               /* account for alignment */
-               splitoff = this->offset & (align - 1);
-               if (splitoff)
-                       splitoff = align - splitoff;
-               if (this->length <= splitoff)
-                       continue;
-
-               /* determine total memory available from this, and
-                * the next block (if appropriate)
-                */
-               avail = this->length;
-               if (next && next->free && (!next->type || next->type == type))
-                       avail += next->length;
-               avail -= splitoff;
-
-               /* determine allocation size */
-               if (size_nc) {
-                       alloc = min(avail, size);
-                       alloc = rounddown(alloc, size_nc);
-                       if (alloc == 0)
-                               continue;
-               } else {
-                       alloc = size;
-                       if (avail < alloc)
-                               continue;
-               }
-
-               /* untyped block, split off a chunk that's a multiple
-                * of block_size and type it
-                */
-               if (!this->type) {
-                       u32 block = roundup(alloc + splitoff, rmm->block_size);
-                       if (this->length < block)
-                               continue;
-
-                       this = region_split(rmm, this, block);
+               splitoff = s - this->offset;
+               if (splitoff && !region_split(rmm, this, splitoff))
+                       return -ENOMEM;
+
+               this = region_split(rmm, this, min(size, e - s));
                if (!this)
                        return -ENOMEM;
 
                this->type = type;
-               }
-
-               /* stealing memory from adjacent block */
-               if (alloc > this->length) {
-                       u32 amount = alloc - (this->length - splitoff);
-
-                       if (!next->type) {
-                               amount = roundup(amount, rmm->block_size);
-
-                               next = region_split(rmm, next, amount);
-                               if (!next)
-                                       return -ENOMEM;
-
-                               next->type = type;
-                       }
-
-                       this->length += amount;
-                       next->offset += amount;
-                       next->length -= amount;
-                       if (!next->length) {
-                               list_del(&next->nl_entry);
-                               list_del(&next->fl_entry);
-                               kfree(next);
-                       }
-               }
-
-               if (splitoff) {
-                       if (!region_split(rmm, this, splitoff))
-                               return -ENOMEM;
-               }
-
-               this = region_split(rmm, this, alloc);
-               if (this == NULL)
-                       return -ENOMEM;
-
-               this->free = false;
                list_del(&this->fl_entry);
                *pnode = this;
                return 0;
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return -ENOMEM;
-       heap->free = true;
        heap->offset = roundup(offset, block);
        heap->length = rounddown(offset + length, block) - heap->offset;
...
@@ -30,9 +30,7 @@ struct nouveau_mm_node {
        struct list_head fl_entry;
        struct list_head rl_entry;
 
-       bool free;
-       int  type;
+       u8  type;
 
        u32 offset;
        u32 length;
 };
...
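With the `free` flag gone, a region is free exactly when its `type` is 0, and the rewritten nouveau_mm_put() above simply coalesces the freed node with any untyped neighbours in the address-ordered node list. The sketch below shows the same coalescing idea in plain userspace C, with bare prev/next pointers instead of the kernel list helpers; all names are made up for illustration:

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy node in an address-ordered, doubly linked region list.
 * As in the rewritten allocator, type == 0 means "free". */
struct region {
	struct region *prev, *next;
	unsigned int type;
	unsigned int offset, length;
};

/* Free a region and merge it with free neighbours, mirroring the shape of
 * the simplified nouveau_mm_put() above. */
static void region_free(struct region *r)
{
	r->type = 0;

	if (r->prev && r->prev->type == 0) {
		struct region *p = r->prev;

		p->length += r->length;
		p->next = r->next;
		if (r->next)
			r->next->prev = p;
		free(r);
		r = p;
	}

	if (r->next && r->next->type == 0) {
		struct region *n = r->next;

		r->length += n->length;
		r->next = n->next;
		if (n->next)
			n->next->prev = r;
		free(n);
	}
}

int main(void)
{
	struct region *a = calloc(1, sizeof(*a));
	struct region *b = calloc(1, sizeof(*b));
	struct region *c = calloc(1, sizeof(*c));

	a->type = 1; a->offset = 0;  a->length = 16;	/* still allocated */
	b->type = 2; b->offset = 16; b->length = 16;	/* being freed */
	c->type = 0; c->offset = 32; c->length = 16;	/* already free */
	a->next = b; b->prev = a; b->next = c; c->prev = b;

	region_free(b);		/* b becomes free and absorbs c */
	printf("free run at 0x%x, length 0x%x\n",
	       a->next->offset, a->next->length);	/* 0x10, 0x20 */
	return 0;
}
```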
@@ -451,8 +451,7 @@ nv40_graph_register(struct drm_device *dev)
        NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
 
        /* curie */
-       if (dev_priv->chipset >= 0x60 ||
-           0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+       if (nv44_graph_class(dev))
                NVOBJ_CLASS(dev, 0x4497, GR);
        else
                NVOBJ_CLASS(dev, 0x4097, GR);
...
@@ -117,17 +117,6 @@
  * - get vs count from 0x1540
  */
 
-static int
-nv40_graph_4097(struct drm_device *dev)
-{
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-       if ((dev_priv->chipset & 0xf0) == 0x60)
-               return 0;
-
-       return !!(0x0baf & (1 << dev_priv->chipset));
-}
-
 static int
 nv40_graph_vs_count(struct drm_device *dev)
 {
@@ -219,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
                gr_def(ctx, 0x4009dc, 0x80000000);
        } else {
                cp_ctx(ctx, 0x400840, 20);
-               if (!nv40_graph_4097(ctx->dev)) {
+               if (nv44_graph_class(ctx->dev)) {
                        for (i = 0; i < 8; i++)
                                gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
                }
@@ -228,7 +217,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
                gr_def(ctx, 0x400888, 0x00000040);
                cp_ctx(ctx, 0x400894, 11);
                gr_def(ctx, 0x400894, 0x00000040);
-               if (nv40_graph_4097(ctx->dev)) {
+               if (!nv44_graph_class(ctx->dev)) {
                        for (i = 0; i < 8; i++)
                                gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
                }
@@ -546,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 {
-       int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+       int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
 
        cp_out (ctx, 0x300000);
        cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
        } else {
                b0_offset = 0x1d40/4; /* 2200 */
                b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
-               vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+               vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
        }
 
        cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
-       cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+       cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
 
        offset = ctx->ctxvals_pos;
        ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
...
@@ -6,27 +6,17 @@
 int
 nv40_mc_init(struct drm_device *dev)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
-
        /* Power up everything, resetting each individual unit will
        * be done later if needed.
        */
        nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
 
-       switch (dev_priv->chipset) {
-       case 0x44:
-       case 0x46: /* G72 */
-       case 0x4e:
-       case 0x4c: /* C51_G7X */
-               tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+       if (nv44_graph_class(dev)) {
+               u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+
                nv_wr32(dev, NV40_PMC_1700, tmp);
                nv_wr32(dev, NV40_PMC_1704, 0);
                nv_wr32(dev, NV40_PMC_1708, 0);
                nv_wr32(dev, NV40_PMC_170C, tmp);
-               break;
-       default:
-               break;
        }
 
        return 0;
...
@@ -332,8 +332,11 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
        gpuobj->vinst = node->vram->offset;
 
        if (gpuobj->flags & NVOBJ_FLAG_VM) {
-               ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
-                                    NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+               u32 flags = NV_MEM_ACCESS_RW;
+               if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
+                       flags |= NV_MEM_ACCESS_SYS;
+
+               ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
                                     &node->chan_vma);
                if (ret) {
                        vram->put(dev, &node->vram);
...
@@ -105,7 +105,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
        if (ret)
                return ret;
 
-       ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+       ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+                                NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
                                 &grch->unk418810);
        if (ret)
                return ret;
...
@@ -48,8 +48,8 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
        phys >>= 8;
 
        phys |= 0x00000001; /* present */
-//     if (vma->access & NV_MEM_ACCESS_SYS)
-//             phys |= 0x00000002;
+       if (vma->access & NV_MEM_ACCESS_SYS)
+               phys |= 0x00000002;
 
        phys |= ((u64)target  << 32);
        phys |= ((u64)memtype << 36);
...
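The nvc0_vm.c hunk turns on the previously commented-out "system access" PTE bit whenever the VMA was created with NV_MEM_ACCESS_SYS; combined with the new NVOBJ_FLAG_VM_USER handling in nv50_instmem.c above, user-visible objects no longer get that bit. A userspace sketch of the bit packing visible in this hunk (the field values are made up for illustration):

```c
#include <stdio.h>
#include <stdint.h>

/* Pack one page table entry the way the visible lines of nvc0_vm_addr()
 * do: physical address shifted into place, bit 0 = present, bit 1 =
 * system access, target and memtype fields above bit 32. */
static uint64_t pack_pte(uint64_t phys, uint32_t memtype, uint32_t target,
			 int sys_access)
{
	phys >>= 8;
	phys |= 0x00000001;		/* present */
	if (sys_access)
		phys |= 0x00000002;	/* NV_MEM_ACCESS_SYS granted */
	phys |= (uint64_t)target << 32;
	phys |= (uint64_t)memtype << 36;
	return phys;
}

int main(void)
{
	/* arbitrary example values, just to show the layout */
	printf("pte = 0x%016llx\n",
	       (unsigned long long)pack_pte(0x10000000ULL, 0xfe, 5, 1));
	return 0;
}
```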
@@ -3002,31 +3002,6 @@ int evergreen_copy_blit(struct radeon_device *rdev,
        return 0;
 }
 
-static bool evergreen_card_posted(struct radeon_device *rdev)
-{
-       u32 reg;
-
-       /* first check CRTCs */
-       if (rdev->flags & RADEON_IS_IGP)
-               reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-                     RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-       else
-               reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-                     RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-                     RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-                     RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-                     RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-                     RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
-       if (reg & EVERGREEN_CRTC_MASTER_EN)
-               return true;
-
-       /* then check MEM_SIZE, in case the crtcs are off */
-       if (RREG32(CONFIG_MEMSIZE))
-               return true;
-
-       return false;
-}
-
 /* Plan is to move initialization in that function and use
  * helper function so that radeon_device_init pretty much
  * do nothing more than calling asic specific function. This
@@ -3063,7 +3038,7 @@ int evergreen_init(struct radeon_device *rdev)
        if (radeon_asic_reset(rdev))
                dev_warn(rdev->dev, "GPU reset failed !\n");
        /* Post card if necessary */
-       if (!evergreen_card_posted(rdev)) {
+       if (!radeon_card_posted(rdev)) {
                if (!rdev->bios) {
                        dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                        return -EINVAL;
@@ -3158,6 +3133,9 @@ static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 {
        u32 link_width_cntl, speed_cntl;
 
+       if (radeon_pcie_gen2 == 0)
+               return;
+
        if (rdev->flags & RADEON_IS_IGP)
                return;
...
@@ -2086,12 +2086,13 @@ int r100_asic_reset(struct radeon_device *rdev)
 {
        struct r100_mc_save save;
        u32 status, tmp;
+       int ret = 0;
 
-       r100_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(status)) {
                return 0;
        }
+       r100_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* stop CP */
@@ -2131,11 +2132,11 @@
            G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
                dev_err(rdev->dev, "failed to reset GPU\n");
                rdev->gpu_lockup = true;
-               return -1;
-       }
-       r100_mc_resume(rdev, &save);
+               ret = -1;
+       } else
                dev_info(rdev->dev, "GPU reset succeed\n");
-       return 0;
+       r100_mc_resume(rdev, &save);
+       return ret;
 }
 
 void r100_set_common_regs(struct radeon_device *rdev)
...
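The r100.c change above (and the matching r300.c and rs600.c hunks below) is what the "balance asic_reset functions" entry in the commit message refers to: the MC is now stopped only after the early "GUI idle" return, and it is resumed on the failure path as well instead of being left stopped. A toy program with the balanced shape, an illustration of the control flow only, not radeon code:

```c
#include <stdio.h>
#include <stdbool.h>

static int stops, resumes;

static void mc_stop(void)   { stops++; }
static void mc_resume(void) { resumes++; }

/* Balanced shape used by the new code: stop only when a reset is actually
 * attempted, and resume on every path that stopped, even when the reset
 * fails. */
static int asic_reset(bool gui_active, bool reset_ok)
{
	int ret = 0;

	if (!gui_active)
		return 0;	/* nothing was stopped, nothing to resume */

	mc_stop();
	if (!reset_ok)
		ret = -1;	/* remember the failure, but fall through */
	mc_resume();		/* always pairs with mc_stop() above */
	return ret;
}

int main(void)
{
	asic_reset(true, false);	/* failed reset */
	asic_reset(false, true);	/* GUI already idle */
	printf("stops=%d resumes=%d\n", stops, resumes);	/* 1 and 1 */
	return 0;
}
```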
@@ -405,12 +405,13 @@ int r300_asic_reset(struct radeon_device *rdev)
 {
        struct r100_mc_save save;
        u32 status, tmp;
+       int ret = 0;
 
-       r100_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(status)) {
                return 0;
        }
+       r100_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* stop CP */
@@ -451,11 +452,11 @@
        if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
                dev_err(rdev->dev, "failed to reset GPU\n");
                rdev->gpu_lockup = true;
-               return -1;
-       }
-       r100_mc_resume(rdev, &save);
+               ret = -1;
+       } else
                dev_info(rdev->dev, "GPU reset succeed\n");
-       return 0;
+       r100_mc_resume(rdev, &save);
+       return ret;
 }
 
 /*
...
@@ -2358,24 +2358,6 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
        /* FIXME: implement */
 }
 
-bool r600_card_posted(struct radeon_device *rdev)
-{
-       uint32_t reg;
-
-       /* first check CRTCs */
-       reg = RREG32(D1CRTC_CONTROL) |
-               RREG32(D2CRTC_CONTROL);
-       if (reg & CRTC_EN)
-               return true;
-
-       /* then check MEM_SIZE, in case the crtcs are off */
-       if (RREG32(CONFIG_MEMSIZE))
-               return true;
-
-       return false;
-}
-
 int r600_startup(struct radeon_device *rdev)
 {
        int r;
@@ -2536,7 +2518,7 @@ int r600_init(struct radeon_device *rdev)
        if (r)
                return r;
        /* Post card if necessary */
-       if (!r600_card_posted(rdev)) {
+       if (!radeon_card_posted(rdev)) {
                if (!rdev->bios) {
                        dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                        return -EINVAL;
@@ -3658,6 +3640,9 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
        u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
        u16 link_cntl2;
 
+       if (radeon_pcie_gen2 == 0)
+               return;
+
        if (rdev->flags & RADEON_IS_IGP)
                return;
...
@@ -92,6 +92,7 @@ extern int radeon_tv;
 extern int radeon_audio;
 extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
...
@@ -104,6 +104,7 @@ int radeon_tv = 1;
 int radeon_audio = 1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -147,6 +148,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444);
 MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
 module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
 
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
...
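The radeon hunks above and the pcie_gen2_enable() guards elsewhere in this merge add an opt-in module option: PCIe gen2 switching stays off unless something like radeon.pcie_gen2=1 is given on the kernel command line (or pcie_gen2=1 at modprobe time). A minimal, self-contained module showing the same declaration pattern, with toy names rather than radeon code:

```c
#include <linux/module.h>
#include <linux/init.h>

/* Toy equivalent of the radeon_pcie_gen2 option: an integer module
 * parameter, off by default, readable via sysfs (mode 0444). */
static int example_gen2;
MODULE_PARM_DESC(gen2, "Gen2 mode (1 = enable)");
module_param_named(gen2, example_gen2, int, 0444);

static int __init example_init(void)
{
	/* mirrors the "if (radeon_pcie_gen2 == 0) return;" guards above */
	if (example_gen2 == 0)
		pr_info("example: gen2 left disabled\n");
	else
		pr_info("example: gen2 requested\n");
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```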
@@ -439,7 +439,7 @@ evergreen 0x9400
 0x000286EC SPI_COMPUTE_NUM_THREAD_X
 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
-0x000286F8 GDS_ADDR_SIZE
+0x00028724 GDS_ADDR_SIZE
 0x00028780 CB_BLEND0_CONTROL
 0x00028784 CB_BLEND1_CONTROL
 0x00028788 CB_BLEND2_CONTROL
...
@@ -339,16 +339,16 @@ void rs600_bm_disable(struct radeon_device *rdev)
 
 int rs600_asic_reset(struct radeon_device *rdev)
 {
-       u32 status, tmp;
        struct rv515_mc_save save;
+       u32 status, tmp;
+       int ret = 0;
 
-       /* Stops all mc clients */
-       rv515_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(status)) {
                return 0;
        }
+       /* Stops all mc clients */
+       rv515_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* stop CP */
@@ -392,11 +392,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
        if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
                dev_err(rdev->dev, "failed to reset GPU\n");
                rdev->gpu_lockup = true;
-               return -1;
-       }
-       rv515_mc_resume(rdev, &save);
+               ret = -1;
+       } else
                dev_info(rdev->dev, "GPU reset succeed\n");
-       return 0;
+       rv515_mc_resume(rdev, &save);
+       return ret;
 }
 
 /*
...
@@ -1268,7 +1268,7 @@ int rv770_init(struct radeon_device *rdev)
        if (r)
                return r;
        /* Post card if necessary */
-       if (!r600_card_posted(rdev)) {
+       if (!radeon_card_posted(rdev)) {
                if (!rdev->bios) {
                        dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                        return -EINVAL;
@@ -1372,6 +1372,9 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
        u32 link_width_cntl, lanes, speed_cntl, tmp;
        u16 link_cntl2;
 
+       if (radeon_pcie_gen2 == 0)
+               return;
+
        if (rdev->flags & RADEON_IS_IGP)
                return;
...