Commit 27ddabc3 authored by Dave Airlie

Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6

Fixes for some locking issues and fence timeouts.

* 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6:
  drm/nouveau: do not allow negative sizes for now
  drm/nouveau: add falcon interrupt handler
  drm/nouveau: use dedicated channel for async moves on GT/GF chipsets.
  drm/nouveau: bump fence timeout to 15 seconds
  drm/nouveau: do not unpin in nouveau_gem_object_del
  drm/nv50/kms: fix pin refcnt leaks
  drm/nouveau: fix some error-path leaks in fbcon handling code
  drm/nouveau: fix locking issues in page flipping paths
parents 25f397a4 0108bc80
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00008000;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nvc0_bsp_cclass;
 	nv_engine(priv)->sclass = nvc0_bsp_sclass;
 	return 0;

@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00008000;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nve0_bsp_cclass;
 	nv_engine(priv)->sclass = nve0_bsp_sclass;
 	return 0;

@@ -23,6 +23,25 @@
 #include <engine/falcon.h>
 #include <subdev/timer.h>
 
+void
+nouveau_falcon_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_falcon *falcon = (void *)subdev;
+	u32 dispatch = nv_ro32(falcon, 0x01c);
+	u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+
+	if (intr & 0x00000010) {
+		nv_debug(falcon, "ucode halted\n");
+		nv_wo32(falcon, 0x004, 0x00000010);
+		intr &= ~0x00000010;
+	}
+
+	if (intr) {
+		nv_error(falcon, "unhandled intr 0x%08x\n", intr);
+		nv_wo32(falcon, 0x004, intr);
+	}
+}
+
 u32
 _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
 {

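As a quick illustration of the masking expression in nouveau_falcon_intr() above, here is a small user-space sketch with made-up register values. It only demonstrates the arithmetic (a pending bit survives only if the matching enable bit in the low half of the dispatch word is set and the corresponding bit in the high half is clear); it makes no claim about the hardware semantics of registers 0x008 and 0x01c.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical register values, chosen only to illustrate the masking. */
	uint32_t status   = 0x00000110;	/* pending interrupt bits 4 and 8 (reg 0x008) */
	uint32_t dispatch = 0x01000110;	/* reg 0x01c: low half enables bits 4 and 8, high-half bit 24 set */

	/* Same expression as in nouveau_falcon_intr() */
	uint32_t intr = status & dispatch & ~(dispatch >> 16);

	printf("intr = 0x%08x\n", intr);	/* prints 0x00000010: bit 8 is masked by bit 24 */
	return 0;
}
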
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nvc0_ppp_cclass;
 	nv_engine(priv)->sclass = nvc0_ppp_sclass;
 	return 0;

@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00020000;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nvc0_vp_cclass;
 	nv_engine(priv)->sclass = nvc0_vp_sclass;
 	return 0;

@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	nv_subdev(priv)->unit = 0x00020000;
+	nv_subdev(priv)->intr = nouveau_falcon_intr;
 	nv_engine(priv)->cclass = &nve0_vp_cclass;
 	nv_engine(priv)->sclass = nve0_vp_sclass;
 	return 0;

@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
 			   struct nouveau_oclass *, u32, bool, const char *,
 			   const char *, int, void **);
 
+void nouveau_falcon_intr(struct nouveau_subdev *subdev);
+
 #define _nouveau_falcon_dtor _nouveau_engine_dtor
 int _nouveau_falcon_init(struct nouveau_object *);
 int _nouveau_falcon_fini(struct nouveau_object *, bool);

@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
+	WARN_ON(nvbo->pin_refcnt > 0);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 	kfree(nvbo);
 }
@@ -197,6 +198,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	size_t acc_size;
 	int ret;
 	int type = ttm_bo_type_device;
+	int max_size = INT_MAX & ~((1 << drm->client.base.vm->vmm->lpg_shift) - 1);
+
+	if (size <= 0 || size > max_size) {
+		nv_warn(drm, "skipped size %x\n", (u32)size);
+		return -EINVAL;
+	}
 
 	if (sg)
 		type = ttm_bo_type_sg;
@@ -340,13 +347,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
-	int ret;
+	int ret, ref;
 
 	ret = ttm_bo_reserve(bo, false, false, false, 0);
 	if (ret)
 		return ret;
 
-	if (--nvbo->pin_refcnt)
+	ref = --nvbo->pin_refcnt;
+	WARN_ON_ONCE(ref < 0);
+	if (ref)
 		goto out;
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
@@ -578,7 +587,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
 	int ret = RING_SPACE(chan, 2);
 	if (ret == 0) {
 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING (chan, handle);
+		OUT_RING (chan, handle & 0x0000ffff);
 		FIRE_RING (chan);
 	}
 	return ret;
@@ -973,7 +982,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
 
-	mutex_lock(&chan->cli->mutex);
+	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
 
 	/* create temporary vmas for the transfer and attach them to the
 	 * old nouveau_mem node, these will get cleaned up after ttm has
@@ -1014,7 +1023,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
-		{ "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
@@ -1034,7 +1043,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 		struct nouveau_channel *chan;
 		u32 handle = (mthd->engine << 16) | mthd->oclass;
 
-		if (mthd->init == nve0_bo_move_init)
+		if (mthd->engine)
 			chan = drm->cechan;
 		else
 			chan = drm->channel;

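The nouveau_bo_move_m2mf() change above replaces mutex_lock() with mutex_lock_nested(..., SINGLE_DEPTH_NESTING). That annotation matters when two mutexes of the same lock class can legitimately be held at once: without it, lockdep treats the second acquisition as recursive locking. A minimal sketch of the idiom, using a hypothetical structure rather than nouveau's real ones (this also appears to be why the separate drm_client_lock_class_key workaround is dropped in the nouveau_drm.c hunk further down):

#include <linux/mutex.h>

struct demo_client {
	struct mutex mutex;		/* every instance shares one lock class */
};

/* Hypothetical helper that holds two clients' mutexes at the same time.
 * The two struct mutex objects are distinct, but they belong to the same
 * lock class, so a plain mutex_lock() on the second one would make lockdep
 * report a possible recursive deadlock.  SINGLE_DEPTH_NESTING tells it that
 * this one level of same-class nesting is intentional. */
static void demo_lock_both(struct demo_client *outer, struct demo_client *inner)
{
	mutex_lock(&outer->mutex);
	mutex_lock_nested(&inner->mutex, SINGLE_DEPTH_NESTING);

	/* ... work that needs both clients locked ... */

	mutex_unlock(&inner->mutex);
	mutex_unlock(&outer->mutex);
}
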
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 {
 	struct nouveau_framebuffer *nouveau_fb;
 	struct drm_gem_object *gem;
-	int ret;
+	int ret = -ENOMEM;
 
 	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
 	if (!gem)
@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 	nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
 	if (!nouveau_fb)
-		return ERR_PTR(-ENOMEM);
+		goto err_unref;
 
 	ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
-	if (ret) {
-		drm_gem_object_unreference(gem);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto err;
 
 	return &nouveau_fb->base;
+
+err:
+	kfree(nouveau_fb);
+err_unref:
+	drm_gem_object_unreference(gem);
+	return ERR_PTR(ret);
 }
 
 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
@@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct nouveau_page_flip_state *s;
 	struct nouveau_channel *chan = NULL;
 	struct nouveau_fence *fence;
-	struct list_head res;
-	struct ttm_validate_buffer res_val[2];
+	struct ttm_validate_buffer resv[2] = {
+		{ .bo = &old_bo->bo },
+		{ .bo = &new_bo->bo },
+	};
 	struct ww_acquire_ctx ticket;
+	LIST_HEAD(res);
 	int ret;
 
 	if (!drm->channel)
@@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		chan = drm->channel;
 	spin_unlock(&old_bo->bo.bdev->fence_lock);
 
-	mutex_lock(&chan->cli->mutex);
 	if (new_bo != old_bo) {
 		ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
-		if (likely(!ret)) {
-			res_val[0].bo = &old_bo->bo;
-			res_val[1].bo = &new_bo->bo;
-			INIT_LIST_HEAD(&res);
-			list_add_tail(&res_val[0].head, &res);
-			list_add_tail(&res_val[1].head, &res);
-
-			ret = ttm_eu_reserve_buffers(&ticket, &res);
-			if (ret)
-				nouveau_bo_unpin(new_bo);
-		}
-	} else
-		ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+		if (ret)
+			goto fail_free;
 
-	if (ret) {
-		mutex_unlock(&chan->cli->mutex);
-		goto fail_free;
+		list_add(&resv[1].head, &res);
 	}
+	list_add(&resv[0].head, &res);
+
+	mutex_lock(&chan->cli->mutex);
+	ret = ttm_eu_reserve_buffers(&ticket, &res);
+	if (ret)
+		goto fail_unpin;
 
 	/* Initialize a page flip struct */
 	*s = (struct nouveau_page_flip_state)
@@ -576,10 +575,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	/* Emit a page flip */
 	if (nv_device(drm->device)->card_type >= NV_50) {
 		ret = nv50_display_flip_next(crtc, fb, chan, 0);
-		if (ret) {
-			mutex_unlock(&chan->cli->mutex);
+		if (ret)
 			goto fail_unreserve;
-		}
 	}
 
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -590,22 +587,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	/* Update the crtc struct and cleanup */
 	crtc->fb = fb;
 
-	if (old_bo != new_bo) {
-		ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+	ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+	if (old_bo != new_bo)
 		nouveau_bo_unpin(old_bo);
-	} else {
-		nouveau_bo_fence(new_bo, fence);
-		ttm_bo_unreserve(&new_bo->bo);
-	}
 	nouveau_fence_unref(&fence);
 	return 0;
 
 fail_unreserve:
-	if (old_bo != new_bo) {
-		ttm_eu_backoff_reservation(&ticket, &res);
+	ttm_eu_backoff_reservation(&ticket, &res);
+fail_unpin:
+	mutex_unlock(&chan->cli->mutex);
+	if (old_bo != new_bo)
 		nouveau_bo_unpin(new_bo);
-	} else
-		ttm_bo_unreserve(&new_bo->bo);
 fail_free:
 	kfree(s);
 	return ret;

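The page-flip rework above follows the standard ttm_execbuf_util flow: put the buffers on a list of ttm_validate_buffer entries, reserve them all under one ww_acquire_ctx, then either attach the fence with ttm_eu_fence_buffer_objects() or back everything off with ttm_eu_backoff_reservation(). A stripped-down sketch of that flow for two buffer objects, with the nouveau-specific pinning and channel handling left out; the helper name and the bare void *fence are placeholders matching the 3.x-era API used in this diff:

#include <drm/ttm/ttm_execbuf_util.h>
#include <linux/list.h>

/* Hypothetical helper: reserve two buffer objects and fence them together. */
static int demo_fence_two_bos(struct ttm_buffer_object *a,
			      struct ttm_buffer_object *b,
			      void *fence)
{
	struct ttm_validate_buffer bufs[2] = {
		{ .bo = a },
		{ .bo = b },
	};
	struct ww_acquire_ctx ticket;
	LIST_HEAD(list);
	int ret;

	list_add(&bufs[0].head, &list);
	list_add(&bufs[1].head, &list);

	/* Reserve every buffer on the list; on failure nothing is left reserved. */
	ret = ttm_eu_reserve_buffers(&ticket, &list);
	if (ret)
		return ret;

	/* ... emit the work that "fence" will signal; if that step failed, the
	 * error path would call ttm_eu_backoff_reservation(&ticket, &list) ... */

	/* Attach the fence to all buffers and drop the reservations. */
	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
	return 0;
}
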
@@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
 		arg1 = 1;
+	} else
+	if (device->chipset >= 0xa3 &&
+	    device->chipset != 0xaa &&
+	    device->chipset != 0xac) {
+		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
+					  NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
+					  &drm->cechan);
+		if (ret)
+			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
+
+		arg0 = NvDmaFB;
+		arg1 = NvDmaTT;
 	} else {
 		arg0 = NvDmaFB;
 		arg1 = NvDmaTT;
@@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 	return 0;
 }
 
-static struct lock_class_key drm_client_lock_class_key;
-
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
@@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
 	if (ret)
 		return ret;
-	lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
 
 	dev->dev_private = drm;
 	drm->dev = dev;

@@ -385,6 +385,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 	mutex_unlock(&dev->struct_mutex);
 	if (chan)
 		nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
+	nouveau_bo_unmap(nvbo);
 out_unpin:
 	nouveau_bo_unpin(nvbo);
 out_unref:

@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 	int ret;
 
 	fence->channel = chan;
-	fence->timeout = jiffies + (3 * DRM_HZ);
+	fence->timeout = jiffies + (15 * DRM_HZ);
 	fence->sequence = ++fctx->sequence;
 
 	ret = fctx->emit(fence);

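The hunk above only changes how the timeout is armed (15 seconds instead of 3). For completeness, a minimal sketch of the usual jiffies idiom for testing such a deadline later; the helper name is made up and does not correspond to a nouveau function:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Hypothetical check for a deadline armed as "timeout = jiffies + n * HZ";
 * time_after_eq() does the comparison safely across jiffies wrap-around. */
static bool demo_deadline_expired(unsigned long timeout)
{
	return time_after_eq(jiffies, timeout);
}
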
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 		return;
 	nvbo->gem = NULL;
 
-	/* Lockdep hates you for doing reserve with gem object lock held */
-	if (WARN_ON_ONCE(nvbo->pin_refcnt)) {
-		nvbo->pin_refcnt = 1;
-		nouveau_bo_unpin(nvbo);
-	}
-
 	if (gem->import_attach)
 		drm_prime_gem_destroy(gem, nvbo->bo.sg);

@@ -355,6 +355,7 @@ struct nv50_oimm {
 struct nv50_head {
 	struct nouveau_crtc base;
+	struct nouveau_bo *image;
 	struct nv50_curs curs;
 	struct nv50_sync sync;
 	struct nv50_ovly ovly;
@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 {
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_head *head = nv50_head(crtc);
 	struct nv50_sync *sync = nv50_sync(crtc);
-	int head = nv_crtc->index, ret;
 	u32 *push;
+	int ret;
 
 	swap_interval <<= 4;
 	if (swap_interval == 0)
@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			return ret;
 
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-		OUT_RING (chan, NvEvoSema0 + head);
+		OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
 		OUT_RING (chan, sync->addr ^ 0x10);
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
 		OUT_RING (chan, sync->data + 1);
@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		OUT_RING (chan, sync->data);
 	} else
 	if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
-		u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
 		ret = RING_SPACE(chan, 12);
 		if (ret)
 			return ret;
@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
 	} else
 	if (chan) {
-		u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
 		ret = RING_SPACE(chan, 10);
 		if (ret)
 			return ret;
@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	evo_mthd(push, 0x0080, 1);
 	evo_data(push, 0x00000000);
 	evo_kick(push, sync);
+
+	nouveau_bo_ref(nv_fb->nvbo, &head->image);
 	return 0;
 }
@@ -1038,18 +1042,17 @@ static int
 nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
 {
 	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	struct nv50_head *head = nv50_head(crtc);
 	int ret;
 
 	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
-	if (ret)
-		return ret;
-
-	if (old_fb) {
-		nvfb = nouveau_framebuffer(old_fb);
-		nouveau_bo_unpin(nvfb->nvbo);
+	if (ret == 0) {
+		if (head->image)
+			nouveau_bo_unpin(head->image);
+		nouveau_bo_ref(nvfb->nvbo, &head->image);
 	}
 
-	return 0;
+	return ret;
 }
@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
 	}
 }
 
+static void
+nv50_crtc_disable(struct drm_crtc *crtc)
+{
+	struct nv50_head *head = nv50_head(crtc);
+
+	if (head->image)
+		nouveau_bo_unpin(head->image);
+	nouveau_bo_ref(NULL, &head->image);
+}
+
 static int
 nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 		     uint32_t handle, uint32_t width, uint32_t height)
@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nv50_disp *disp = nv50_disp(crtc->dev);
 	struct nv50_head *head = nv50_head(crtc);
 
 	nv50_dmac_destroy(disp->core, &head->ovly.base);
 	nv50_pioc_destroy(disp->core, &head->oimm.base);
 	nv50_dmac_destroy(disp->core, &head->sync.base);
 	nv50_pioc_destroy(disp->core, &head->curs.base);
 
+	/*XXX: this shouldn't be necessary, but the core doesn't call
+	 * disconnect() during the cleanup paths
+	 */
+	if (head->image)
+		nouveau_bo_unpin(head->image);
+	nouveau_bo_ref(NULL, &head->image);
+
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	if (nv_crtc->cursor.nvbo)
 		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
 
 	nouveau_bo_unmap(nv_crtc->lut.nvbo);
 	if (nv_crtc->lut.nvbo)
 		nouveau_bo_unpin(nv_crtc->lut.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
 
 	drm_crtc_cleanup(crtc);
 	kfree(crtc);
 }
@@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
 	.mode_set_base = nv50_crtc_mode_set_base,
 	.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
 	.load_lut = nv50_crtc_lut_load,
+	.disable = nv50_crtc_disable,
 };
 
 static const struct drm_crtc_funcs nv50_crtc_func = {