Commit d8524ae9 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 - some small fixes for msm and exynos
 - a regression revert affecting nouveau users with old userspace
 - intel pageflip deadlock and gpu hang fixes, hsw modesetting hangs

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (22 commits)
  Revert "drm: mark context support as a legacy subsystem"
  drm/i915: Don't enable the cursor on a disabled pipe
  drm/i915: do not update cursor in crtc mode set
  drm/exynos: fix return value check in lowlevel_buffer_allocate()
  drm/exynos: Fix address space warnings in exynos_drm_fbdev.c
  drm/exynos: Fix address space warning in exynos_drm_buf.c
  drm/exynos: Remove redundant OF dependency
  drm/msm: drop unnecessary set_need_resched()
  drm/i915: kill set_need_resched
  drm/msm: fix potential NULL pointer dereference
  drm/i915/dvo: set crtc timings again for panel fixed modes
  drm/i915/sdvo: Robustify the dtd<->drm_mode conversions
  drm/msm: workaround for missing irq
  drm/msm: return -EBUSY if bo still active
  drm/msm: fix return value check in ERR_PTR()
  drm/msm: fix cmdstream size check
  drm/msm: hangcheck harder
  drm/msm: handle read vs write fences
  drm/i915/sdvo: Fully translate sync flags in the dtd->mode conversion
  drm/i915: Use proper print format for debug prints
  ...
parents 68cf8d0c 6ddf2ed6
@@ -42,6 +42,10 @@
 #include <drm/drmP.h>
 
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
 /**
  * Free a handle from the context bitmap.
  *
@@ -52,48 +56,13 @@
  * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
  * lock.
  */
-static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
 	mutex_lock(&dev->struct_mutex);
 	idr_remove(&dev->ctx_idr, ctx_handle);
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-void drm_legacy_ctxbitmap_release(struct drm_device *dev,
-				  struct drm_file *file_priv)
-{
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	mutex_lock(&dev->ctxlist_mutex);
-	if (!list_empty(&dev->ctxlist)) {
-		struct drm_ctx_list *pos, *n;
-
-		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
-			if (pos->tag == file_priv &&
-			    pos->handle != DRM_KERNEL_CONTEXT) {
-				if (dev->driver->context_dtor)
-					dev->driver->context_dtor(dev,
-								  pos->handle);
-
-				drm_ctxbitmap_free(dev, pos->handle);
-
-				list_del(&pos->head);
-				kfree(pos);
-				--dev->ctx_count;
-			}
-		}
-	}
-	mutex_unlock(&dev->ctxlist_mutex);
-}
-
 /**
  * Context bitmap allocation.
  *
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
  *
  * Initialise the drm_device::ctx_idr
  */
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
+int drm_ctxbitmap_init(struct drm_device * dev)
 {
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
 	idr_init(&dev->ctx_idr);
+	return 0;
 }
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
  * Free all idr members using drm_ctx_sarea_free helper function
  * while holding the drm_device::struct_mutex lock.
  */
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
 {
 	mutex_lock(&dev->struct_mutex);
 	idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
 	struct drm_local_map *map;
 	struct drm_map_list *_entry;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	mutex_lock(&dev->struct_mutex);
 
 	map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
 	struct drm_local_map *map = NULL;
 	struct drm_map_list *r_list = NULL;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
 	struct drm_ctx ctx;
 	int i;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	if (res->count >= DRM_RESERVED_CONTEXTS) {
 		memset(&ctx, 0, sizeof(ctx));
 		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 	struct drm_ctx_list *ctx_entry;
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	ctx->handle = drm_ctxbitmap_next(dev);
 	if (ctx->handle == DRM_KERNEL_CONTEXT) {
 		/* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	/* This is 0, because we don't handle any context flags */
 	ctx->flags = 0;
 
@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	DRM_DEBUG("%d\n", ctx->handle);
 	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	DRM_DEBUG("%d\n", ctx->handle);
 	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
 	DRM_DEBUG("%d\n", ctx->handle);
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_dtor)
...
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
-	drm_legacy_ctxbitmap_release(dev, file_priv);
+	mutex_lock(&dev->ctxlist_mutex);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->tag == file_priv &&
+			    pos->handle != DRM_KERNEL_CONTEXT) {
+				if (dev->driver->context_dtor)
+					dev->driver->context_dtor(dev,
+								  pos->handle);
+
+				drm_ctxbitmap_free(dev, pos->handle);
+
+				list_del(&pos->head);
+				kfree(pos);
+				--dev->ctx_count;
+			}
+		}
+	}
+	mutex_unlock(&dev->ctxlist_mutex);
 
 	mutex_lock(&dev->struct_mutex);
...
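The revert open-codes the per-file context teardown directly in drm_release(). Its core is the delete-safe list walk: list_for_each_entry_safe() keeps a lookahead pointer so the current node can be unlinked and freed mid-iteration. A minimal userspace sketch of the same idiom, with an invented ctx_node type standing in for drm_ctx_list (this is an illustration, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct ctx_node {
	int handle;              /* context handle, like pos->handle */
	void *owner;             /* which file opened it, like pos->tag */
	struct ctx_node *next;
};

/* Free every context belonging to 'owner' while leaving the rest of the
 * list intact -- the same walk-and-unlink pattern drm_release() performs
 * under ctxlist_mutex. */
static void release_contexts(struct ctx_node **head, void *owner)
{
	struct ctx_node **link = head;

	while (*link) {
		struct ctx_node *pos = *link;

		if (pos->owner == owner) {
			*link = pos->next;   /* unlink before freeing */
			printf("freeing context %d\n", pos->handle);
			free(pos);
		} else {
			link = &pos->next;
		}
	}
}

int main(void)
{
	struct ctx_node *head = NULL;
	int me, other;               /* dummy owner tags */

	for (int i = 1; i <= 3; i++) {
		struct ctx_node *n = malloc(sizeof(*n));
		n->handle = i;
		n->owner = (i == 2) ? (void *)&other : (void *)&me;
		n->next = head;
		head = n;
	}

	release_contexts(&head, &me);   /* frees handles 3 and 1 */
	release_contexts(&head, &other);
	return 0;
}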
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
 		goto error_out_unreg;
 	}
 
-	drm_legacy_ctxbitmap_init(dev);
+	retcode = drm_ctxbitmap_init(dev);
+	if (retcode) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto error_out_unreg;
+	}
 
 	if (driver->driver_features & DRIVER_GEM) {
 		retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
 		drm_rmmap(dev, r_list->map);
 	drm_ht_remove(&dev->map_hash);
 
-	drm_legacy_ctxbitmap_cleanup(dev);
+	drm_ctxbitmap_cleanup(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_put_minor(&dev->control);
...
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
 
 config DRM_EXYNOS_FIMC
 	bool "Exynos DRM FIMC"
-	depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
+	depends on DRM_EXYNOS_IPP && MFD_SYSCON
 	help
 	  Choose this option if you want to use Exynos FIMC for DRM.
...
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 			return -ENOMEM;
 		}
 
-		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+		buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+					buf->size,
 					&buf->dma_addr, GFP_KERNEL,
 					&buf->dma_attrs);
 		if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
 	}
 
 	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-	if (!buf->sgt) {
+	if (IS_ERR(buf->sgt)) {
 		DRM_ERROR("failed to get sg table.\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(buf->sgt);
 		goto err_free_attrs;
 	}
 
...
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 	if (is_drm_iommu_supported(dev)) {
 		unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
 
-		buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+		buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+				nr_pages, VM_MAP,
 				pgprot_writecombine(PAGE_KERNEL));
 	} else {
 		phys_addr_t dma_addr = buffer->dma_addr;
 		if (dma_addr)
-			buffer->kvaddr = phys_to_virt(dma_addr);
+			buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
 		else
 			buffer->kvaddr = (void __iomem *)NULL;
 	}
...
@@ -1392,14 +1392,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		if (i915_terminally_wedged(&dev_priv->gpu_error))
 			return VM_FAULT_SIGBUS;
 	case -EAGAIN:
-		/* Give the error handler a chance to run and move the
-		 * objects off the GPU active list. Next time we service the
-		 * fault, we should be able to transition the page into the
-		 * GTT without touching the GPU (and so avoid further
-		 * EIO/EGAIN). If the GPU is wedged, then there is no issue
-		 * with coherency, just lost writes.
+		/*
+		 * EAGAIN means the gpu is hung and we'll wait for the error
+		 * handler to reset everything when re-faulting in
+		 * i915_mutex_lock_interruptible.
 		 */
-		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
...
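The tail of the fault handler maps internal error codes onto VM_FAULT codes, deliberately falling through so that transient conditions (a hung GPU reporting -EAGAIN, an interrupted wait) all resolve to "no page yet, retry the access". A standalone sketch of that mapping, assuming userspace stand-ins for the VM_FAULT_* values and the kernel-internal ERESTARTSYS:

#include <errno.h>
#include <stdio.h>

#define ERESTARTSYS 512          /* kernel-internal, not in userspace errno.h */

/* Illustrative stand-ins for the kernel's VM_FAULT_* codes. */
#define VM_FAULT_NOPAGE  1
#define VM_FAULT_OOM     2
#define VM_FAULT_SIGBUS  3

/* Transient errors fall through to NOPAGE so the faulting access is simply
 * retried; only real failures escalate. With the fix, -EAGAIN no longer
 * calls set_need_resched() -- the retry will block in
 * i915_mutex_lock_interruptible until the reset handler finishes. */
static int translate_fault(int ret)
{
	switch (ret) {
	case -EAGAIN:            /* GPU hung; retry waits for the reset */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int main(void)
{
	printf("-EAGAIN -> %d\n", translate_fault(-EAGAIN));   /* NOPAGE */
	printf("-EIO    -> %d\n", translate_fault(-EIO));      /* SIGBUS */
	return 0;
}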
@@ -1469,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	return ret;
 }
 
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+			       bool reset_completed)
+{
+	struct intel_ring_buffer *ring;
+	int i;
+
+	/*
+	 * Notify all waiters for GPU completion events that reset state has
+	 * been changed, and that they need to restart their wait after
+	 * checking for potential errors (and bail out to drop locks if there is
+	 * a gpu reset pending so that i915_error_work_func can acquire them).
+	 */
+
+	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+	for_each_ring(ring, dev_priv, i)
+		wake_up_all(&ring->irq_queue);
+
+	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+	wake_up_all(&dev_priv->pending_flip_queue);
+
+	/*
+	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
+	 * reset state is cleared.
+	 */
+	if (reset_completed)
+		wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
 /**
  * i915_error_work_func - do process context error handling work
  * @work: work struct
@@ -1483,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
 	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
 						    gpu_error);
 	struct drm_device *dev = dev_priv->dev;
-	struct intel_ring_buffer *ring;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-	int i, ret;
+	int ret;
 
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
@@ -1506,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
 		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
 				   reset_event);
 
+		/*
+		 * All state reset _must_ be completed before we update the
+		 * reset counter, for otherwise waiters might miss the reset
+		 * pending state and not properly drop locks, resulting in
+		 * deadlocks with the reset work.
+		 */
 		ret = i915_reset(dev);
 
+		intel_display_handle_reset(dev);
+
 		if (ret == 0) {
 			/*
 			 * After all the gem state is reset, increment the reset
@@ -1528,12 +1563,11 @@ static void i915_error_work_func(struct work_struct *work)
 			atomic_set(&error->reset_counter, I915_WEDGED);
 		}
 
-		for_each_ring(ring, dev_priv, i)
-			wake_up_all(&ring->irq_queue);
-
-		intel_display_handle_reset(dev);
-
-		wake_up_all(&dev_priv->gpu_error.reset_queue);
+		/*
+		 * Note: The wake_up also serves as a memory barrier so that
+		 * waiters see the update value of the reset counter atomic_t.
+		 */
+		i915_error_wake_up(dev_priv, true);
 	}
 }
 
@@ -1642,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
-	int i;
 
 	i915_capture_error_state(dev);
 	i915_report_and_clear_eir(dev);
@@ -1653,11 +1685,19 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 				&dev_priv->gpu_error.reset_counter);
 
 		/*
-		 * Wakeup waiting processes so that the reset work item
-		 * doesn't deadlock trying to grab various locks.
+		 * Wakeup waiting processes so that the reset work function
+		 * i915_error_work_func doesn't deadlock trying to grab various
+		 * locks. By bumping the reset counter first, the woken
+		 * processes will see a reset in progress and back off,
+		 * releasing their locks and then wait for the reset completion.
+		 * We must do this for _all_ gpu waiters that might hold locks
+		 * that the reset work needs to acquire.
+		 *
+		 * Note: The wake_up serves as the required memory barrier to
+		 * ensure that the waiters see the updated value of the reset
+		 * counter atomic_t.
		 */
-		for_each_ring(ring, dev_priv, i)
-			wake_up_all(&ring->irq_queue);
+		i915_error_wake_up(dev_priv, false);
 	}
 
 	/*
...
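The deadlock fix rests on an ordering rule: publish "reset pending" first, then wake every waiter that might hold a lock the reset worker needs; each woken waiter re-checks the published state, backs off, and drops its locks. A minimal pthread model of that ordering (all names here are invented stand-ins, not the i915 code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint reset_counter;               /* odd == reset in progress */
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t irq_queue = PTHREAD_COND_INITIALIZER;
static bool seqno_done;                         /* the event being waited on */

static void handle_error_model(void)
{
	atomic_fetch_add(&reset_counter, 1);    /* 1: mark reset pending */
	pthread_mutex_lock(&dev_lock);
	pthread_cond_broadcast(&irq_queue);     /* 2: then wake *all* waiters */
	pthread_mutex_unlock(&dev_lock);
}

/* Returns false when the waiter must back off because a reset is pending,
 * mirroring how __wait_seqno and the pending-flip wait restart their wait
 * after checking the reset counter. */
static bool wait_seqno_model(void)
{
	bool ok = true;

	pthread_mutex_lock(&dev_lock);
	while (!seqno_done) {
		if (atomic_load(&reset_counter) & 1) {
			ok = false;             /* reset pending: bail out */
			break;
		}
		pthread_cond_wait(&irq_queue, &dev_lock);
	}
	pthread_mutex_unlock(&dev_lock);
	return ok;                              /* caller releases its locks */
}

int main(void)
{
	handle_error_model();
	printf("waiter backs off: %s\n", wait_seqno_model() ? "no" : "yes");
	return 0;
}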
@@ -778,7 +778,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
 		/* Can only use the always-on power well for eDP when
 		 * not using the panel fitter, and when not using motion
 		 * blur mitigation (which we don't support). */
-		if (intel_crtc->config.pch_pfit.size)
+		if (intel_crtc->config.pch_pfit.enabled)
 			temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
 		else
 			temp |= TRANS_DDI_EDP_INPUT_A_ON;
...
@@ -2249,7 +2249,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		I915_WRITE(PIPESRC(intel_crtc->pipe),
 			   ((crtc->mode.hdisplay - 1) << 16) |
 			   (crtc->mode.vdisplay - 1));
-		if (!intel_crtc->config.pch_pfit.size &&
+		if (!intel_crtc->config.pch_pfit.enabled &&
 		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
 		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
 			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
@@ -3203,7 +3203,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe = crtc->pipe;
 
-	if (crtc->config.pch_pfit.size) {
+	if (crtc->config.pch_pfit.enabled) {
 		/* Force use of hard-coded filter coefficients
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
@@ -3428,7 +3428,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
 
 	/* To avoid upsetting the power well on haswell only disable the pfit if
 	 * it's in use. The hw state code will make sure we get this right. */
-	if (crtc->config.pch_pfit.size) {
+	if (crtc->config.pch_pfit.enabled) {
 		I915_WRITE(PF_CTL(pipe), 0);
 		I915_WRITE(PF_WIN_POS(pipe), 0);
 		I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4877,9 +4877,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
-	/* Ensure that the cursor is valid for the new mode before changing... */
-	intel_crtc_update_cursor(crtc, true);
-
 	if (is_lvds && dev_priv->lvds_downclock_avail) {
 		/*
 		 * Ensure we match the reduced clock's P to the target clock.
@@ -5768,9 +5765,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		intel_crtc->config.dpll.p2 = clock.p2;
 	}
 
-	/* Ensure that the cursor is valid for the new mode before changing... */
-	intel_crtc_update_cursor(crtc, true);
-
 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
 	if (intel_crtc->config.has_pch_encoder) {
 		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
@@ -5859,6 +5853,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
 	tmp = I915_READ(PF_CTL(crtc->pipe));
 
 	if (tmp & PF_ENABLE) {
+		pipe_config->pch_pfit.enabled = true;
 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
@@ -6236,7 +6231,7 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
 		if (!crtc->base.enabled)
 			continue;
 
-		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
 		    crtc->config.cpu_transcoder != TRANSCODER_EDP)
 			enable = true;
 	}
@@ -6259,9 +6254,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 	if (!intel_ddi_pll_mode_set(crtc))
 		return -EINVAL;
 
-	/* Ensure that the cursor is valid for the new mode before changing... */
-	intel_crtc_update_cursor(crtc, true);
-
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
@@ -6494,15 +6486,15 @@ static void haswell_write_eld(struct drm_connector *connector,
 
 	/* Set ELD valid state */
 	tmp = I915_READ(aud_cntrl_st2);
-	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
 	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
 	I915_WRITE(aud_cntrl_st2, tmp);
 	tmp = I915_READ(aud_cntrl_st2);
-	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
 
 	/* Enable HDMI mode */
 	tmp = I915_READ(aud_config);
-	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
 	/* clear N_programing_enable and N_value_index */
 	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
 	I915_WRITE(aud_config, tmp);
@@ -6937,6 +6929,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	intel_crtc->cursor_width = width;
 	intel_crtc->cursor_height = height;
 
-	intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+	if (intel_crtc->active)
+		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
 	return 0;
@@ -6956,6 +6949,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	intel_crtc->cursor_x = x;
 	intel_crtc->cursor_y = y;
 
-	intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+	if (intel_crtc->active)
+		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
 	return 0;
@@ -8205,9 +8199,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 		      pipe_config->gmch_pfit.control,
 		      pipe_config->gmch_pfit.pgm_ratios,
 		      pipe_config->gmch_pfit.lvds_border_bits);
-	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
+	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
 		      pipe_config->pch_pfit.pos,
-		      pipe_config->pch_pfit.size);
+		      pipe_config->pch_pfit.size,
+		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
 }
 
@@ -8603,8 +8598,11 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (INTEL_INFO(dev)->gen < 4)
 		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
 	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
-	PIPE_CONF_CHECK_I(pch_pfit.pos);
-	PIPE_CONF_CHECK_I(pch_pfit.size);
+	PIPE_CONF_CHECK_I(pch_pfit.enabled);
+	if (current_config->pch_pfit.enabled) {
+		PIPE_CONF_CHECK_I(pch_pfit.pos);
+		PIPE_CONF_CHECK_I(pch_pfit.size);
+	}
 
 	PIPE_CONF_CHECK_I(ips_enabled);
...
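The state-checker change illustrates a general guarded-compare pattern: always compare the enable bit, but only compare dependent fields when the feature is actually on, since pos/size carry no meaning for a disabled fitter. A small self-contained sketch of that pattern (the struct and function are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pch_pfit_state {
	uint32_t pos, size;
	bool enabled;
};

/* Mirror of the intel_pipe_config_compare() logic above: mismatching
 * pos/size only matters while the panel fitter is enabled. */
static bool pfit_state_equal(const struct pch_pfit_state *a,
			     const struct pch_pfit_state *b)
{
	if (a->enabled != b->enabled)
		return false;
	if (a->enabled && (a->pos != b->pos || a->size != b->size))
		return false;
	return true;
}

int main(void)
{
	struct pch_pfit_state sw = { .pos = 0, .size = 0x11223344, .enabled = false };
	struct pch_pfit_state hw = { .pos = 0, .size = 0,          .enabled = false };

	/* stale size is ignored because both sides agree the pfit is off */
	printf("equal: %d\n", pfit_state_equal(&sw, &hw));
	return 0;
}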
@@ -280,6 +280,7 @@ struct intel_crtc_config {
 	struct {
 		u32 pos;
 		u32 size;
+		bool enabled;
 	} pch_pfit;
 
 	/* FDI configuration, only valid if has_pch_encoder is set. */
...
@@ -263,6 +263,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 		C(vtotal);
 		C(clock);
 #undef C
+
+		drm_mode_set_crtcinfo(adjusted_mode, 0);
 	}
 
 	if (intel_dvo->dev.dev_ops->mode_fixup)
...
@@ -112,6 +112,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
 done:
 	pipe_config->pch_pfit.pos = (x << 16) | y;
 	pipe_config->pch_pfit.size = (width << 16) | height;
+	pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
 }
 
 static void
...
@@ -2096,16 +2096,16 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
 				    struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pixel_rate, pfit_size;
+	uint32_t pixel_rate;
 
 	pixel_rate = intel_crtc->config.adjusted_mode.clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
 
-	pfit_size = intel_crtc->config.pch_pfit.size;
-	if (pfit_size) {
+	if (intel_crtc->config.pch_pfit.enabled) {
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
 		pipe_w = intel_crtc->config.requested_mode.hdisplay;
 		pipe_h = intel_crtc->config.requested_mode.vdisplay;
...
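When the panel fitter scales the pipe down into a smaller window, the effective pixel rate grows by the ratio of the two areas, and the multiplication is widened to 64 bits before dividing. A sketch of that arithmetic under the same assumptions (the numbers in main are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Scale pixel_rate by (pipe area / pfit area), as ilk_pipe_pixel_rate()
 * does when the PCH panel fitter is enabled. Upscaling needs no
 * adjustment, so each dimension is clamped to at least the pfit size. */
static uint32_t scaled_pixel_rate(uint32_t pixel_rate,
				  uint64_t pipe_w, uint64_t pipe_h,
				  uint64_t pfit_w, uint64_t pfit_h)
{
	if (pipe_w < pfit_w)
		pipe_w = pfit_w;
	if (pipe_h < pfit_h)
		pipe_h = pfit_h;

	/* 64-bit intermediate avoids overflow of rate * width * height */
	return (uint32_t)((uint64_t)pixel_rate * pipe_w * pipe_h /
			  (pfit_w * pfit_h));
}

int main(void)
{
	/* e.g. a 1920x1200 source squeezed into a 1920x1080 window */
	printf("%u kHz\n", scaled_pixel_rate(148500, 1920, 1200, 1920, 1080));
	return 0;   /* prints 165000 kHz */
}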
@@ -788,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
 	uint16_t h_sync_offset, v_sync_offset;
 	int mode_clock;
 
+	memset(dtd, 0, sizeof(*dtd));
+
 	width = mode->hdisplay;
 	height = mode->vdisplay;
 
@@ -830,44 +832,51 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
 		dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
-	dtd->part2.sdvo_flags = 0;
 	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
-	dtd->part2.reserved = 0;
 }
 
-static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
 					 const struct intel_sdvo_dtd *dtd)
 {
-	mode->hdisplay = dtd->part1.h_active;
-	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
-	mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
-	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
-	mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
-	mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
-	mode->htotal = mode->hdisplay + dtd->part1.h_blank;
-	mode->htotal += (dtd->part1.h_high & 0xf) << 8;
-
-	mode->vdisplay = dtd->part1.v_active;
-	mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
-	mode->vsync_start = mode->vdisplay;
-	mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
-	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
-	mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
-	mode->vsync_end = mode->vsync_start +
+	struct drm_display_mode mode = {};
+
+	mode.hdisplay = dtd->part1.h_active;
+	mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+	mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+	mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+	mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+	mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+	mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+	mode.htotal += (dtd->part1.h_high & 0xf) << 8;
+
+	mode.vdisplay = dtd->part1.v_active;
+	mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+	mode.vsync_start = mode.vdisplay;
+	mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+	mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+	mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+	mode.vsync_end = mode.vsync_start +
 		(dtd->part2.v_sync_off_width & 0xf);
-	mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
-	mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
-	mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+	mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+	mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+	mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
 
-	mode->clock = dtd->part1.clock * 10;
+	mode.clock = dtd->part1.clock * 10;
 
-	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
 	if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+		mode.flags |= DRM_MODE_FLAG_INTERLACE;
 	if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
-		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+		mode.flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		mode.flags |= DRM_MODE_FLAG_NHSYNC;
 	if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
-		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+		mode.flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+	drm_mode_set_crtcinfo(&mode, 0);
+
+	drm_mode_copy(pmode, &mode);
 }
 
 static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
...
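The robustness trick here is to build the result in a fully zeroed local and publish it only when complete, so the caller never sees stale or half-translated fields (the same reason the dtd side gains a memset). A compact sketch of that build-then-publish pattern, with an invented struct and byte layout, purely for illustration:

#include <stdio.h>

struct mode {
	int hdisplay, vdisplay;
};

/* Build the decoded mode in a zeroed local, then copy it out whole --
 * the pattern intel_sdvo_get_mode_from_dtd() adopts above. The dtd byte
 * layout used here is hypothetical, not the real SDVO DTD format. */
static void dtd_to_mode(struct mode *out, const unsigned char *dtd)
{
	struct mode m = {0};            /* every field starts well-defined */

	m.hdisplay = dtd[0] | ((dtd[1] & 0x0f) << 8);
	m.vdisplay = dtd[2] | ((dtd[3] & 0x0f) << 8);
	/* ... translate the remaining timing fields ... */

	*out = m;                       /* publish only the complete result */
}

int main(void)
{
	const unsigned char dtd[4] = { 0x80, 0x07, 0x38, 0x04 }; /* 1920x1080 */
	struct mode m;

	dtd_to_mode(&m, dtd);
	printf("%dx%d\n", m.hdisplay, m.vdisplay);
	return 0;
}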
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
 
 	/* reset completed fence seqno, just discard anything pending: */
 	adreno_gpu->memptrs->fence = gpu->submitted_fence;
+	adreno_gpu->memptrs->rptr  = 0;
+	adreno_gpu->memptrs->wptr  = 0;
 
 	gpu->funcs->pm_resume(gpu);
 	ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@ void adreno_idle(struct msm_gpu *gpu)
 			return;
 	} while(time_before(jiffies, t));
 
-	DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+	DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
 
 	/* TODO maybe we need to reset GPU here to recover from hang? */
 }
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	uint32_t freedwords;
+	unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
 	do {
 		uint32_t size = gpu->rb->size / 4;
 		uint32_t wptr = get_wptr(gpu->rb);
 		uint32_t rptr = adreno_gpu->memptrs->rptr;
 		freedwords = (rptr + (size - 1) - wptr) % size;
+
+		if (time_after(jiffies, t)) {
+			DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+			break;
+		}
 	} while(freedwords < ndwords);
 }
...
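The adreno_wait_ring() change turns a potentially infinite spin into a bounded one: capture a deadline once, then break with a diagnostic if the GPU never frees ringbuffer space. A userspace sketch of the same bounded-spin pattern, with have_space() as a hypothetical stand-in for the rptr/wptr arithmetic:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool have_space(unsigned ndwords)
{
	(void)ndwords;
	return false;           /* simulate a hung GPU for the demo */
}

static void wait_ring(unsigned ndwords)
{
	/* deadline computed once up front, like jiffies + ADRENO_IDLE_TIMEOUT */
	time_t deadline = time(NULL) + 1;

	while (!have_space(ndwords)) {
		if (time(NULL) > deadline) {
			fprintf(stderr, "timeout waiting for ringbuffer space\n");
			break;  /* bail out instead of spinning forever */
		}
	}
}

int main(void)
{
	wait_ring(64);
	return 0;
}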
@@ -499,10 +499,24 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 		struct timespec *timeout)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
-	unsigned long start_jiffies = jiffies;
-	unsigned long remaining_jiffies;
 	int ret;
 
-	if (time_after(start_jiffies, timeout_jiffies))
-		remaining_jiffies = 0;
+	if (!priv->gpu)
+		return 0;
+
+	if (fence > priv->gpu->submitted_fence) {
+		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+				fence, priv->gpu->submitted_fence);
+		return -EINVAL;
+	}
+
+	if (!timeout) {
+		/* no-wait: */
+		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+	} else {
+		unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+		unsigned long start_jiffies = jiffies;
+		unsigned long remaining_jiffies;
+
+		if (time_after(start_jiffies, timeout_jiffies))
+			remaining_jiffies = 0;
@@ -510,8 +524,9 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 			remaining_jiffies = timeout_jiffies - start_jiffies;
 
 		ret = wait_event_interruptible_timeout(priv->fence_event,
-				priv->completed_fence >= fence,
+				fence_completed(dev, fence),
 				remaining_jiffies);
+
 		if (ret == 0) {
 			DBG("timeout waiting for fence: %u (completed: %u)",
 					fence, priv->completed_fence);
@@ -519,6 +534,7 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 		} else if (ret != -ERESTARTSYS) {
 			ret = 0;
 		}
+	}
 
 	return ret;
 }
...
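The wait path leans on wait_event_interruptible_timeout()'s return convention: 0 means the timeout expired, -ERESTARTSYS means a signal arrived, and a positive value (jiffies remaining) means the condition became true. A small sketch of the mapping the function applies on top of that, assuming userspace definitions for the kernel-internal constant:

#include <errno.h>
#include <stdio.h>

#define ERESTARTSYS 512          /* kernel-internal, not in userspace errno.h */

/* Map a wait_event_interruptible_timeout()-style result onto the ioctl's
 * return value, as msm_wait_fence_interruptable() does above. */
static int map_wait_result(long ret)
{
	if (ret == 0)
		return -ETIMEDOUT;      /* fence never completed in time */
	if (ret == -ERESTARTSYS)
		return (int)ret;        /* let the syscall be restarted */
	return 0;                       /* woke with the fence completed */
}

int main(void)
{
	printf("timeout -> %d\n", map_wait_result(0));
	printf("done    -> %d\n", map_wait_result(5));
	return 0;
}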
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
 int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 		struct work_struct *work);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, uint32_t fence);
+		struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
 		struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
+static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	return priv->completed_fence >= fence;
+}
+
 static inline int align_pitch(int width, int bpp)
 {
 	int bytespp = (bpp + 7) / 8;
...
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		}
 
 		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
-		if (!msm_obj->sgt) {
+		if (IS_ERR(msm_obj->sgt)) {
 			dev_err(dev->dev, "failed to allocate sgt\n");
-			return ERR_PTR(-ENOMEM);
+			return ERR_CAST(msm_obj->sgt);
 		}
 
 		msm_obj->pages = p;
@@ -159,7 +159,6 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 out:
 	switch (ret) {
 	case -EAGAIN:
-		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
@@ -393,11 +392,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, uint32_t fence)
+		struct msm_gpu *gpu, bool write, uint32_t fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	msm_obj->gpu = gpu;
-	msm_obj->fence = fence;
+	if (write)
+		msm_obj->write_fence = fence;
+	else
+		msm_obj->read_fence = fence;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -411,7 +413,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	msm_obj->gpu = NULL;
-	msm_obj->fence = 0;
+	msm_obj->read_fence = 0;
+	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
@@ -433,8 +436,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
 
-	if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
-		ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+	if (is_active(msm_obj)) {
+		uint32_t fence = 0;
+
+		if (op & MSM_PREP_READ)
+			fence = msm_obj->write_fence;
+		if (op & MSM_PREP_WRITE)
+			fence = max(fence, msm_obj->read_fence);
+		if (op & MSM_PREP_NOSYNC)
+			timeout = NULL;
+
+		ret = msm_wait_fence_interruptable(dev, fence, timeout);
+	}
 
 	/* TODO cache maintenance */
 
@@ -455,9 +468,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-	seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-			msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+			msm_obj->read_fence, msm_obj->write_fence,
+			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
 }
...
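Splitting the single fence into read and write fences gives the classic reader/writer rule: a CPU read only has to wait for the GPU's last write, while a CPU write must wait for all prior access (callers requesting write access typically pass both flags, so the max covers readers too). A standalone sketch of the selection logic from msm_gem_cpu_prep(), with simplified flag names:

#include <stdint.h>
#include <stdio.h>

#define PREP_READ   0x01
#define PREP_WRITE  0x02

static uint32_t max_u32(uint32_t a, uint32_t b)
{
	return a > b ? a : b;
}

/* Pick which fence the CPU must wait on before touching the buffer:
 * readers wait on the last GPU write; writers additionally wait on the
 * last GPU read. */
static uint32_t fence_to_wait_on(uint32_t op,
				 uint32_t read_fence, uint32_t write_fence)
{
	uint32_t fence = 0;

	if (op & PREP_READ)
		fence = write_fence;
	if (op & PREP_WRITE)
		fence = max_u32(fence, read_fence);

	return fence;
}

int main(void)
{
	/* object last read by the GPU at fence 8, last written at fence 5 */
	printf("cpu read waits on %u\n",
	       fence_to_wait_on(PREP_READ, 8, 5));              /* 5 */
	printf("cpu write waits on %u\n",
	       fence_to_wait_on(PREP_READ | PREP_WRITE, 8, 5)); /* 8 */
	return 0;
}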
@@ -36,7 +36,7 @@ struct msm_gem_object {
 	 */
 	struct list_head mm_list;
 	struct msm_gpu *gpu;     /* non-null if active */
-	uint32_t fence;
+	uint32_t read_fence, write_fence;
 
 	/* Transiently in the process of submit ioctl, objects associated
 	 * with the submit are on submit->bo_list.. this only lasts for
...
@@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		}
 
 		if (submit_bo.flags & BO_INVALID_FLAGS) {
-			DBG("invalid flags: %x", submit_bo.flags);
+			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
 			ret = -EINVAL;
 			goto out_unlock;
 		}
@@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		 */
 		obj = idr_find(&file->object_idr, submit_bo.handle);
 		if (!obj) {
-			DBG("invalid handle %u at index %u", submit_bo.handle, i);
+			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
 			ret = -EINVAL;
 			goto out_unlock;
 		}
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		msm_obj = to_msm_bo(obj);
 
 		if (!list_empty(&msm_obj->submit_entry)) {
-			DBG("handle %u at index %u already on submit list",
+			DRM_ERROR("handle %u at index %u already on submit list\n",
 					submit_bo.handle, i);
 			ret = -EINVAL;
 			goto out_unlock;
@@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
 		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
 {
 	if (idx >= submit->nr_bos) {
-		DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
-		return EINVAL;
+		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+				idx, submit->nr_bos);
+		return -EINVAL;
 	}
 
 	if (obj)
@@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 	int ret;
 
 	if (offset % 4) {
-		DBG("non-aligned cmdstream buffer: %u", offset);
+		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
 		return -EINVAL;
 	}
 
@@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 			return -EFAULT;
 
 		if (submit_reloc.submit_offset % 4) {
-			DBG("non-aligned reloc offset: %u",
+			DRM_ERROR("non-aligned reloc offset: %u\n",
 					submit_reloc.submit_offset);
 			return -EINVAL;
 		}
@@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 		if ((off >= (obj->base.size / 4)) ||
 				(off < last_offset)) {
-			DBG("invalid offset %u at reloc %u", off, i);
+			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
 			return -EINVAL;
 		}
@@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 			goto out;
 
 		if (submit_cmd.size % 4) {
-			DBG("non-aligned cmdstream buffer size: %u",
+			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
 					submit_cmd.size);
 			ret = -EINVAL;
 			goto out;
 		}
 
-		if (submit_cmd.size >= msm_obj->base.size) {
-			DBG("invalid cmdstream size: %u", submit_cmd.size);
+		if ((submit_cmd.size + submit_cmd.submit_offset) >=
+				msm_obj->base.size) {
+			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
 			ret = -EINVAL;
 			goto out;
 		}
...
@@ -29,13 +29,14 @@
 static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
 {
 	struct drm_device *dev = gpu->dev;
-	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+	struct kgsl_device_platform_data *pdata;
 
 	if (!pdev) {
 		dev_err(dev->dev, "could not find dtv pdata\n");
 		return;
 	}
 
+	pdata = pdev->dev.platform_data;
 	if (pdata->bus_scale_table) {
 		gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
 		DBG("bus scale client: %08x", gpu->bsc);
@@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
 static void hangcheck_handler(unsigned long data)
 {
 	struct msm_gpu *gpu = (struct msm_gpu *)data;
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
 	uint32_t fence = gpu->funcs->last_fence(gpu);
 
 	if (fence != gpu->hangcheck_fence) {
@@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data)
 		gpu->hangcheck_fence = fence;
 	} else if (fence < gpu->submitted_fence) {
 		/* no progress and not done.. hung! */
-		struct msm_drm_private *priv = gpu->dev->dev_private;
 		gpu->hangcheck_fence = fence;
+		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
+				gpu->name);
+		dev_err(dev->dev, "%s: completed fence: %u\n",
+				gpu->name, fence);
+		dev_err(dev->dev, "%s: submitted fence: %u\n",
+				gpu->name, gpu->submitted_fence);
 		queue_work(priv->wq, &gpu->recover_work);
 	}
 
 	/* if still more pending work, reset the hangcheck timer: */
 	if (gpu->submitted_fence > gpu->hangcheck_fence)
 		hangcheck_timer_reset(gpu);
+
+	/* workaround for missing irq: */
+	queue_work(priv->wq, &gpu->retire_work);
 }
 
 /*
@@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work)
 		obj = list_first_entry(&gpu->active_list,
 				struct msm_gem_object, mm_list);
 
-		if (obj->fence <= fence) {
+		if ((obj->read_fence <= fence) &&
+				(obj->write_fence <= fence)) {
 			/* move to inactive: */
 			msm_gem_move_to_inactive(&obj->base);
 			msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 					submit->gpu->id, &iova);
 		}
 
-		msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
+		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+
+		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
 	}
 
 	hangcheck_timer_reset(gpu);
 	mutex_unlock(&dev->struct_mutex);
...
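The hangcheck heuristic is simple: sample the GPU's last completed fence on each timer tick; any progress resets the watermark, while no progress with work still outstanding is declared a lockup and recovery is queued (the workaround additionally kicks the retire worker in case a completion IRQ was lost). A sketch of one tick, modeled as a pure function with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One hangcheck tick: compare the fence the GPU last completed against
 * what we saw on the previous tick. Returns true when recovery should
 * be scheduled. */
static bool hangcheck_tick(uint32_t completed, uint32_t submitted,
			   uint32_t *last_seen)
{
	if (completed != *last_seen) {
		*last_seen = completed; /* some progress since last tick */
		return false;
	}
	if (completed < submitted)
		return true;            /* no progress and not done: hung */
	return false;                   /* idle, nothing outstanding */
}

int main(void)
{
	uint32_t last_seen = 0;

	hangcheck_tick(3, 5, &last_seen);       /* progress, all good */
	if (hangcheck_tick(3, 5, &last_seen))   /* stalled at fence 3 of 5 */
		printf("hangcheck detected gpu lockup!\n");
	return 0;
}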
@@ -1322,10 +1322,9 @@ extern int drm_newctx(struct drm_device *dev, void *data,
 extern int drm_rmctx(struct drm_device *dev, void *data,
 		     struct drm_file *file_priv);
 
-extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
-					 struct drm_file *file_priv);
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 
 extern int drm_setsareactx(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv);
...