Commit 4daad1b2 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2017-03-14' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

drm/i915 fixes for v4.11-rc3

* tag 'drm-intel-fixes-2017-03-14' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Fix forcewake active domain tracking
  drm/i915: Nuke skl_update_plane debug message from the pipe update critical section
  drm/i915: use correct node for handling cache domain eviction
  drm/i915: Drain the freed state from the tail of the next commit
  drm/i915: Nuke debug messages from the pipe update critical section
  drm/i915: Use pagecache write to prepopulate shmemfs from pwrite-ioctl
  drm/i915: Store a permanent error in obj->mm.pages
  drm/i915: Move updating color management to before vblank evasion
  drm/i915/gen9: Increase PCODE request timeout to 50ms
  drm/i915: Avoid tweaking evaluation thresholds on Baytrail v3
  drm/i915: Remove the vma from the drm_mm if binding fails
  drm/i915/fbdev: Stop repeating tile configuration on stagnation
  drm/i915/glk: Fix watermark computations for third sprite plane
  drm/i915: Squelch any ktime/jiffie rounding errors for wait-ioctl
parents 490b8981 6aef6603
@@ -293,6 +293,7 @@ enum plane_id {
 	PLANE_PRIMARY,
 	PLANE_SPRITE0,
 	PLANE_SPRITE1,
+	PLANE_SPRITE2,
 	PLANE_CURSOR,
 	I915_MAX_PLANES,
 };
...
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
+	ret = -ENODEV;
+	if (obj->ops->pwrite)
+		ret = obj->ops->pwrite(obj, args);
+	if (ret != -ENODEV)
+		goto err;
+
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	 */
 	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
 	obj->mm.madv = __I915_MADV_PURGED;
+	obj->mm.pages = ERR_PTR(-EFAULT);
 }
 
 /* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
 	__i915_gem_object_reset_page_iter(obj);
 
-	obj->ops->put_pages(obj, pages);
+	if (!IS_ERR(pages))
+		obj->ops->put_pages(obj, pages);
+
 unlock:
 	mutex_unlock(&obj->mm.lock);
 }
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(!obj->mm.pages)) {
+	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 		err = ____i915_gem_object_get_pages(obj);
 		if (err)
 			goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 	pinned = true;
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(!obj->mm.pages)) {
+		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 			ret = ____i915_gem_object_get_pages(obj);
 			if (ret)
 				goto err_unlock;
@@ -2563,6 +2572,75 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	goto out_unlock;
 }
 
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+			   const struct drm_i915_gem_pwrite *arg)
+{
+	struct address_space *mapping = obj->base.filp->f_mapping;
+	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+	u64 remain, offset;
+	unsigned int pg;
+
+	/* Before we instantiate/pin the backing store for our use, we
+	 * can prepopulate the shmemfs filp efficiently using a write into
+	 * the pagecache. We avoid the penalty of instantiating all the
+	 * pages, important if the user is just writing to a few and never
+	 * uses the object on the GPU, and using a direct write into shmemfs
+	 * allows it to avoid the cost of retrieving a page (either swapin
+	 * or clearing-before-use) before it is overwritten.
+	 */
+	if (READ_ONCE(obj->mm.pages))
+		return -ENODEV;
+
+	/* Before the pages are instantiated the object is treated as being
+	 * in the CPU domain. The pages will be clflushed as required before
+	 * use, and we can freely write into the pages directly. If userspace
+	 * races pwrite with any other operation; corruption will ensue -
+	 * that is userspace's prerogative!
+	 */
+
+	remain = arg->size;
+	offset = arg->offset;
+	pg = offset_in_page(offset);
+
+	do {
+		unsigned int len, unwritten;
+		struct page *page;
+		void *data, *vaddr;
+		int err;
+
+		len = PAGE_SIZE - pg;
+		if (len > remain)
+			len = remain;
+
+		err = pagecache_write_begin(obj->base.filp, mapping,
+					    offset, len, 0,
+					    &page, &data);
+		if (err < 0)
+			return err;
+
+		vaddr = kmap(page);
+		unwritten = copy_from_user(vaddr + pg, user_data, len);
+		kunmap(page);
+
+		err = pagecache_write_end(obj->base.filp, mapping,
+					  offset, len, len - unwritten,
+					  page, data);
+		if (err < 0)
+			return err;
+
+		if (unwritten)
+			return -EFAULT;
+
+		remain -= len;
+		user_data += len;
+		offset += len;
+		pg = 0;
+	} while (remain);
+
+	return 0;
+}
+
 static bool ban_context(const struct i915_gem_context *ctx)
 {
 	return (i915_gem_context_is_bannable(ctx) &&
@@ -3029,6 +3107,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
 		if (args->timeout_ns < 0)
 			args->timeout_ns = 0;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regression from the timespec->ktime conversion.
+		 */
+		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+			args->timeout_ns = 0;
 	}
 
 	i915_gem_object_put(obj);
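
The jiffy-rounding forgiveness in the hunk above is easiest to see with concrete numbers. Below is a minimal userspace model, not the kernel code: nsecs_to_jiffies_model() is a simplified stand-in for the kernel's nsecs_to_jiffies(), and HZ=100 is an assumed configuration.

#include <stdio.h>

#define HZ 100
#define NSEC_PER_SEC 1000000000ULL

/* Simplified stand-in for the kernel's nsecs_to_jiffies(). */
static unsigned long nsecs_to_jiffies_model(unsigned long long n)
{
    return (unsigned long)(n / (NSEC_PER_SEC / HZ));
}

int main(void)
{
    /* ktime left 4ms of residue even though the wait returned -ETIME. */
    unsigned long long timeout_ns = 4000000ULL;

    /* Less than one 10ms jiffy remains: at scheduler granularity the
     * wait really did consume the whole timeout, so report 0 instead
     * of a tiny bogus remainder. */
    if (!nsecs_to_jiffies_model(timeout_ns))
        timeout_ns = 0;

    printf("reported timeout_ns = %llu\n", timeout_ns);
    return 0;
}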
@@ -3974,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE,
+
 	.get_pages = i915_gem_object_get_pages_gtt,
 	.put_pages = i915_gem_object_put_pages_gtt,
+
+	.pwrite = i915_gem_object_pwrite_gtt,
 };
 
 struct drm_i915_gem_object *
...
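
For context, a sketch of how userspace would reach the new shmemfs prepopulation path above. It assumes libdrm's headers, an open i915 render node fd, and a freshly created GEM object handle that has never been mapped or used by the GPU (so obj->mm.pages is still unset); error handling is trimmed.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int prepopulate_bo(int fd, uint32_t handle,
                          const void *data, uint64_t len)
{
    struct drm_i915_gem_pwrite pwrite;

    memset(&pwrite, 0, sizeof(pwrite));
    pwrite.handle = handle;
    pwrite.offset = 0;
    pwrite.size = len;
    pwrite.data_ptr = (uintptr_t)data;

    /* On a never-instantiated object this now lands in
     * i915_gem_object_pwrite_gtt() and writes straight into the
     * shmemfs pagecache; otherwise the backend returns -ENODEV and
     * the ioctl falls back to the regular pwrite paths. */
    return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}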
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	 * those as well to make room for our guard pages.
 	 */
 	if (check_color) {
-		if (vma->node.start + vma->node.size == node->start) {
-			if (vma->node.color == node->color)
+		if (node->start + node->size == target->start) {
+			if (node->color == target->color)
 				continue;
 		}
-		if (vma->node.start == node->start + node->size) {
-			if (vma->node.color == node->color)
+		if (node->start == target->start + target->size) {
+			if (node->color == target->color)
 				continue;
 		}
 	}
...
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
 	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
 	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
+	int (*pwrite)(struct drm_i915_gem_object *,
+		      const struct drm_i915_gem_pwrite *);
+
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
 };
...
@@ -512,10 +512,36 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	return ret;
 }
 
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+	drm_mm_remove_node(&vma->node);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+	/* Since the unbound list is global, only move to that list if
+	 * no more VMAs exist.
+	 */
+	if (--obj->bind_count == 0)
+		list_move_tail(&obj->global_link,
+			       &to_i915(obj->base.dev)->mm.unbound_list);
+
+	/* And finally now the object is completely decoupled from this vma,
+	 * we can drop its hold on the backing storage and allow it to be
+	 * reaped by the shrinker.
+	 */
+	i915_gem_object_unpin_pages(obj);
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound = vma->flags;
+	const unsigned int bound = vma->flags;
 	int ret;
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
 	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
 		ret = -EBUSY;
-		goto err;
+		goto err_unpin;
 	}
 
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
-			goto err;
+			goto err_unpin;
 	}
 
 	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
 	if (ret)
-		goto err;
+		goto err_remove;
 
 	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 	return 0;
 
-err:
+err_remove:
+	if ((bound & I915_VMA_BIND_MASK) == 0) {
+		GEM_BUG_ON(vma->pages);
+		i915_vma_remove(vma);
+	}
+err_unpin:
 	__i915_vma_unpin(vma);
 	return ret;
 }
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-	drm_mm_remove_node(&vma->node);
-	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
 	if (vma->pages != obj->mm.pages) {
 		GEM_BUG_ON(!vma->pages);
 		sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->pages = NULL;
 
-	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist.
-	 */
-	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
-
-	/* And finally now the object is completely decoupled from this vma,
-	 * we can drop its hold on the backing storage and allow it to be
-	 * reaped by the shrinker.
-	 */
-	i915_gem_object_unpin_pages(obj);
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+	i915_vma_remove(vma);
 
 destroy:
 	if (unlikely(i915_vma_is_closed(vma)))
...
@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
 	crtc->base.mode = crtc->base.state->mode;
 
-	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
-		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
-		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
 	/*
 	 * Update pipe size and adjust fitter if needed: the reason for this is
 	 * that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 	struct intel_crtc_scaler_state *scaler_state =
 		&crtc->config->scaler_state;
 
-	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
 	if (crtc->config->pch_pfit.enabled) {
 		int id;
 
-		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
-			DRM_ERROR("Requesting pfit without getting a scaler first\n");
+		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
 			return;
-		}
 
 		id = scaler_state->scaler_id;
 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
-		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
 	}
 }
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
 	} while (progress);
 }
 
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+	struct intel_atomic_state *state, *next;
+	struct llist_node *freed;
+
+	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+	llist_for_each_entry_safe(state, next, freed, freed)
+		drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+	intel_atomic_helper_free_state(dev_priv);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	 * can happen also when the device is completely off.
 	 */
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+	intel_atomic_helper_free_state(dev_priv);
 }
 
 static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 		to_intel_atomic_state(old_crtc_state->state);
 	bool modeset = needs_modeset(crtc->state);
 
+	if (!modeset &&
+	    (intel_cstate->base.color_mgmt_changed ||
+	     intel_cstate->update_pipe)) {
+		intel_color_set_csc(crtc->state);
+		intel_color_load_luts(crtc->state);
+	}
+
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(intel_crtc);
 
 	if (modeset)
 		goto out;
 
-	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
-		intel_color_set_csc(crtc->state);
-		intel_color_load_luts(crtc->state);
-	}
-
 	if (intel_cstate->update_pipe)
 		intel_update_pipe_config(intel_crtc, old_intel_cstate);
 	else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ static void sanitize_watermarks(struct drm_device *dev)
 	drm_modeset_acquire_fini(&ctx);
 }
 
-static void intel_atomic_helper_free_state(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
-	struct intel_atomic_state *state, *next;
-	struct llist_node *freed;
-
-	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
-	llist_for_each_entry_safe(state, next, freed, freed)
-		drm_atomic_state_put(&state->base);
-}
-
 int intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
 	dev->mode_config.funcs = &intel_mode_funcs;
 
 	INIT_WORK(&dev_priv->atomic_helper.free_work,
-		  intel_atomic_helper_free_state);
+		  intel_atomic_helper_free_state_worker);
 
 	intel_init_quirks(dev);
...
@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 			    bool *enabled, int width, int height)
 {
 	struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
-	unsigned long conn_configured, mask;
+	unsigned long conn_configured, conn_seq, mask;
 	unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
 	int i, j;
 	bool *save_enabled;
 	bool fallback = true;
 	int num_connectors_enabled = 0;
 	int num_connectors_detected = 0;
-	int pass = 0;
 
 	save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
 	if (!save_enabled)
mask = BIT(count) - 1; mask = BIT(count) - 1;
conn_configured = 0; conn_configured = 0;
retry: retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn; struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector; struct drm_connector *connector;
@@ -387,7 +387,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 		if (conn_configured & BIT(i))
 			continue;
 
-		if (pass == 0 && !connector->has_tile)
+		if (conn_seq == 0 && !connector->has_tile)
 			continue;
 
 		if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 		conn_configured |= BIT(i);
 	}
 
-	if ((conn_configured & mask) != mask) {
-		pass++;
+	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
 		goto retry;
-	}
 
 	/*
 	 * If the BIOS didn't enable everything it could, fall back to have the
...
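
The conn_seq change above swaps a blind pass counter for stagnation detection: keep retrying only while a pass configures at least one new connector, so a loop that makes no progress terminates instead of respinning forever. A standalone sketch of that loop shape (all names here are illustrative, not the driver's):

#include <stdbool.h>

#define BIT(n) (1UL << (n))

/* Illustrative stand-in for the per-connector configuration step. */
static bool try_configure(int i)
{
    return i % 2 == 0; /* pretend odd connectors never configure */
}

static bool configure_all(int count)
{
    unsigned long mask = BIT(count) - 1;
    unsigned long configured = 0, seq;
    int i;

retry:
    seq = configured;
    for (i = 0; i < count; i++) {
        if (configured & BIT(i))
            continue;
        if (try_configure(i))
            configured |= BIT(i);
    }
    /* Retry only when work remains AND this pass made progress;
     * a stagnant pass (configured == seq) must not respin forever. */
    if ((configured & mask) != mask && configured != seq)
        goto retry;

    return (configured & mask) == mask;
}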
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 		break;
 	}
 
+	/* When byt can survive without system hang with dynamic
+	 * sw freq adjustments, this restriction can be lifted.
+	 */
+	if (IS_VALLEYVIEW(dev_priv))
+		goto skip_hw_write;
+
 	I915_WRITE(GEN6_RP_UP_EI,
 		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
 	I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
 
+skip_hw_write:
 	dev_priv->rps.power = new_power;
 	dev_priv->rps.up_threshold = threshold_up;
 	dev_priv->rps.down_threshold = threshold_down;
@@ -7916,10 +7923,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
  * @timeout_base_ms: timeout for polling with preemption enabled
  *
  * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
  * The request is acknowledged once the PCODE reply dword equals @reply after
  * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
  * preemption disabled.
  *
  * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7962,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 	 * worst case) _and_ PCODE was busy for some reason even after a
 	 * (queued) request and @timeout_base_ms delay. As a workaround retry
 	 * the poll with preemption disabled to maximize the number of
-	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
+	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
 	 * account for interrupts that could reduce the number of these
-	 * requests.
+	 * requests, and for any quirks of the PCODE firmware that delays
+	 * the request completion.
 	 */
 	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
 	WARN_ON_ONCE(timeout_base_ms > 3);
 	preempt_disable();
-	ret = wait_for_atomic(COND, 10);
+	ret = wait_for_atomic(COND, 50);
 	preempt_enable();
 
 out:
...
@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
 		int scaler_id = plane_state->scaler_id;
 		const struct intel_scaler *scaler;
 
-		DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
-			      plane_id, PS_PLANE_SEL(plane_id));
-
 		scaler = &crtc_state->scaler_state.scalers[scaler_id];
 
 		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
...
@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 
 	for_each_fw_domain_masked(d, fw_domains, dev_priv)
 		fw_domain_wait_ack(d);
+
+	dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
 		fw_domain_put(d);
 		fw_domain_posting_read(d);
 	}
+
+	dev_priv->uncore.fw_domains_active &= ~fw_domains;
 }
 
 static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
 	if (WARN_ON(domain->wake_count == 0))
 		domain->wake_count++;
 
-	if (--domain->wake_count == 0) {
+	if (--domain->wake_count == 0)
 		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
-		dev_priv->uncore.fw_domains_active &= ~domain->mask;
-	}
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 		fw_domains &= ~domain->mask;
 	}
 
-	if (fw_domains) {
+	if (fw_domains)
 		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-		dev_priv->uncore.fw_domains_active |= fw_domains;
-	}
 }
 
 /**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
 		fw_domain_arm_timer(domain);
 
 	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-	dev_priv->uncore.fw_domains_active |= fw_domains;
 }
 
 static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
...
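
The forcewake hunks above move the fw_domains_active bookkeeping into fw_domains_get()/fw_domains_put() themselves, so every caller (explicit gets, the release timer, the auto path) keeps the mask consistent by construction instead of each call site repeating the update. A toy model of that invariant, with all names illustrative rather than the driver's:

struct uncore_model {
    unsigned int fw_domains_active;
};

/* Stand-ins for the hardware wake/sleep handshakes. */
static void hw_wake(unsigned int domains)  { (void)domains; }
static void hw_sleep(unsigned int domains) { (void)domains; }

static void fw_get(struct uncore_model *u, unsigned int domains)
{
    hw_wake(domains);
    u->fw_domains_active |= domains;   /* single point of update */
}

static void fw_put(struct uncore_model *u, unsigned int domains)
{
    hw_sleep(domains);
    u->fw_domains_active &= ~domains;  /* mirrors fw_get() */
}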