Commit 3d76917f authored by Dave Airlie

Merge tag 'drm-intel-fixes-2017-06-08' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

drm/i915 fixes for v4.12-rc5

* tag 'drm-intel-fixes-2017-06-08' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: fix warning for unused variable
  drm/i915: Fix 90/270 rotated coordinates for FBC
  drm/i915: Restore has_fbc=1 for ILK-M
  drm/i915: Workaround VLV/CHV DSI scanline counter hardware fail
  drm/i915: Fix logical inversion for gen4 quirking
  drm/i915: Guard against i915_ggtt_disable_guc() being invoked unconditionally
  drm/i915: Always recompute watermarks when distrust_bios_wm is set, v2.
  drm/i915: Prevent the system suspend complete optimization
  drm/i915/psr: disable psr2 for resolution greater than 32X20
  drm/i915: Hold a wakeref for probing the ring registers
  drm/i915: Short-circuit i915_gem_wait_for_idle() if already idle
  drm/i915: Disable decoupled MMIO
  drm/i915/guc: Remove stale comment for q_fail
  drm/i915: Serialize GTT/Aperture accesses on BXT
parents b62dba55 ef6c4d75
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_fini;
 	pci_set_drvdata(pdev, &dev_priv->drm);
+	/*
+	 * Disable the system suspend direct complete optimization, which can
+	 * leave the device suspended skipping the driver's suspend handlers
+	 * if the device was already runtime suspended. This is needed due to
+	 * the difference in our runtime and system suspend sequence and
+	 * because the HDA driver may require us to enable the audio power
+	 * domain during system suspend.
+	 */
+	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 	ret = i915_driver_init_early(dev_priv, ent);
 	if (ret < 0)
...
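For context: as the comment above explains, PCI_DEV_FLAGS_NEEDS_RESUME tells the PCI/PM core not to apply the direct-complete shortcut to this device, so the driver's own system suspend/resume hooks always run. A minimal sketch of the same opt-out from a generic PCI driver's probe routine (the example_probe() name is hypothetical):

	#include <linux/pci.h>

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		/* Ask the PM core to run the full suspend/resume callbacks even
		 * if the device is already runtime suspended when system
		 * suspend starts. */
		pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
		return 0;
	}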
@@ -2991,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
 	return false;
 }
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
+		return true;
+#endif
+	return false;
+}
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 				int enable_ppgtt);
...
@@ -3298,6 +3298,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
 	int ret;
+	/* If the device is asleep, we have no requests outstanding */
+	if (!READ_ONCE(i915->gt.awake))
+		return 0;
 	if (flags & I915_WAIT_LOCKED) {
 		struct i915_gem_timeline *tl;
...
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		gen8_set_pte(&gtt_base[i], scratch_pte);
 }
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+	struct drm_i915_private *dev_priv = vm->i915;
+	/*
+	 * Make sure the internal GAM fifo has been cleared of all GTT
+	 * writes before exiting stop_machine(). This guarantees that
+	 * any aperture accesses waiting to start in another process
+	 * cannot back up behind the GTT writes causing a hang.
+	 * The register can be any arbitrary GAM register.
+	 */
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+struct insert_page {
+	struct i915_address_space *vm;
+	dma_addr_t addr;
+	u64 offset;
+	enum i915_cache_level level;
+};
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+	struct insert_page *arg = _arg;
+	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+	bxt_vtd_ggtt_wa(arg->vm);
+	return 0;
+}
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+					  dma_addr_t addr,
+					  u64 offset,
+					  enum i915_cache_level level,
+					  u32 unused)
+{
+	struct insert_page arg = { vm, addr, offset, level };
+	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+struct insert_entries {
+	struct i915_address_space *vm;
+	struct sg_table *st;
+	u64 start;
+	enum i915_cache_level level;
+};
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+	struct insert_entries *arg = _arg;
+	gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+	bxt_vtd_ggtt_wa(arg->vm);
+	return 0;
+}
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+					     struct sg_table *st,
+					     u64 start,
+					     enum i915_cache_level level,
+					     u32 unused)
+{
+	struct insert_entries arg = { vm, st, start, level };
+	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+struct clear_range {
+	struct i915_address_space *vm;
+	u64 start;
+	u64 length;
+};
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+	struct clear_range *arg = _arg;
+	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+	bxt_vtd_ggtt_wa(arg->vm);
+	return 0;
+}
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+					  u64 start,
+					  u64 length)
+{
+	struct clear_range arg = { vm, start, length };
+	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  u64 start, u64 length)
 {
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+	if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+		ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+		ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+		if (ggtt->base.clear_range != nop_clear_range)
+			ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+	}
 	ggtt->invalidate = gen6_ggtt_invalidate;
 	return ggtt_probe_common(ggtt, size);
@@ -2997,6 +3100,7 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 {
-	i915->ggtt.invalidate = gen6_ggtt_invalidate;
+	if (i915->ggtt.invalidate == guc_ggtt_invalidate)
+		i915->ggtt.invalidate = gen6_ggtt_invalidate;
 }
...
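The three __BKL wrappers above share one shape: pack the arguments into a struct, run the real update from a stop_machine() callback, then drain the GAM fifo before returning. stop_machine() executes the callback on a single CPU while every other online CPU spins with interrupts disabled, which is what keeps concurrent aperture accesses from overlapping the GTT update. A stripped-down sketch of that pattern (perform_update() and flush_posted_writes() are placeholders, not i915 functions):

	#include <linux/stop_machine.h>

	struct update_args {
		void *state;	/* whatever the update needs */
	};

	static int serialized_update_cb(void *data)
	{
		struct update_args *args = data;

		perform_update(args->state);	/* placeholder for the real work */
		flush_posted_writes();		/* placeholder for the fifo drain */
		return 0;
	}

	static void serialized_update(void *state)
	{
		struct update_args args = { .state = state };

		/* NULL cpumask: the callback may run on any one CPU. */
		stop_machine(serialized_update_cb, &args, NULL);
	}

The cost is that every GGTT update becomes a global synchronization point, which is why the wrappers are only installed when the VT-d workaround is actually needed, as in the gen8_gmch_probe hunk above.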
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 			obj->mm.quirked = false;
 		}
 		if (!i915_gem_object_is_tiled(obj)) {
-			GEM_BUG_ON(!obj->mm.quirked);
+			GEM_BUG_ON(obj->mm.quirked);
 			__i915_gem_object_pin_pages(obj);
 			obj->mm.quirked = true;
 		}
...
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
 	GEN5_FEATURES,
 	.platform = INTEL_IRONLAKE,
-	.is_mobile = 1,
+	.is_mobile = 1, .has_fbc = 1,
 };
 #define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
 	.has_hw_contexts = 1, \
 	.has_logical_ring_contexts = 1, \
 	.has_guc = 1, \
-	.has_decoupled_mmio = 1, \
 	.has_aliasing_ppgtt = 1, \
 	.has_full_ppgtt = 1, \
 	.has_full_48bit_ppgtt = 1, \
...
@@ -12203,6 +12203,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 	 * type. For DP ports it behaves like most other platforms, but on HDMI
 	 * there's an extra 1 line difference. So we need to add two instead of
 	 * one to the value.
+	 *
+	 * On VLV/CHV DSI the scanline counter would appear to increment
+	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
+	 * that means we can't tell whether we're in vblank or not while
+	 * we're on that particular line. We must still set scanline_offset
+	 * to 1 so that the vblank timestamps come out correct when we query
+	 * the scanline counter from within the vblank interrupt handler.
+	 * However if queried just before the start of vblank we'll get an
+	 * answer that's slightly in the future.
 	 */
 	if (IS_GEN2(dev_priv)) {
 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
...
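The scanline_offset picked here is later added back to the raw hardware scanline counter and wrapped around vtotal, which is why a counter that ticks over a fraction of a line early only makes the reported position look slightly in the future instead of breaking the vblank timestamps. A simplified model of that readback (a hypothetical helper, not the driver's actual one):

	static int scanline_from_hw(int hw_position, int scanline_offset, int vtotal)
	{
		/* Wrap around vtotal so an offset applied on the last line of
		 * the frame maps back to line 0 rather than past the end. */
		return (hw_position + scanline_offset) % vtotal;
	}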
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	return 0;
 }
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	bool idle = true;
+	intel_runtime_pm_get(dev_priv);
+	/* No bit for gen2, so assume the CS parser is idle */
+	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+		idle = false;
+	intel_runtime_pm_put(dev_priv);
+	return idle;
+}
 /**
  * intel_engine_is_idle() - Report if the engine has finished processing all work
  * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
 	/* Any inflight/incomplete requests? */
 	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
 			       intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 		return false;
 	/* Ring stopped? */
-	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+	if (!ring_is_idle(engine))
 		return false;
 	return true;
...
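ring_is_idle() exists so that MODE_IDLE is only sampled while a runtime-PM wakeref is held; poking engine registers while the device is runtime suspended is not safe. The same bracket applies to any probe-style register read (sketch only; read_some_register() is a placeholder):

	static u32 sample_register_awake(struct drm_i915_private *dev_priv)
	{
		u32 value;

		intel_runtime_pm_get(dev_priv);		/* keep the device awake... */
		value = read_some_register(dev_priv);	/* ...while sampling (placeholder) */
		intel_runtime_pm_put(dev_priv);

		return value;
	}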
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
 					    int *width, int *height)
 {
-	int w, h;
-	if (drm_rotation_90_or_270(cache->plane.rotation)) {
-		w = cache->plane.src_h;
-		h = cache->plane.src_w;
-	} else {
-		w = cache->plane.src_w;
-		h = cache->plane.src_h;
-	}
 	if (width)
-		*width = w;
+		*width = cache->plane.src_w;
 	if (height)
-		*height = h;
+		*height = cache->plane.src_h;
 }
 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
 	cache->plane.rotation = plane_state->base.rotation;
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
 	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
 	cache->plane.visible = plane_state->base.visible;
...
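The cached src_w/src_h come from the plane's source rectangle, which atomic plane state stores in 16.16 fixed point; the >> 16 recovers whole pixels. The point of the fix is that those coordinates are already expressed in the rotated (GTT-mapped) orientation for 90/270, so FBC must not swap width and height again. A small sketch of the fixed-point conversion (hypothetical helper, illustrative value in the comment):

	#include <drm/drm_rect.h>

	static int plane_src_width_px(const struct drm_rect *src)
	{
		/* src is 16.16 fixed point, e.g. 1920 << 16 reads back as 1920 */
		return drm_rect_width(src) >> 16;
	}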
@@ -4335,10 +4335,18 @@ skl_compute_wm(struct drm_atomic_state *state)
 	struct drm_crtc_state *cstate;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct skl_wm_values *results = &intel_state->wm_results;
+	struct drm_device *dev = state->dev;
 	struct skl_pipe_wm *pipe_wm;
 	bool changed = false;
 	int ret, i;
+	/*
+	 * When we distrust bios wm we always need to recompute to set the
+	 * expected DDB allocations for each CRTC.
+	 */
+	if (to_i915(dev)->wm.distrust_bios_wm)
+		changed = true;
 	/*
 	 * If this transaction isn't actually touching any CRTC's, don't
 	 * bother with watermark calculation. Note that if we pass this
@@ -4349,6 +4357,7 @@ skl_compute_wm(struct drm_atomic_state *state)
 	 */
 	for_each_new_crtc_in_state(state, crtc, cstate, i)
 		changed = true;
 	if (!changed)
 		return 0;
...
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 	}
 	/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
-	if (intel_crtc->config->pipe_src_w > 3200 ||
-	    intel_crtc->config->pipe_src_h > 2000) {
+	if (dev_priv->psr.psr2_support &&
+	    (intel_crtc->config->pipe_src_w > 3200 ||
+	     intel_crtc->config->pipe_src_h > 2000)) {
 		dev_priv->psr.psr2_support = false;
 		return false;
 	}
...
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 	long timeout = msecs_to_jiffies_timeout(1);
 	int scanline, min, max, vblank_start;
 	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+		intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
 	DEFINE_WAIT(wait);
 	vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 	drm_crtc_vblank_put(&crtc->base);
+	/*
+	 * On VLV/CHV DSI the scanline counter would appear to
+	 * increment approx. 1/3 of a scanline before start of vblank.
+	 * The registers still get latched at start of vblank however.
+	 * This means we must not write any registers on the first
+	 * line of vblank (since not the whole line is actually in
+	 * vblank). And unfortunately we can't use the interrupt to
+	 * wait here since it will fire too soon. We could use the
+	 * frame start interrupt instead since it will fire after the
+	 * critical scanline, but that would require more changes
+	 * in the interrupt code. So for now we'll just do the nasty
+	 * thing and poll for the bad scanline to pass us by.
+	 *
+	 * FIXME figure out if BXT+ DSI suffers from this as well
+	 */
+	while (need_vlv_dsi_wa && scanline == vblank_start)
+		scanline = intel_get_crtc_scanline(crtc);
 	crtc->debug.scanline_start = scanline;
 	crtc->debug.start_vbl_time = ktime_get();
 	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
...
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
  *     available in the work queue (note, the queue is shared,
  *     not per-engine). It is OK for this to be nonzero, but
  *     it should not be huge!
- * q_fail: failed to enqueue a work item. This should never happen,
- *     because we check for space beforehand.
  * b_fail: failed to ring the doorbell. This should never happen, unless
  *     somehow the hardware misbehaves, or maybe if the GuC firmware
  *     crashes? We probably need to reset the GPU to recover.
...