Commit a389b6a1 authored by Dave Airlie

Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes

Daniel Vetter writes:

"A few important fixes:
- fix various lvds backlight issues, regressed in 3.6 (Takashi Iwai)
- make the retina mbp work (ignore bogus edp bpc value in vbt)
- fix a gmbus regression introduced in (iirc) 3.4 (Jani Nikula)
- fix an edp panel power sequence regression, fixes the new macbook air
- apply the tlb invalidate w/a

Otherwise we still have another gmbus regression (patches are awaiting
tested-bys) and there's something odd going on with some rare systems not
entering rc6 often enough (and hence blowing through too much power).  It
seems to be a timing-related issue and can be mitigated by frobbing the
magic tuning parameters. We're still working on that one. Also, we still
have some fallout from the hw context support, but you can only hit that
with mesa master."

* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel:
  drm/i915: Apply post-sync write for pipe control invalidates
  drm/i915: reorder edp disabling to fix ivb MacBook Air
  drm/i915: ensure i2c adapter is all set before adding it
  drm/i915: ignore eDP bpc settings from vbt
  drm/i915: Fix blank panel at reopening lid
parents 7bac6b46 7d54a904
drivers/gpu/drm/i915/intel_display.c
@@ -3754,17 +3754,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 			continue;
 		}
 
-		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
-			/* Use VBT settings if we have an eDP panel */
-			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
-			if (edp_bpc < display_bpc) {
-				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
-				display_bpc = edp_bpc;
-			}
-			continue;
-		}
-
 		/* Not one of the known troublemakers, check the EDID */
 		list_for_each_entry(connector, &dev->mode_config.connector_list,
 				    head) {
drivers/gpu/drm/i915/intel_dp.c
@@ -1174,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
 	pp = ironlake_get_pp_control(dev_priv);
-	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	/* We need to switch off panel power _and_ force vdd, for otherwise some
+	 * panels get very unhappy and cease to work. */
+	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
+	intel_dp->want_panel_vdd = false;
+
 	ironlake_wait_panel_off(intel_dp);
 }
@@ -1287,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 	 * ensure that we have vdd while we switch off the panel. */
 	ironlake_edp_panel_vdd_on(intel_dp);
 	ironlake_edp_backlight_off(intel_dp);
-	ironlake_edp_panel_off(intel_dp);
-
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	ironlake_edp_panel_off(intel_dp);
 	intel_dp_link_down(intel_dp);
-	ironlake_edp_panel_vdd_off(intel_dp, false);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1326,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 		/* Switching the panel off requires vdd. */
 		ironlake_edp_panel_vdd_on(intel_dp);
 		ironlake_edp_backlight_off(intel_dp);
-		ironlake_edp_panel_off(intel_dp);
-
 		intel_dp_sink_dpms(intel_dp, mode);
+		ironlake_edp_panel_off(intel_dp);
 		intel_dp_link_down(intel_dp);
-		ironlake_edp_panel_vdd_off(intel_dp, false);
 
 		if (is_cpu_edp(intel_dp))
 			ironlake_edp_pll_off(encoder);
drivers/gpu/drm/i915/intel_i2c.c
@@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev)
 		bus->dev_priv = dev_priv;
 
 		bus->adapter.algo = &gmbus_algorithm;
-		ret = i2c_add_adapter(&bus->adapter);
-		if (ret)
-			goto err;
 
 		/* By default use a conservative clock rate */
 		bus->reg0 = port | GMBUS_RATE_100KHZ;
@@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev)
 			bus->force_bit = true;
 
 		intel_gpio_setup(bus, port);
+
+		ret = i2c_add_adapter(&bus->adapter);
+		if (ret)
+			goto err;
 	}
 
 	intel_i2c_reset(dev_priv->dev);
drivers/gpu/drm/i915/intel_panel.c
@@ -311,9 +311,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
 	if (dev_priv->backlight_level == 0)
 		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
 
-	dev_priv->backlight_enabled = true;
-	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
-
 	if (INTEL_INFO(dev)->gen >= 4) {
 		uint32_t reg, tmp;
@@ -326,7 +323,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
 		 * we don't track the backlight dpms state, hence check whether
 		 * we have to do anything first. */
 		if (tmp & BLM_PWM_ENABLE)
-			return;
+			goto set_level;
 
 		if (dev_priv->num_pipe == 3)
 			tmp &= ~BLM_PIPE_SELECT_IVB;
@@ -347,6 +344,14 @@ void intel_panel_enable_backlight(struct drm_device *dev,
 			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
 		}
 	}
+
+set_level:
+	/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+	 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+	 * registers are set.
+	 */
+	dev_priv->backlight_enabled = true;
+	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
 }
 
 static void intel_panel_init_backlight(struct drm_device *dev)
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-	flags |= PIPE_CONTROL_TLB_INVALIDATE;
-	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
-	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
-	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
-	/*
-	 * Ensure that any following seqno writes only happen when the render
-	 * cache is indeed flushed (but only if the caller actually wants that).
-	 */
-	if (flush_domains)
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		/*
+		 * Ensure that any following seqno writes only happen
+		 * when the render cache is indeed flushed.
+		 */
 		flags |= PIPE_CONTROL_CS_STALL;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE;
+	}
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 	intel_ring_emit(ring, flags);
 	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* lower dword */
-	intel_ring_emit(ring, 0); /* uppwer dword */
-	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
 	return 0;