Commit 311548f1 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'drm-fixes-for-v4.12-rc7' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A varied bunch of fixes, one for an API regression with connectors.

  Otherwise amdgpu and i915 have a bunch of varied fixes, the shrinker
  ones being the most important"

* tag 'drm-fixes-for-v4.12-rc7' of git://people.freedesktop.org/~airlied/linux:
  drm: Fix GETCONNECTOR regression
  drm/radeon: add a quirk for Toshiba Satellite L20-183
  drm/radeon: add a PX quirk for another K53TK variant
  drm/amdgpu: adjust default display clock
  drm/amdgpu/atom: fix ps allocation size for EnableDispPowerGating
  drm/amdgpu: add Polaris12 DID
  drm/i915: Don't enable backlight at setup time.
  drm/i915: Plumb the correct acquire ctx into intel_crtc_disable_noatomic()
  drm/i915: Fix deadlock witha the pipe A quirk during resume
  drm/i915: Remove __GFP_NORETRY from our buffer allocator
  drm/i915: Encourage our shrinker more when our shmemfs allocations fails
  drm/i915: Differentiate between sw write location into ring and last hw read
parents 7139a06b 33ce7563
...@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) ...@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100); adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000; adev->clock.default_dispclk = 60000;
} else if (adev->clock.default_dispclk <= 60000) {
DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 62500;
} }
adev->clock.dp_extclk = adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
......
...@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = { ...@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* Vega 10 */ /* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
......
...@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) ...@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
...@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) ...@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{ {
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args)); memset(&args, 0, sizeof(args));
......
...@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, ...@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector) if (!connector)
return -ENOENT; return -ENOENT;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
if (ret)
goto out_unref;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
if (connector->encoder_ids[i] != 0) if (connector->encoder_ids[i] != 0)
encoders_count++; encoders_count++;
...@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, ...@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (put_user(connector->encoder_ids[i], if (put_user(connector->encoder_ids[i],
encoder_ptr + copied)) { encoder_ptr + copied)) {
ret = -EFAULT; ret = -EFAULT;
goto out_unref; goto out;
} }
copied++; copied++;
} }
...@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, ...@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (copy_to_user(mode_ptr + copied, if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) { &u_mode, sizeof(u_mode))) {
ret = -EFAULT; ret = -EFAULT;
mutex_unlock(&dev->mode_config.mutex);
goto out; goto out;
} }
copied++; copied++;
} }
} }
out_resp->count_modes = mode_count; out_resp->count_modes = mode_count;
out:
mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&dev->mode_config.mutex);
out_unref:
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
/* Only grab properties after probing, to make sure EDID and other
* properties reflect the latest status. */
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
out:
drm_connector_put(connector); drm_connector_put(connector);
return ret; return ret;
......
...@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct page *page; struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */ unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment; unsigned int max_segment;
gfp_t noreclaim;
int ret; int ret;
gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it /* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in * wasn't in the GTT, there shouldn't be any way it could have been in
...@@ -2315,22 +2315,31 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2315,22 +2315,31 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* Fail silently without starting the shrinker * Fail silently without starting the shrinker
*/ */
mapping = obj->base.filp->f_mapping; mapping = obj->base.filp->f_mapping;
gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); noreclaim = mapping_gfp_constraint(mapping,
gfp |= __GFP_NORETRY | __GFP_NOWARN; ~(__GFP_IO | __GFP_RECLAIM));
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl; sg = st->sgl;
st->nents = 0; st->nents = 0;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp); const unsigned int shrink[] = {
if (unlikely(IS_ERR(page))) { I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
i915_gem_shrink(dev_priv, 0,
page_count, }, *s = shrink;
I915_SHRINK_BOUND | gfp_t gfp = noreclaim;
I915_SHRINK_UNBOUND |
I915_SHRINK_PURGEABLE); do {
page = shmem_read_mapping_page_gfp(mapping, i, gfp); page = shmem_read_mapping_page_gfp(mapping, i, gfp);
} if (likely(!IS_ERR(page)))
if (unlikely(IS_ERR(page))) { break;
gfp_t reclaim;
if (!*s) {
ret = PTR_ERR(page);
goto err_sg;
}
i915_gem_shrink(dev_priv, 2 * page_count, *s++);
cond_resched();
/* We've tried hard to allocate the memory by reaping /* We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and * our own buffer, now let the real VM do its job and
...@@ -2340,15 +2349,26 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2340,15 +2349,26 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* defer the oom here by reporting the ENOMEM back * defer the oom here by reporting the ENOMEM back
* to userspace. * to userspace.
*/ */
reclaim = mapping_gfp_mask(mapping); if (!*s) {
reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ /* reclaim and warn, but no oom */
gfp = mapping_gfp_mask(mapping);
page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
if (IS_ERR(page)) { /* Our bo are always dirty and so we require
ret = PTR_ERR(page); * kswapd to reclaim our pages (direct reclaim
goto err_sg; * does not effectively begin pageout of our
* buffers on its own). However, direct reclaim
* only waits for kswapd when under allocation
* congestion. So as a result __GFP_RECLAIM is
* unreliable and fails to actually reclaim our
* dirty pages -- unless you try over and over
* again with !__GFP_NORETRY. However, we still
* want to fail this allocation rather than
* trigger the out-of-memory killer and for
* this we want the future __GFP_MAYFAIL.
*/
} }
} } while (1);
if (!i || if (!i ||
sg->length >= max_segment || sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) { page_to_pfn(page) != last_pfn + 1) {
...@@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) ...@@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
mapping = obj->base.filp->f_mapping; mapping = obj->base.filp->f_mapping;
mapping_set_gfp_mask(mapping, mask); mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
i915_gem_object_init(obj, &i915_gem_object_ops); i915_gem_object_init(obj, &i915_gem_object_ops);
......
...@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, ...@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* GPU processing the request, we never over-estimate the * GPU processing the request, we never over-estimate the
* position of the head. * position of the head.
*/ */
req->head = req->ring->tail; req->head = req->ring->emit;
/* Check that we didn't interrupt ourselves with a new request */ /* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
......
...@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client, ...@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
GEM_BUG_ON(freespace < wqi_size); GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */ /* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->tail; tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
assert_ring_tail_valid(rq->ring, rq->tail);
tail >>= 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
......
...@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, ...@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
static void skylake_pfit_enable(struct intel_crtc *crtc); static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc); static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev); static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
struct intel_limit { struct intel_limit {
...@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev, ...@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
struct drm_crtc *crtc; struct drm_crtc *crtc;
int i, ret; int i, ret;
intel_modeset_setup_hw_state(dev); intel_modeset_setup_hw_state(dev, ctx);
i915_redisable_vga(to_i915(dev)); i915_redisable_vga(to_i915(dev));
if (!state) if (!state)
...@@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, ...@@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(intel_crtc); intel_update_watermarks(intel_crtc);
} }
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{ {
struct intel_encoder *encoder; struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
...@@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) ...@@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
return; return;
} }
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */ /* Everything's already locked, -EDEADLK can't happen. */
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
...@@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev) ...@@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev_priv); intel_setup_outputs(dev_priv);
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev); intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
drm_modeset_unlock_all(dev); drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) { for_each_intel_crtc(dev, crtc) {
...@@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev) ...@@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
return 0; return 0;
} }
static void intel_enable_pipe_a(struct drm_device *dev) static void intel_enable_pipe_a(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{ {
struct intel_connector *connector; struct intel_connector *connector;
struct drm_connector_list_iter conn_iter; struct drm_connector_list_iter conn_iter;
struct drm_connector *crt = NULL; struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp; struct intel_load_detect_pipe load_detect_temp;
struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
int ret; int ret;
/* We can't just switch on the pipe A, we need to set things up with a /* We can't just switch on the pipe A, we need to set things up with a
...@@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv, ...@@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
} }
static void intel_sanitize_crtc(struct intel_crtc *crtc) static void intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
...@@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) ...@@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
plane = crtc->plane; plane = crtc->plane;
crtc->base.primary->state->visible = true; crtc->base.primary->state->visible = true;
crtc->plane = !plane; crtc->plane = !plane;
intel_crtc_disable_noatomic(&crtc->base); intel_crtc_disable_noatomic(&crtc->base, ctx);
crtc->plane = plane; crtc->plane = plane;
} }
...@@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) ...@@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* resume. Force-enable the pipe to fix this, the update_dpms * resume. Force-enable the pipe to fix this, the update_dpms
* call below we restore the pipe to the right state, but leave * call below we restore the pipe to the right state, but leave
* the required bits on. */ * the required bits on. */
intel_enable_pipe_a(dev); intel_enable_pipe_a(dev, ctx);
} }
/* Adjust the state of the output pipe according to whether we /* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */ * have active connectors/encoders. */
if (crtc->active && !intel_crtc_has_encoders(crtc)) if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base); intel_crtc_disable_noatomic(&crtc->base, ctx);
if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/* /*
...@@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) ...@@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
* and sanitizes it to the current state * and sanitizes it to the current state
*/ */
static void static void
intel_modeset_setup_hw_state(struct drm_device *dev) intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{ {
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe; enum pipe pipe;
...@@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev) ...@@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe); crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_sanitize_crtc(crtc); intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc, crtc->config, intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]"); "[setup_hw_state]");
} }
......
...@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector, ...@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
intel_dp_aux_enable_backlight(connector);
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
panel->backlight.max = 0xFFFF; panel->backlight.max = 0xFFFF;
else else
......
...@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) ...@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state; u32 *reg_state = ce->lrc_reg_state;
assert_ring_tail_valid(rq->ring, rq->tail); reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP /* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page. * registers and point the unallocated PDPs to scratch page.
...@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv) ...@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
ce->state->obj->mm.dirty = true; ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj); i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = ce->ring->tail = 0; intel_ring_reset(ce->ring, 0);
intel_ring_update_space(ce->ring);
} }
} }
} }
...@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size) ...@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
void intel_ring_update_space(struct intel_ring *ring) void intel_ring_update_space(struct intel_ring *ring)
{ {
ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
} }
static int static int
...@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request) ...@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request); i915_gem_request_submit(request);
assert_ring_tail_valid(request->ring, request->tail); I915_WRITE_TAIL(request->engine,
I915_WRITE_TAIL(request->engine, request->tail); intel_ring_set_tail(request->ring, request->tail));
} }
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
...@@ -1316,11 +1316,23 @@ int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias) ...@@ -1316,11 +1316,23 @@ int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
return PTR_ERR(addr); return PTR_ERR(addr);
} }
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
GEM_BUG_ON(!list_empty(&ring->request_list));
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
intel_ring_update_space(ring);
}
void intel_ring_unpin(struct intel_ring *ring) void intel_ring_unpin(struct intel_ring *ring)
{ {
GEM_BUG_ON(!ring->vma); GEM_BUG_ON(!ring->vma);
GEM_BUG_ON(!ring->vaddr); GEM_BUG_ON(!ring->vaddr);
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
if (i915_vma_is_map_and_fenceable(ring->vma)) if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma); i915_vma_unpin_iomap(ring->vma);
else else
...@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) ...@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
/* Restart from the beginning of the rings for convenience */
for_each_engine(engine, dev_priv, id) for_each_engine(engine, dev_priv, id)
engine->buffer->head = engine->buffer->tail; intel_ring_reset(engine->buffer, 0);
} }
static int ring_request_alloc(struct drm_i915_gem_request *request) static int ring_request_alloc(struct drm_i915_gem_request *request)
...@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) ...@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
unsigned space; unsigned space;
/* Would completion of this request free enough space? */ /* Would completion of this request free enough space? */
space = __intel_ring_space(target->postfix, ring->tail, space = __intel_ring_space(target->postfix, ring->emit,
ring->size); ring->size);
if (space >= bytes) if (space >= bytes)
break; break;
...@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) ...@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{ {
struct intel_ring *ring = req->ring; struct intel_ring *ring = req->ring;
int remain_actual = ring->size - ring->tail; int remain_actual = ring->size - ring->emit;
int remain_usable = ring->effective_size - ring->tail; int remain_usable = ring->effective_size - ring->emit;
int bytes = num_dwords * sizeof(u32); int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes; int total_bytes, wait_bytes;
bool need_wrap = false; bool need_wrap = false;
...@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) ...@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
if (unlikely(need_wrap)) { if (unlikely(need_wrap)) {
GEM_BUG_ON(remain_actual > ring->space); GEM_BUG_ON(remain_actual > ring->space);
GEM_BUG_ON(ring->tail + remain_actual > ring->size); GEM_BUG_ON(ring->emit + remain_actual > ring->size);
/* Fill the tail with MI_NOOP */ /* Fill the tail with MI_NOOP */
memset(ring->vaddr + ring->tail, 0, remain_actual); memset(ring->vaddr + ring->emit, 0, remain_actual);
ring->tail = 0; ring->emit = 0;
ring->space -= remain_actual; ring->space -= remain_actual;
} }
GEM_BUG_ON(ring->tail > ring->size - bytes); GEM_BUG_ON(ring->emit > ring->size - bytes);
cs = ring->vaddr + ring->tail; cs = ring->vaddr + ring->emit;
ring->tail += bytes; ring->emit += bytes;
ring->space -= bytes; ring->space -= bytes;
GEM_BUG_ON(ring->space < 0); GEM_BUG_ON(ring->space < 0);
...@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) ...@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int intel_ring_cacheline_align(struct drm_i915_gem_request *req) int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{ {
int num_dwords = int num_dwords =
(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
u32 *cs; u32 *cs;
if (num_dwords == 0) if (num_dwords == 0)
......
...@@ -145,6 +145,7 @@ struct intel_ring { ...@@ -145,6 +145,7 @@ struct intel_ring {
u32 head; u32 head;
u32 tail; u32 tail;
u32 emit;
int space; int space;
int size; int size;
...@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) ...@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
struct intel_ring * struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size); intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
void intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring); void intel_ring_free(struct intel_ring *ring);
...@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs) ...@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
* reserved for the command packet (i.e. the value passed to * reserved for the command packet (i.e. the value passed to
* intel_ring_begin()). * intel_ring_begin()).
*/ */
GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
} }
static inline u32 static inline u32
...@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) ...@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
GEM_BUG_ON(tail >= ring->size); GEM_BUG_ON(tail >= ring->size);
} }
void intel_ring_update_space(struct intel_ring *ring); static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
/* Whilst writes to the tail are strictly ordered, there is no
* serialisation between readers and the writers. The tail may be
* read by i915_gem_request_retire() just as it is being updated
* by execlists, as although the breadcrumb is complete, the context
* switch hasn't been seen.
*/
assert_ring_tail_valid(ring, tail);
ring->tail = tail;
return tail;
}
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
......
...@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev) ...@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x280a) rdev->pdev->subsystem_device == 0x280a)
return; return;
/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
* - it hangs on resume inside the dynclk 1 table.
*/
if (rdev->family == CHIP_RS400 &&
rdev->pdev->subsystem_vendor == 0x1179 &&
rdev->pdev->subsystem_device == 0xff31)
return;
/* DYN CLK 1 */ /* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
......
...@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { ...@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=51381 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/ */
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
* https://bugs.freedesktop.org/show_bug.cgi?id=101491
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */ /* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 },
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment