Commit 95ea5529 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'drm-fixes-2019-04-18' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Since Easter is looming for me, I'm just pushing whatever is in my
  tree, I'll see what else turns up and maybe I'll send another pull
  early next week if there is anything.

  tegra:
   - stream id programming fix
   - avoid divide by 0 for bad hdmi audio setup code

  ttm:
   - Hugepages fix
   - refcount imbalance in error path fix

  amdgpu:
   - GPU VM fixes for Vega/RV
   - DC AUX fix for active DP-DVI dongles
   - DC fix for multihead regression"

* tag 'drm-fixes-2019-04-18' of git://anongit.freedesktop.org/drm/drm:
  drm/tegra: hdmi: Setup audio only if configured
  drm/amd/display: If one stream full updates, full update all planes
  drm/amdgpu/gmc9: fix VM_L2_CNTL3 programming
  drm/amdgpu: shadow in shadow_list without tbo.mem.start cause page fault in sriov TDR
  gpu: host1x: Program stream ID to bypass without SMMU
  drm/amd/display: extending AUX SW Timeout
  drm/ttm: fix dma_fence refcount imbalance on error path
  drm/ttm: fix incrementing the page pointer for huge pages
  drm/ttm: fix start page for huge page check in ttm_put_pages()
  drm/ttm: fix out-of-bounds read in ttm_put_pages() v2
parents e53f31bf 00fd14ff
@@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 		/* No need to recover an evicted BO */
 		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
 		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
 			continue;
...
@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
 				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
 	}
+	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
 	tmp = mmVM_L2_CNTL4_DEFAULT;
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
...
@@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
 		return UPDATE_TYPE_FULL;
 	}

+	if (u->surface->force_full_update) {
+		update_flags->bits.full_update = 1;
+		return UPDATE_TYPE_FULL;
+	}
+
 	type = get_plane_info_update_type(u);
 	elevate_update_type(&overall_type, type);
@@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
 		}

 		dc_resource_state_copy_construct(state, context);
+
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+				new_pipe->plane_state->force_full_update = true;
+		}
 	}
@@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
 		dc->current_state = context;
 		dc_release_state(old);

+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+				pipe_ctx->plane_state->force_full_update = false;
+		}
 	}
 	/*let's use current_state to update watermark etc*/
 	if (update_type >= UPDATE_TYPE_FULL)
...
@@ -503,6 +503,9 @@ struct dc_plane_state {
 	struct dc_plane_status status;
 	struct dc_context *ctx;

+	/* HACK: Workaround for forcing full reprogramming under some conditions */
+	bool force_full_update;
+
 	/* private to dc_surface.c */
 	enum dc_irq_source irq_source;
 	struct kref refcount;
...
@@ -190,6 +190,12 @@ static void submit_channel_request(
 				1,
 				0);
 	}
+
+	REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+
+	REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+				10, aux110->timeout_period/10);
+
 	/* set the delay and the number of bytes to write */

 	/* The length include
@@ -242,9 +248,6 @@ static void submit_channel_request(
 		}
 	}

-	REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
-
-	REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
-				10, aux110->timeout_period/10);
 	REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
 }
...
@@ -71,11 +71,11 @@ enum {	/* This is the timeout as defined in DP 1.2a,
	 * at most within ~240usec. That means,
	 * increasing this timeout will not affect normal operation,
	 * and we'll timeout after
-	 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+	 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
	 * This timeout is especially important for
-	 * resume from S3 and CTS.
+	 * converters, resume from S3, and CTS.
	 */
-	SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+	SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
 };

 struct dce_aux {
...
@@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 	hdmi->dvi = !tegra_output_is_hdmi(output);

 	if (!hdmi->dvi) {
-		err = tegra_hdmi_setup_audio(hdmi);
-		if (err < 0)
-			hdmi->dvi = true;
+		/*
+		 * Make sure that the audio format has been configured before
+		 * enabling audio, otherwise we may try to divide by zero.
+		 */
+		if (hdmi->format.sample_rate > 0) {
+			err = tegra_hdmi_setup_audio(hdmi);
+			if (err < 0)
+				hdmi->dvi = true;
+		}
 	}

 	if (hdmi->config->has_hda)
...
@@ -876,8 +876,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 		reservation_object_add_shared_fence(bo->resv, fence);

 		ret = reservation_object_reserve_shared(bo->resv, 1);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			dma_fence_put(fence);
 			return ret;
+		}

 		dma_fence_put(bo->moving);
 		bo->moving = fence;
...
@@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		if (!(flags & TTM_PAGE_FLAG_DMA32)) {
-			for (j = 0; j < HPAGE_PMD_NR; ++j)
-				if (p++ != pages[i + j])
+		if (!(flags & TTM_PAGE_FLAG_DMA32) &&
+		    (npages - i) >= HPAGE_PMD_NR) {
+			for (j = 1; j < HPAGE_PMD_NR; ++j)
+				if (++p != pages[i + j])
 					break;

 			if (j == HPAGE_PMD_NR)
@@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		unsigned max_size, n2free;

 		spin_lock_irqsave(&huge->lock, irq_flags);
-		while (i < npages) {
+		while ((npages - i) >= HPAGE_PMD_NR) {
 			struct page *p = pages[i];
 			unsigned j;

 			if (!p)
 				break;

-			for (j = 0; j < HPAGE_PMD_NR; ++j)
-				if (p++ != pages[i + j])
+			for (j = 1; j < HPAGE_PMD_NR; ++j)
+				if (++p != pages[i + j])
 					break;

 			if (j != HPAGE_PMD_NR)
...
@@ -114,9 +114,13 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)

 static void host1x_channel_set_streamid(struct host1x_channel *channel)
 {
-#if IS_ENABLED(CONFIG_IOMMU_API) &&  HOST1X_HW >= 6
+#if HOST1X_HW >= 6
+	u32 sid = 0x7f;
+#ifdef CONFIG_IOMMU_API
 	struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
-	u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
+	if (spec)
+		sid = spec->ids[0] & 0xffff;
+#endif

 	host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
#endif
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment