Commit 00fd14ff authored by Dave Airlie

Merge branch 'drm-fixes-5.1' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

- GPUVM fixes for vega/RV and shadow buffers
- TTM fixes for hugepages
- TTM fix for refcount imbalance in error path
- DC AUX fix for some active DP-DVI dongles
- DC fix for multihead VT switch regression
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190415051703.3377-1-alexander.deucher@amd.com
parents ce519c1b c238bfe0
...@@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) ...@@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
/* No need to recover an evicted BO */ /* No need to recover an evicted BO */
if (shadow->tbo.mem.mem_type != TTM_PL_TT || if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
continue; continue;
......
...@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) ...@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6); L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
} }
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT; tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
......
...@@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc, ...@@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
return UPDATE_TYPE_FULL; return UPDATE_TYPE_FULL;
} }
if (u->surface->force_full_update) {
update_flags->bits.full_update = 1;
return UPDATE_TYPE_FULL;
}
type = get_plane_info_update_type(u); type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type); elevate_update_type(&overall_type, type);
...@@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc, ...@@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
} }
dc_resource_state_copy_construct(state, context); dc_resource_state_copy_construct(state, context);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
} }
...@@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc, ...@@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc->current_state = context; dc->current_state = context;
dc_release_state(old); dc_release_state(old);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
pipe_ctx->plane_state->force_full_update = false;
}
} }
/*let's use current_state to update watermark etc*/ /*let's use current_state to update watermark etc*/
if (update_type >= UPDATE_TYPE_FULL) if (update_type >= UPDATE_TYPE_FULL)
......
...@@ -503,6 +503,9 @@ struct dc_plane_state { ...@@ -503,6 +503,9 @@ struct dc_plane_state {
struct dc_plane_status status; struct dc_plane_status status;
struct dc_context *ctx; struct dc_context *ctx;
/* HACK: Workaround for forcing full reprogramming under some conditions */
bool force_full_update;
/* private to dc_surface.c */ /* private to dc_surface.c */
enum dc_irq_source irq_source; enum dc_irq_source irq_source;
struct kref refcount; struct kref refcount;
......
...@@ -190,6 +190,12 @@ static void submit_channel_request( ...@@ -190,6 +190,12 @@ static void submit_channel_request(
1, 1,
0); 0);
} }
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
10, aux110->timeout_period/10);
/* set the delay and the number of bytes to write */ /* set the delay and the number of bytes to write */
/* The length include /* The length include
...@@ -242,9 +248,6 @@ static void submit_channel_request( ...@@ -242,9 +248,6 @@ static void submit_channel_request(
} }
} }
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
10, aux110->timeout_period/10);
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
} }
......
...@@ -71,11 +71,11 @@ enum { /* This is the timeout as defined in DP 1.2a, ...@@ -71,11 +71,11 @@ enum { /* This is the timeout as defined in DP 1.2a,
* at most within ~240usec. That means, * at most within ~240usec. That means,
* increasing this timeout will not affect normal operation, * increasing this timeout will not affect normal operation,
* and we'll timeout after * and we'll timeout after
* SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec. * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
* This timeout is especially important for * This timeout is especially important for
* resume from S3 and CTS. * converters, resume from S3, and CTS.
*/ */
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
}; };
struct dce_aux { struct dce_aux {
......
...@@ -876,8 +876,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, ...@@ -876,8 +876,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
reservation_object_add_shared_fence(bo->resv, fence); reservation_object_add_shared_fence(bo->resv, fence);
ret = reservation_object_reserve_shared(bo->resv, 1); ret = reservation_object_reserve_shared(bo->resv, 1);
if (unlikely(ret)) if (unlikely(ret)) {
dma_fence_put(fence);
return ret; return ret;
}
dma_fence_put(bo->moving); dma_fence_put(bo->moving);
bo->moving = fence; bo->moving = fence;
......
...@@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, ...@@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
} }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (!(flags & TTM_PAGE_FLAG_DMA32)) { if (!(flags & TTM_PAGE_FLAG_DMA32) &&
for (j = 0; j < HPAGE_PMD_NR; ++j) (npages - i) >= HPAGE_PMD_NR) {
if (p++ != pages[i + j]) for (j = 1; j < HPAGE_PMD_NR; ++j)
if (++p != pages[i + j])
break; break;
if (j == HPAGE_PMD_NR) if (j == HPAGE_PMD_NR)
...@@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, ...@@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
unsigned max_size, n2free; unsigned max_size, n2free;
spin_lock_irqsave(&huge->lock, irq_flags); spin_lock_irqsave(&huge->lock, irq_flags);
while (i < npages) { while ((npages - i) >= HPAGE_PMD_NR) {
struct page *p = pages[i]; struct page *p = pages[i];
unsigned j; unsigned j;
if (!p) if (!p)
break; break;
for (j = 0; j < HPAGE_PMD_NR; ++j) for (j = 1; j < HPAGE_PMD_NR; ++j)
if (p++ != pages[i + j]) if (++p != pages[i + j])
break; break;
if (j != HPAGE_PMD_NR) if (j != HPAGE_PMD_NR)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment