Commit b4bb9f15 authored by Rob Clark

drm/msm/dpu: unwind async commit handling

The async commit path attempted to avoid fps drops in the presence of cursor
updates.  But it is racy, and can result in hw updates landing after the flush
but before vblank, which leads to underruns.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Sean Paul <sean@poorly.run>
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 0c91ed51
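For context, one piece of the async path being removed is in the atomic commit tail: cursor-only ("legacy_cursor_update") commits skipped the wait for the previous commit to finish latching. A simplified sketch of the old flow, reconstructed from the msm_atomic.c hunk below with the surrounding setup elided (not the full function):

	/* ... plane/CRTC updates, then the kms commit hook ... */
	kms->funcs->commit(kms, state);

	/*
	 * Cursor-only ("async") commits skipped this wait, so the next
	 * commit could program the hw after this flush but before vblank
	 * latched it -- the race that leads to the underruns described
	 * above.  The patch drops the check and always waits.
	 */
	if (!state->legacy_cursor_update)
		msm_atomic_wait_for_commit_done(dev, state);

	kms->funcs->complete_commit(kms, state);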
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -612,7 +612,7 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
 	return rc;
 }
 
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
 {
 	struct drm_encoder *encoder;
 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -638,35 +638,32 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
 			crtc->state->encoder_mask)
 		dpu_encoder_prepare_for_kickoff(encoder);
 
-	if (!async) {
-		/* wait for previous frame_event_done completion */
-		DPU_ATRACE_BEGIN("wait_for_frame_done_event");
-		ret = _dpu_crtc_wait_for_frame_done(crtc);
-		DPU_ATRACE_END("wait_for_frame_done_event");
-		if (ret) {
-			DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
-					crtc->base.id,
-					atomic_read(&dpu_crtc->frame_pending));
-			goto end;
-		}
-
-		if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
-			/* acquire bandwidth and other resources */
-			DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
-		} else
-			DPU_DEBUG("crtc%d commit\n", crtc->base.id);
-
-		dpu_crtc->play_count++;
-	}
+	/* wait for previous frame_event_done completion */
+	DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+	ret = _dpu_crtc_wait_for_frame_done(crtc);
+	DPU_ATRACE_END("wait_for_frame_done_event");
+	if (ret) {
+		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+				crtc->base.id,
+				atomic_read(&dpu_crtc->frame_pending));
+		goto end;
+	}
+
+	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+		/* acquire bandwidth and other resources */
+		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+	} else
+		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+
+	dpu_crtc->play_count++;
 
 	dpu_vbif_clear_errors(dpu_kms);
 
 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
-		dpu_encoder_kickoff(encoder, async);
+		dpu_encoder_kickoff(encoder);
 
 end:
-	if (!async)
-		reinit_completion(&dpu_crtc->frame_done_comp);
+	reinit_completion(&dpu_crtc->frame_done_comp);
 
 	DPU_ATRACE_END("crtc_commit");
 }
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -238,9 +238,8 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
 /**
  * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
  * @crtc: Pointer to drm crtc object
- * @async: true if the commit is asynchronous, false otherwise
  */
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async);
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
 
 /**
  * dpu_crtc_complete_commit - callback signalling completion of current commit
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1423,8 +1423,7 @@ static void dpu_encoder_off_work(struct work_struct *work)
  * extra_flush_bits: Additional bit mask to include in flush trigger
  */
 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
-		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
-		bool async)
+		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
 {
 	struct dpu_hw_ctl *ctl;
 	int pending_kickoff_cnt;
@@ -1441,10 +1440,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
 		return;
 	}
 
-	if (!async)
-		pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
-	else
-		pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
+	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
 
 	if (extra_flush_bits && ctl->ops.update_pending_flush)
 		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1555,8 +1551,7 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
  * a time.
 * dpu_enc: Pointer to virtual encoder structure
  */
-static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
-		bool async)
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
 {
 	struct dpu_hw_ctl *ctl;
 	uint32_t i, pending_flush;
@@ -1583,13 +1578,12 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
 		 * for async commits. So don't set this for async, since it'll
 		 * roll over to the next commit.
 		 */
-		if (!async && phys->split_role != ENC_ROLE_SLAVE)
+		if (phys->split_role != ENC_ROLE_SLAVE)
 			set_bit(i, dpu_enc->frame_busy_mask);
 
 		if (!phys->ops.needs_single_flush ||
 		    !phys->ops.needs_single_flush(phys))
-			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
-					async);
+			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
 		else if (ctl->ops.get_pending_flush)
 			pending_flush |= ctl->ops.get_pending_flush(ctl);
 	}
@@ -1599,7 +1593,7 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
 		_dpu_encoder_trigger_flush(
 				&dpu_enc->base,
 				dpu_enc->cur_master,
-				pending_flush, async);
+				pending_flush);
 	}
 
 	_dpu_encoder_trigger_start(dpu_enc->cur_master);
@@ -1817,11 +1811,12 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
 	}
 }
 
-void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc;
 	struct dpu_encoder_phys *phys;
 	ktime_t wakeup_time;
+	unsigned long timeout_ms;
 	unsigned int i;
 
 	DPU_ATRACE_BEGIN("encoder_kickoff");
@@ -1829,23 +1824,15 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
 
 	trace_dpu_enc_kickoff(DRMID(drm_enc));
 
-	/*
-	 * Asynchronous frames don't handle FRAME_DONE events. As such, they
-	 * shouldn't enable the frame_done watchdog since it will always time
-	 * out.
-	 */
-	if (!async) {
-		unsigned long timeout_ms;
-		timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
 			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
 
-		atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
-		mod_timer(&dpu_enc->frame_done_timer,
-			  jiffies + msecs_to_jiffies(timeout_ms));
-	}
+	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+	mod_timer(&dpu_enc->frame_done_timer,
+			jiffies + msecs_to_jiffies(timeout_ms));
 
 	/* All phys encs are ready to go, trigger the kickoff */
-	_dpu_encoder_kickoff_phys(dpu_enc, async);
+	_dpu_encoder_kickoff_phys(dpu_enc);
 
 	/* allow phys encs to handle any post-kickoff business */
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -82,9 +82,8 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
  * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
  *	(i.e. ctl flush and start) immediately.
  * @encoder:	encoder pointer
- * @async: true if this is an asynchronous commit
  */
-void dpu_encoder_kickoff(struct drm_encoder *encoder, bool async);
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
 
 /**
  * dpu_encoder_wait_for_event - Waits for encoder events
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -300,7 +300,7 @@ void dpu_kms_encoder_enable(struct drm_encoder *encoder)
 			continue;
 
 		trace_dpu_kms_enc_enable(DRMID(crtc));
-		dpu_crtc_commit_kickoff(crtc, false);
+		dpu_crtc_commit_kickoff(crtc);
 	}
 }
 
@@ -317,8 +317,7 @@ static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 		if (crtc->state->active) {
 			trace_dpu_kms_commit(DRMID(crtc));
-			dpu_crtc_commit_kickoff(crtc,
-						state->legacy_cursor_update);
+			dpu_crtc_commit_kickoff(crtc);
 		}
 	}
 }
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -67,8 +67,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
 		kms->funcs->commit(kms, state);
 	}
 
-	if (!state->legacy_cursor_update)
-		msm_atomic_wait_for_commit_done(dev, state);
+	msm_atomic_wait_for_commit_done(dev, state);
 
 	kms->funcs->complete_commit(kms, state);