Commit eb3dc897 authored by Nicholas Kazlauskas, committed by Alex Deucher

drm/amd/display: Use private obj helpers for dm_atomic_state

[Why]
Two non-blocking commits in succession can result in a sequence where
the same dc->current_state is queried for both commits.

1. 1st commit -> check -> commit -> swaps atomic state -> queues work
2. 2nd commit -> check -> commit -> swaps atomic state -> queues work
3. 1st commit work finishes

The issue with this sequence is that the same dc->current_state is
read in both atomic checks. If the first commit modifies streams or
planes, those will be missing from the dc->current_state for the
second atomic check. This results in many stream and plane errors in
atomic commit tail.

[How]
The driver still needs to track the old to new state transition to
determine what a commit changes in its current implementation. But
updating the dc_state in atomic commit tail is wrong, since the
dc_state swap should happen as part of drm_atomic_helper_swap_state
*before* the worker queue kicks its work off.

The simplest replacement for the subclassing (which doesn't properly
manage the old to new atomic state swap) is to use the drm private
object helpers. While some of the dc_state members could be merged
into dm_crtc_state or dm_plane_state and copied over that way, it is
easier for now to just treat the whole dc_state structure as a single
private object.

This allows amdgpu_dm to drop the dc->current_state copy from within
atomic check. It's replaced by a copy from the current atomic state,
which is propagated correctly for the sequence described above.

Since access to the dm_state private object is now locked, this should
also fix issues that could arise if submitting non-blocking commits
from different threads.
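
For reference, the drm private object pattern being adopted reduces to
the sketch below (hypothetical my_* names, not the driver code; the
3-argument drm_atomic_private_obj_init() matches the kernel version
this patch targets):

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <linux/slab.h>

struct my_state {
	struct drm_private_state base;
	/* driver-private payload, e.g. a struct dc_state pointer */
};

#define to_my_state(x) container_of(x, struct my_state, base)

static struct drm_private_state *
my_duplicate_state(struct drm_private_obj *obj)
{
	struct my_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	/* Copies the current base state into the new allocation. */
	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	/* Deep-copy the payload from to_my_state(obj->state) here. */

	return &state->base;
}

static void my_destroy_state(struct drm_private_obj *obj,
			     struct drm_private_state *state)
{
	/* Release the payload of to_my_state(state) here. */
	kfree(to_my_state(state));
}

static const struct drm_private_state_funcs my_funcs = {
	.atomic_duplicate_state = my_duplicate_state,
	.atomic_destroy_state = my_destroy_state,
};

The object is registered once at init with its initial state:

	drm_atomic_private_obj_init(&obj, &initial->base, &my_funcs);

Atomic check then calls drm_atomic_get_private_obj_state(state, &obj)
to duplicate the state into the commit, and
drm_atomic_helper_swap_state() later swaps old for new before the
commit work runs, which is exactly the ordering the dc_state swap
needs.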

Cc: Harry Wentland <harry.wentland@amd.com>
Cc: Leo Li <sunpeng.li@amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Reviewed-by: Leo Li <sunpeng.li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 68c12d24
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -989,45 +989,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
};
static struct drm_atomic_state *
dm_atomic_state_alloc(struct drm_device *dev)
{
struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
if (drm_atomic_state_init(dev, &state->base) < 0)
goto fail;
return &state->base;
fail:
kfree(state);
return NULL;
}
static void
dm_atomic_state_clear(struct drm_atomic_state *state)
{
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
if (dm_state->context) {
dc_release_state(dm_state->context);
dm_state->context = NULL;
}
drm_atomic_state_default_clear(state);
}
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
drm_atomic_state_default_release(state);
kfree(dm_state);
}
/**
* DOC: atomic
*
@@ -1039,9 +1000,6 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = amdgpu_dm_atomic_commit,
.atomic_state_alloc = dm_atomic_state_alloc,
.atomic_state_clear = dm_atomic_state_clear,
.atomic_state_free = dm_atomic_state_alloc_free
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -1563,8 +1521,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
}
#endif
/*
* Acquires the lock for the atomic state object and returns
* the new atomic state.
*
* This should only be called during atomic check.
*/
static int dm_atomic_get_state(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_state *priv_state;
int ret;
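/* Reuse the state if an earlier call in this atomic check acquired it. */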
if (*dm_state)
return 0;
ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
if (ret)
return ret;
priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
if (IS_ERR(priv_state))
return PTR_ERR(priv_state);
*dm_state = to_dm_atomic_state(priv_state);
return 0;
}
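/*
 * Returns the new private object state from the atomic state, or NULL
 * if the private object wasn't part of this update.
 */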
struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_obj *obj;
struct drm_private_state *new_obj_state;
int i;
for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
if (obj->funcs == dm->atomic_obj.funcs)
return to_dm_atomic_state(new_obj_state);
}
return NULL;
}
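/*
 * Returns the old private object state from the atomic state, or NULL
 * if the private object wasn't part of this update.
 */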
struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_obj *obj;
struct drm_private_state *old_obj_state;
int i;
for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
if (obj->funcs == dm->atomic_obj.funcs)
return to_dm_atomic_state(old_obj_state);
}
return NULL;
}
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct dm_atomic_state *old_state, *new_state;
new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
if (!new_state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
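/* Start from a fresh dc_state; it's filled from the old state below. */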
new_state->context = dc_create_state();
if (!new_state->context) {
kfree(new_state);
return NULL;
}
old_state = to_dm_atomic_state(obj->state);
if (old_state && old_state->context)
dc_resource_state_copy_construct(old_state->context,
new_state->context);
return &new_state->base;
}
static void dm_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
if (dm_state && dm_state->context)
dc_release_state(dm_state->context);
kfree(dm_state);
}
static struct drm_private_state_funcs dm_atomic_state_funcs = {
.atomic_duplicate_state = dm_atomic_duplicate_state,
.atomic_destroy_state = dm_atomic_destroy_state,
};
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
struct dm_atomic_state *state;
int r;
adev->mode_info.mode_config_initialized = true;
@@ -1582,6 +1649,24 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
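/*
 * Seed the private object's initial state with a copy of the current
 * dc state so the first atomic check has a valid baseline.
 */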
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
state->context = dc_create_state();
if (!state->context) {
kfree(state);
return -ENOMEM;
}
dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
drm_atomic_private_obj_init(&adev->dm.atomic_obj,
&state->base,
&dm_atomic_state_funcs);
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -1905,6 +1990,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_mode_config_cleanup(dm->ddev);
drm_atomic_private_obj_fini(&dm->atomic_obj);
return;
}
@@ -4349,6 +4435,20 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
acrtc->crtc_id);
}
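/*
 * Looks up a stream's status within the given dc_state, unlike
 * dc_stream_get_status() which looks in dc->current_state.
 */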
struct dc_stream_status *dc_state_get_stream_status(
struct dc_state *state,
struct dc_stream_state *stream)
{
uint8_t i;
for (i = 0; i < state->stream_count; i++) {
if (stream == state->streams[i])
return &state->stream_status[i];
}
return NULL;
}
/*
* Executes flip
*
@@ -4558,6 +4658,7 @@ static bool commit_planes_to_stream(
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
@@ -4574,7 +4675,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
int planes_count = 0;
unsigned long flags;
@@ -4635,7 +4735,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
crtc,
fb,
(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
dm_state->context);
dc_state);
}
}
@@ -4661,7 +4761,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
planes_count,
acrtc_state,
dm_old_crtc_state,
dm_state->context))
dc_state))
dm_error("%s: Failed to attach plane!\n", __func__);
} else {
/*TODO BUG Here should go disable planes on CRTC. */
@@ -4729,6 +4829,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
uint32_t i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -4741,7 +4842,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_update_legacy_modeset_state(dev, state);
dm_state = to_dm_atomic_state(state);
dm_state = dm_atomic_get_new_state(state);
if (dm_state && dm_state->context) {
dc_state = dm_state->context;
} else {
/* No state changes, retain current state. */
dc_state_temp = dc_create_state();
ASSERT(dc_state_temp);
dc_state = dc_state_temp;
dc_resource_state_copy_construct_current(dm->dc, dc_state);
}
/* update changed items */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -4814,9 +4924,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
} /* for_each_crtc_in_state() */
if (dm_state->context) {
dm_enable_per_frame_crtc_master_sync(dm_state->context);
WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
if (dc_state) {
dm_enable_per_frame_crtc_master_sync(dc_state);
WARN_ON(!dc_commit_state(dm->dc, dc_state));
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -4828,6 +4938,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
const struct dc_stream_status *status =
dc_stream_get_status(dm_new_crtc_state->stream);
if (!status)
status = dc_state_get_stream_status(dc_state,
dm_new_crtc_state->stream);
if (!status)
DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
else
@@ -4914,7 +5028,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
amdgpu_dm_commit_planes(state, dc_state, dev,
dm, crtc, &wait_for_vblank);
}
@@ -4954,6 +5069,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
if (dc_state_temp)
dc_release_state(dc_state_temp);
}
@@ -5140,11 +5258,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
bool enable,
bool *lock_and_validation_needed)
{
struct dm_atomic_state *dm_state = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct dc_stream_state *new_stream;
int ret = 0;
@@ -5245,6 +5363,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto next_crtc;
ret = dm_atomic_get_state(state, &dm_state);
if (ret)
goto fail;
DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
crtc->base.id);
@@ -5279,6 +5401,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
WARN_ON(dm_new_crtc_state->stream);
ret = dm_atomic_get_state(state, &dm_state);
if (ret)
goto fail;
dm_new_crtc_state->stream = new_stream;
dc_stream_retain(new_stream);
@@ -5353,12 +5479,13 @@ static int dm_update_planes_state(struct dc *dc,
bool enable,
bool *lock_and_validation_needed)
{
struct dm_atomic_state *dm_state = NULL;
struct drm_crtc *new_plane_crtc, *old_plane_crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
int i;
/* TODO return page_flip_needed() function */
@@ -5396,6 +5523,10 @@ static int dm_update_planes_state(struct dc *dc,
DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, old_plane_crtc->base.id);
ret = dm_atomic_get_state(state, &dm_state);
if (ret)
return ret;
if (!dc_remove_plane_from_context(
dc,
dm_old_crtc_state->stream,
@@ -5450,6 +5581,12 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
ret = dm_atomic_get_state(state, &dm_state);
if (ret) {
dc_plane_state_release(dc_new_plane_state);
return ret;
}
/*
* Any atomic check errors that occur after this will
* not need a release. The plane state will be attached
@@ -5481,11 +5618,14 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
{
int i, j, num_plane;
static int
dm_determine_update_type_for_commit(struct dc *dc,
struct drm_atomic_state *state,
enum surface_update_type *out_type)
{
struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
struct drm_crtc *new_plane_crtc, *old_plane_crtc;
@@ -5505,7 +5645,7 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
DRM_ERROR("Plane or surface update failed to allocate");
/* Set type to FULL to avoid crashing in DC*/
update_type = UPDATE_TYPE_FULL;
goto ret;
goto cleanup;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -5559,27 +5699,40 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
}
if (num_plane > 0) {
status = dc_stream_get_status(new_dm_crtc_state->stream);
ret = dm_atomic_get_state(state, &dm_state);
if (ret)
goto cleanup;
old_dm_state = dm_atomic_get_old_state(state);
if (!old_dm_state) {
ret = -EINVAL;
goto cleanup;
}
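/*
 * Take the stream status from the old atomic state, i.e. the last
 * state queued for commit, rather than dc->current_state, which a
 * pending non-blocking commit may not yet have updated.
 */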
status = dc_state_get_stream_status(old_dm_state->context,
new_dm_crtc_state->stream);
update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
&stream_update, status);
if (update_type > UPDATE_TYPE_MED) {
update_type = UPDATE_TYPE_FULL;
goto ret;
goto cleanup;
}
}
} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
update_type = UPDATE_TYPE_FULL;
goto ret;
goto cleanup;
}
}
ret:
cleanup:
kfree(updates);
kfree(surface);
return update_type;
*out_type = update_type;
return ret;
}
/**
@@ -5611,8 +5764,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct amdgpu_device *adev = dev->dev_private;
struct dm_atomic_state *dm_state = NULL;
struct dc *dc = adev->dm.dc;
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct drm_connector *connector;
struct drm_connector_state *old_con_state, *new_con_state;
struct drm_crtc *crtc;
@@ -5653,10 +5806,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
dm_state->context = dc_create_state();
ASSERT(dm_state->context);
dc_resource_state_copy_construct_current(dc, dm_state->context);
/* Remove exiting planes if they are modified */
ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
if (ret) {
@@ -5709,7 +5858,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
update_type = dm_determine_update_type_for_commit(dc, state);
ret = dm_determine_update_type_for_commit(dc, state, &update_type);
if (ret)
goto fail;
if (overall_update_type < update_type)
overall_update_type = update_type;
@@ -5727,6 +5878,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (overall_update_type > UPDATE_TYPE_FAST) {
ret = dm_atomic_get_state(state, &dm_state);
if (ret)
goto fail;
ret = do_aquire_global_lock(dev, state);
if (ret)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -123,6 +123,17 @@ struct amdgpu_display_manager {
struct drm_device *ddev;
u16 display_indexes_num;
/**
* @atomic_obj
*
* In combination with &dm_atomic_state it helps manage
* global atomic state that doesn't map cleanly into existing
* drm resources, like &dc_context.
*/
struct drm_private_obj atomic_obj;
struct drm_modeset_lock atomic_obj_lock;
/**
* @irq_handler_list_low_tab:
*
@@ -254,7 +265,7 @@ struct dm_crtc_state {
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
struct dm_atomic_state {
struct drm_atomic_state base;
struct drm_private_state base;
struct dc_state *context;
};